author | Benjamin Bouvier <benj@benj.me>
date | Thu, 15 Feb 2018 12:41:55 +0100
changeset 404136 | 12219bfe0748a9bef50559e5c8eefca83655627a
parent 404135 | bdeee319fd1a6ae8e99c08913d7c10d8622ebfdd
child 404137 | b0db89ec8e9700b63779372d473c91900e6beb17
push id | 99939
push user | bbouvier@mozilla.com
push date | Fri, 16 Feb 2018 10:00:41 +0000
treeherder | mozilla-inbound@a274eb9c8f1f
reviewers | luke
bugs | 1422043
milestone | 60.0a1
first release with | nightly linux32, linux64, mac, win32, win64
last release without | nightly linux32, linux64, mac, win32, win64
--- a/js/src/jit-test/lib/wasm.js
+++ b/js/src/jit-test/lib/wasm.js
@@ -176,19 +176,17 @@ function wasmGetScriptBreakpoints(wasmSc
 const WasmHelpers = {};
 
 (function() {
     let enabled = false;
     try {
         enableSingleStepProfiling();
         disableSingleStepProfiling();
         enabled = true;
-    } catch (e) {
-        print(e.message);
-    }
+    } catch (e) {}
     WasmHelpers.isSingleStepProfilingEnabled = enabled;
 })();
 
 WasmHelpers._normalizeStack = (stack, preciseStacks) => {
     var wasmFrameTypes = [
         {re:/^jit call to int64 wasm function$/, sub:"i64>"},
         {re:/^out-of-line coercion for jit entry arguments \(in wasm\)$/, sub:"ool>"},
         {re:/^wasm-function\[(\d+)\] \(.*\)$/, sub:"$1"},
--- a/js/src/wasm/AsmJS.cpp
+++ b/js/src/wasm/AsmJS.cpp
@@ -330,18 +330,18 @@ struct js::AsmJSMetadata : Metadata, Asm
     uint32_t srcEndBeforeCurly() const {
         return srcStart + srcLength;
     }
     uint32_t srcEndAfterCurly() const {
         return srcStart + srcLengthWithRightBrace;
     }
 
-    explicit AsmJSMetadata(UniqueMetadataTier tier)
-      : Metadata(Move(tier), ModuleKind::AsmJS),
+    AsmJSMetadata()
+      : Metadata(ModuleKind::AsmJS),
         cacheResult(CacheResult::Miss),
         srcStart(0),
         strict(false)
     {}
     ~AsmJSMetadata() override {}
 
     const AsmJSExport& lookupAsmJSExport(uint32_t funcIndex) const {
         // The AsmJSExportVector isn't stored in sorted order so do a linear
@@ -1808,21 +1808,17 @@ class MOZ_STACK_CLASS ModuleValidator
             }
         }
         va_end(args);
     }
 
  public:
    bool init() {
-        auto tierMetadata = js::MakeUnique<MetadataTier>(Tier::Ion);
-        if (!tierMetadata)
-            return false;
-
-        asmJSMetadata_ = cx_->new_<AsmJSMetadata>(Move(tierMetadata));
+        asmJSMetadata_ = cx_->new_<AsmJSMetadata>();
         if (!asmJSMetadata_)
             return false;
 
         asmJSMetadata_->toStringStart = moduleFunctionNode_->pn_funbox->toStringStart;
         asmJSMetadata_->srcStart = moduleFunctionNode_->pn_body->pn_pos.begin;
         asmJSMetadata_->strict = parser_.pc->sc()->strict() && !parser_.pc->sc()->hasExplicitUseStrict();
         asmJSMetadata_->scriptSource.reset(parser_.ss);
@@ -8652,21 +8648,17 @@ LookupAsmJSModuleInCache(JSContext* cx,
     Assumptions assumptions;
     if (!assumptions.initBuildIdFromContext(cx))
         return false;
 
     if (!Module::assumptionsMatch(assumptions, cursor, remain))
         return true;
 
-    auto tierMetadata = js::MakeUnique<MetadataTier>(Tier::Ion);
-    if (!tierMetadata)
-        return false;
-
-    MutableAsmJSMetadata asmJSMetadata = cx->new_<AsmJSMetadata>(Move(tierMetadata));
+    MutableAsmJSMetadata asmJSMetadata = cx->new_<AsmJSMetadata>();
     if (!asmJSMetadata)
         return false;
 
     *module = Module::deserialize(/* bytecodeBegin = */ nullptr, /* bytecodeSize = */ 0,
                                   cursor, compiledSize, asmJSMetadata.get());
     if (!*module) {
         ReportOutOfMemory(cx);
         return false;
--- a/js/src/wasm/WasmCode.cpp
+++ b/js/src/wasm/WasmCode.cpp
@@ -32,16 +32,17 @@
 #include "jit/MacroAssembler-inl.h"
 
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
 
 using mozilla::BinarySearch;
 using mozilla::MakeEnumeratedRange;
+using mozilla::PodAssign;
 
 using JS::GenericNaN;
 
 bool
 CodeSegment::registerInProcessMap()
 {
     if (!RegisterCodeSegment(this))
         return false;
     registered_ = true;
@@ -84,16 +85,23 @@ CodeSegment::AllocateCodeBytes(uint32_t
         return nullptr;
 
     // We account for the bytes allocated in WasmModuleObject::create, where we
     // have the necessary JSContext.
 
     return UniqueCodeBytes((uint8_t*)p, FreeCode(codeLength));
 }
 
+const Code&
+CodeSegment::code() const
+{
+    MOZ_ASSERT(codeTier_);
+    return codeTier_->code();
+}
+
 void
 FreeCode::operator()(uint8_t* bytes)
 {
     MOZ_ASSERT(codeLength);
     MOZ_ASSERT(codeLength == RoundupCodeLength(codeLength));
 
 #ifdef MOZ_VTUNE
     vtune::UnmarkBytes(bytes, codeLength);
@@ -158,29 +166,30 @@ StaticallyUnlink(uint8_t* base, const Li
 static bool
 AppendToString(const char* str, UTF8Bytes* bytes)
 {
     return bytes->append(str, strlen(str)) && bytes->append('\0');
 }
 #endif
 
 static void
-SendCodeRangesToProfiler(const ModuleSegment& ms, const Bytes& bytecode, const Metadata& metadata)
+SendCodeRangesToProfiler(const ModuleSegment& ms, const Bytes& bytecode, const Metadata& metadata,
+                         const CodeRangeVector& codeRanges)
 {
     bool enabled = false;
 #ifdef JS_ION_PERF
     enabled |= PerfFuncEnabled();
 #endif
 #ifdef MOZ_VTUNE
     enabled |= vtune::IsProfilingActive();
 #endif
     if (!enabled)
         return;
 
-    for (const CodeRange& codeRange : metadata.metadata(ms.tier()).codeRanges) {
+    for (const CodeRange& codeRange : codeRanges) {
         if (!codeRange.hasFuncIndex())
             continue;
 
         uintptr_t start = uintptr_t(ms.base() + codeRange.begin());
         uintptr_t size = codeRange.end() - codeRange.begin();
 
         UTF8Bytes name;
         if (!metadata.getFuncName(&bytecode, codeRange.funcIndex(), &name))
@@ -231,17 +240,18 @@ SendCodeRangesToProfiler(const ModuleSeg
     }
 }
 
 /* static */ UniqueModuleSegment
 ModuleSegment::create(Tier tier,
                       MacroAssembler& masm,
                       const ShareableBytes& bytecode,
                       const LinkDataTier& linkData,
-                      const Metadata& metadata)
+                      const Metadata& metadata,
+                      const CodeRangeVector& codeRanges)
 {
     // Round up the code size to page size since this is eventually required by
     // the executable-code allocator and for setting memory protection.
     uint32_t bytesNeeded = masm.bytesNeeded();
     uint32_t padding = ComputeByteAlignment(bytesNeeded, gc::SystemPageSize());
     uint32_t codeLength = bytesNeeded + padding;
 
     UniqueCodeBytes codeBytes = AllocateCodeBytes(codeLength);
@@ -249,68 +259,71 @@ ModuleSegment::create(Tier tier,
         return nullptr;
 
     // We'll flush the icache after static linking, in initialize().
     masm.executableCopy(codeBytes.get(), /* flushICache = */ false);
 
     // Zero the padding.
     memset(codeBytes.get() + bytesNeeded, 0, padding);
 
-    return create(tier, Move(codeBytes), codeLength, bytecode, linkData, metadata);
+    return create(tier, Move(codeBytes), codeLength, bytecode, linkData, metadata, codeRanges);
 }
 
 /* static */ UniqueModuleSegment
 ModuleSegment::create(Tier tier,
                       const Bytes& unlinkedBytes,
                       const ShareableBytes& bytecode,
                       const LinkDataTier& linkData,
-                      const Metadata& metadata)
+                      const Metadata& metadata,
+                      const CodeRangeVector& codeRanges)
 {
     // The unlinked bytes are a snapshot of the MacroAssembler's contents so
     // round up just like in the MacroAssembler overload above.
     uint32_t padding = ComputeByteAlignment(unlinkedBytes.length(), gc::SystemPageSize());
     uint32_t codeLength = unlinkedBytes.length() + padding;
 
     UniqueCodeBytes codeBytes = AllocateCodeBytes(codeLength);
     if (!codeBytes)
         return nullptr;
 
     memcpy(codeBytes.get(), unlinkedBytes.begin(), unlinkedBytes.length());
     memset(codeBytes.get() + unlinkedBytes.length(), 0, padding);
 
-    return create(tier, Move(codeBytes), codeLength, bytecode, linkData, metadata);
+    return create(tier, Move(codeBytes), codeLength, bytecode, linkData, metadata, codeRanges);
 }
 
 /* static */ UniqueModuleSegment
 ModuleSegment::create(Tier tier,
                       UniqueCodeBytes codeBytes,
                       uint32_t codeLength,
                       const ShareableBytes& bytecode,
                       const LinkDataTier& linkData,
-                      const Metadata& metadata)
+                      const Metadata& metadata,
+                      const CodeRangeVector& codeRanges)
 {
     // These should always exist and should never be first in the code segment.
 
     auto ms = js::MakeUnique<ModuleSegment>();
     if (!ms)
         return nullptr;
 
-    if (!ms->initialize(tier, Move(codeBytes), codeLength, bytecode, linkData, metadata))
+    if (!ms->initialize(tier, Move(codeBytes), codeLength, bytecode, linkData, metadata, codeRanges))
         return nullptr;
 
     return UniqueModuleSegment(ms.release());
 }
 
 bool
 ModuleSegment::initialize(Tier tier,
                           UniqueCodeBytes codeBytes,
                           uint32_t codeLength,
                           const ShareableBytes& bytecode,
                           const LinkDataTier& linkData,
-                          const Metadata& metadata)
+                          const Metadata& metadata,
+                          const CodeRangeVector& codeRanges)
 {
     MOZ_ASSERT(bytes_ == nullptr);
     MOZ_ASSERT(linkData.interruptOffset);
     MOZ_ASSERT(linkData.outOfBoundsOffset);
     MOZ_ASSERT(linkData.unalignedAccessOffset);
     MOZ_ASSERT(linkData.trapOffset);
 
     tier_ = tier;
@@ -328,17 +341,17 @@ ModuleSegment::initialize(Tier tier,
     // Reprotect the whole region to avoid having separate RW and RX mappings.
     if (!ExecutableAllocator::makeExecutable(bytes_.get(), RoundupCodeLength(codeLength)))
         return false;
 
     if (!registerInProcessMap())
         return false;
 
-    SendCodeRangesToProfiler(*this, bytecode.bytes, metadata);
+    SendCodeRangesToProfiler(*this, bytecode.bytes, metadata, codeRanges);
 
     return true;
 }
 
 size_t
 ModuleSegment::serializedSize() const
 {
     return sizeof(uint32_t) + length_;
@@ -360,33 +373,34 @@ ModuleSegment::serialize(uint8_t* cursor
     uint8_t* base = cursor;
     cursor = WriteBytes(cursor, bytes_.get(), length_);
     StaticallyUnlink(base, linkData);
     return cursor;
 }
 
 const uint8_t*
 ModuleSegment::deserialize(const uint8_t* cursor, const ShareableBytes& bytecode,
-                           const LinkDataTier& linkData, const Metadata& metadata)
+                           const LinkDataTier& linkData, const Metadata& metadata,
+                           const CodeRangeVector& codeRanges)
 {
     uint32_t length;
     cursor = ReadScalar<uint32_t>(cursor, &length);
     if (!cursor)
         return nullptr;
 
     MOZ_ASSERT(length_ % gc::SystemPageSize() == 0);
     UniqueCodeBytes bytes = AllocateCodeBytes(length);
     if (!bytes)
         return nullptr;
 
     cursor = ReadBytes(cursor, bytes.get(), length);
     if (!cursor)
         return nullptr;
 
-    if (!initialize(Tier::Serialized, Move(bytes), length, bytecode, linkData, metadata))
+    if (!initialize(Tier::Serialized, Move(bytes), length, bytecode, linkData, metadata, codeRanges))
         return nullptr;
 
     return cursor;
 }
 
 size_t
 FuncExport::serializedSize() const
 {
@@ -535,134 +549,95 @@ MetadataTier::deserialize(const uint8_t*
            (cursor = trapSites.deserialize(cursor)) &&
            (cursor = DeserializeVector(cursor, &funcImports)) &&
            (cursor = DeserializeVector(cursor, &funcExports));
     debugTrapFarJumpOffsets.clear();
     debugFuncToCodeRange.clear();
     return cursor;
 }
 
-void
-Metadata::commitTier2() const
-{
-    MOZ_RELEASE_ASSERT(metadata2_.get());
-    MOZ_RELEASE_ASSERT(!hasTier2_);
-    hasTier2_ = true;
-}
-
-void
-Metadata::setTier2(UniqueMetadataTier metadata) const
-{
-    MOZ_RELEASE_ASSERT(metadata->tier == Tier::Ion && metadata1_->tier != Tier::Ion);
-    MOZ_RELEASE_ASSERT(!metadata2_.get());
-    metadata2_ = Move(metadata);
-}
-
-Tiers
-Metadata::tiers() const
-{
-    if (hasTier2())
-        return Tiers(metadata1_->tier, metadata2_->tier);
-    return Tiers(metadata1_->tier);
-}
-
-const MetadataTier&
-Metadata::metadata(Tier t) const
+bool
+MetadataTier::clone(const MetadataTier& src)
 {
-    switch (t) {
-      case Tier::Baseline:
-        if (metadata1_->tier == Tier::Baseline)
-            return *metadata1_;
-        MOZ_CRASH("No metadata at this tier");
-      case Tier::Ion:
-        if (metadata1_->tier == Tier::Ion)
-            return *metadata1_;
-        if (hasTier2())
-            return *metadata2_;
-        MOZ_CRASH("No metadata at this tier");
-      default:
-        MOZ_CRASH();
-    }
-}
+    if (!memoryAccesses.appendAll(src.memoryAccesses))
+        return false;
+    if (!codeRanges.appendAll(src.codeRanges))
+        return false;
+    if (!callSites.appendAll(src.callSites))
+        return false;
+    if (!debugTrapFarJumpOffsets.appendAll(src.debugTrapFarJumpOffsets))
+        return false;
+    if (!debugFuncToCodeRange.appendAll(src.debugFuncToCodeRange))
+        return false;
 
-MetadataTier&
-Metadata::metadata(Tier t)
-{
-    switch (t) {
-      case Tier::Baseline:
-        if (metadata1_->tier == Tier::Baseline)
-            return *metadata1_;
-        MOZ_CRASH("No metadata at this tier");
-      case Tier::Ion:
-        if (metadata1_->tier == Tier::Ion)
-            return *metadata1_;
-        if (hasTier2())
-            return *metadata2_;
-        MOZ_CRASH("No metadata at this tier");
-      default:
-        MOZ_CRASH();
+    for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+        if (!trapSites[trap].appendAll(src.trapSites[trap]))
+            return false;
     }
+
+    if (!funcImports.resize(src.funcImports.length()))
+        return false;
+    for (size_t i = 0; i < src.funcImports.length(); i++)
+        funcImports[i].clone(src.funcImports[i]);
+
+    if (!funcExports.resize(src.funcExports.length()))
+        return false;
+    for (size_t i = 0; i < src.funcExports.length(); i++)
+        funcExports[i].clone(src.funcExports[i]);
+
+    return true;
 }
 
 size_t
 Metadata::serializedSize() const
 {
     return sizeof(pod()) +
-           metadata(Tier::Serialized).serializedSize() +
            SerializedVectorSize(sigIds) +
            SerializedPodVectorSize(globals) +
           SerializedPodVectorSize(tables) +
           SerializedPodVectorSize(funcNames) +
           SerializedPodVectorSize(customSections) +
           filename.serializedSize() +
           baseURL.serializedSize() +
           sourceMapURL.serializedSize();
 }
 
 size_t
 Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
-    size_t sum = 0;
-
-    for (auto t : tiers())
-        sum += metadata(t).sizeOfExcludingThis(mallocSizeOf);
-
-    return sum +
-           SizeOfVectorExcludingThis(sigIds, mallocSizeOf) +
+    return SizeOfVectorExcludingThis(sigIds, mallocSizeOf) +
           globals.sizeOfExcludingThis(mallocSizeOf) +
           tables.sizeOfExcludingThis(mallocSizeOf) +
           funcNames.sizeOfExcludingThis(mallocSizeOf) +
           customSections.sizeOfExcludingThis(mallocSizeOf) +
           filename.sizeOfExcludingThis(mallocSizeOf) +
           baseURL.sizeOfExcludingThis(mallocSizeOf) +
           sourceMapURL.sizeOfExcludingThis(mallocSizeOf);
 }
 
 uint8_t*
 Metadata::serialize(uint8_t* cursor) const
 {
     MOZ_ASSERT(!debugEnabled && debugFuncArgTypes.empty() && debugFuncReturnTypes.empty());
     cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
-    cursor = metadata(Tier::Serialized).serialize(cursor);
     cursor = SerializeVector(cursor, sigIds);
     cursor = SerializePodVector(cursor, globals);
     cursor = SerializePodVector(cursor, tables);
     cursor = SerializePodVector(cursor, funcNames);
     cursor = SerializePodVector(cursor, customSections);
     cursor = filename.serialize(cursor);
     cursor = baseURL.serialize(cursor);
     cursor = sourceMapURL.serialize(cursor);
     return cursor;
 }
 
 /* static */ const uint8_t*
 Metadata::deserialize(const uint8_t* cursor)
 {
     (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
-    (cursor = metadata(Tier::Serialized).deserialize(cursor)) &&
     (cursor = DeserializeVector(cursor, &sigIds)) &&
     (cursor = DeserializePodVector(cursor, &globals)) &&
     (cursor = DeserializePodVector(cursor, &tables)) &&
     (cursor = DeserializePodVector(cursor, &funcNames)) &&
     (cursor = DeserializePodVector(cursor, &customSections)) &&
     (cursor = filename.deserialize(cursor));
     (cursor = baseURL.deserialize(cursor));
     (cursor = sourceMapURL.deserialize(cursor));
@@ -722,16 +697,60 @@ Metadata::getFuncName(const Bytes* maybe
     const char* funcIndexStr = NumberToCString(nullptr, &cbuf, funcIndex);
     MOZ_ASSERT(funcIndexStr);
 
     return name->append(beforeFuncIndex, strlen(beforeFuncIndex)) &&
            name->append(funcIndexStr, strlen(funcIndexStr)) &&
           name->append(afterFuncIndex, strlen(afterFuncIndex));
 }
 
+size_t
+CodeTier::serializedSize() const
+{
+    return segment_->serializedSize() +
+           metadata_->serializedSize();
+}
+
+uint8_t*
+CodeTier::serialize(uint8_t* cursor, const LinkDataTier& linkData) const
+{
+    cursor = metadata_->serialize(cursor);
+    cursor = segment_->serialize(cursor, linkData);
+    return cursor;
+}
+
+const uint8_t*
+CodeTier::deserialize(const uint8_t* cursor, const SharedBytes& bytecode, Metadata& metadata,
+                      const LinkDataTier& linkData)
+{
+    metadata_ = js::MakeUnique<MetadataTier>(Tier::Serialized);
+    if (!metadata_)
+        return nullptr;
+    cursor = metadata_->deserialize(cursor);
+    if (!cursor)
+        return nullptr;
+
+    auto segment = Move(js::MakeUnique<ModuleSegment>());
+    if (!segment)
+        return nullptr;
+    cursor = segment->deserialize(cursor, *bytecode, linkData, metadata, metadata_->codeRanges);
+    if (!cursor)
+        return nullptr;
+    segment_ = takeOwnership(Move(segment));
+
+    return cursor;
+}
+
+void
+CodeTier::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, size_t* data) const
+{
+    segment_->addSizeOfMisc(mallocSizeOf, code, data);
+    *data += metadata_->sizeOfExcludingThis(mallocSizeOf);
+}
+
 bool
 JumpTables::init(CompileMode mode, const ModuleSegment& ms, const CodeRangeVector& codeRanges)
 {
     // Note a fast jit entry has two addresses, to be compatible with
     // ion/baseline functions which have the raw vs checked args entries,
     // both used all over the place in jit calls. This allows the fast entries
     // to be compatible with jit code pointer loading routines.
     // We can use the same entry for both kinds of jit entries since a wasm
@@ -772,88 +791,96 @@ JumpTables::init(CompileMode mode, const
         if (cr.isFunction())
             setTieringEntry(cr.funcIndex(), codeBase + cr.funcTierEntry());
         else if (cr.isJitEntry())
             setJitEntry(cr.funcIndex(), codeBase + cr.begin());
     }
     return true;
 }
 
-Code::Code(UniqueModuleSegment tier, const Metadata& metadata, JumpTables&& maybeJumpTables)
-  : metadata_(&metadata),
+Code::Code(UniqueCodeTier codeTier, const Metadata& metadata, JumpTables&& maybeJumpTables)
+  : tier1_(takeOwnership(Move(codeTier))),
+    metadata_(&metadata),
     profilingLabels_(mutexid::WasmCodeProfilingLabels, CacheableCharsVector()),
     jumpTables_(Move(maybeJumpTables))
 {
-    segment1_ = takeOwnership(Move(tier));
 }
 
 Code::Code()
   : profilingLabels_(mutexid::WasmCodeProfilingLabels, CacheableCharsVector())
 {
 }
 
 void
-Code::setTier2(UniqueModuleSegment segment) const
+Code::setTier2(UniqueCodeTier tier2) const
 {
-    MOZ_RELEASE_ASSERT(segment->tier() == Tier::Ion && segment1_->tier() != Tier::Ion);
-    MOZ_RELEASE_ASSERT(!segment2_.get());
-    segment2_ = takeOwnership(Move(segment));
+    MOZ_RELEASE_ASSERT(!hasTier2());
+    MOZ_RELEASE_ASSERT(tier2->tier() == Tier::Ion && tier1_->tier() == Tier::Baseline);
+    tier2_ = takeOwnership(Move(tier2));
+}
+
+void
+Code::commitTier2() const
+{
+    MOZ_RELEASE_ASSERT(!hasTier2());
+    MOZ_RELEASE_ASSERT(tier2_.get());
+    hasTier2_ = true;
 }
 
 uint32_t
 Code::getFuncIndex(JSFunction* fun) const
 {
     if (fun->isAsmJSNative())
         return fun->asmJSFuncIndex();
     return jumpTables_.funcIndexFromJitEntry(fun->wasmJitEntry());
 }
 
 Tiers
 Code::tiers() const
 {
     if (hasTier2())
-        return Tiers(segment1_->tier(), segment2_->tier());
-    return Tiers(segment1_->tier());
+        return Tiers(tier1_->tier(), tier2_->tier());
+    return Tiers(tier1_->tier());
 }
 
 bool
 Code::hasTier(Tier t) const
 {
-    if (hasTier2() && segment2_->tier() == t)
+    if (hasTier2() && tier2_->tier() == t)
         return true;
-    return segment1_->tier() == t;
+    return tier1_->tier() == t;
 }
 
 Tier
 Code::stableTier() const
 {
-    return segment1_->tier();
+    return tier1_->tier();
 }
 
 Tier
 Code::bestTier() const
 {
     if (hasTier2())
-        return segment2_->tier();
-    return segment1_->tier();
+        return tier2_->tier();
+    return tier1_->tier();
 }
 
-const ModuleSegment&
-Code::segment(Tier tier) const
+const CodeTier&
+Code::codeTier(Tier tier) const
 {
     switch (tier) {
       case Tier::Baseline:
-        if (segment1_->tier() == Tier::Baseline)
-            return *segment1_;
+        if (tier1_->tier() == Tier::Baseline)
+            return *tier1_;
         MOZ_CRASH("No code segment at this tier");
       case Tier::Ion:
-        if (segment1_->tier() == Tier::Ion)
-            return *segment1_;
+        if (tier1_->tier() == Tier::Ion)
+            return *tier1_;
         if (hasTier2())
-            return *segment2_;
+            return *tier2_;
         MOZ_CRASH("No code segment at this tier");
       default:
         MOZ_CRASH();
     }
 }
 
 bool
 Code::containsCodePC(const void* pc) const
@@ -1055,54 +1082,51 @@ Code::addSizeOfMiscIfNotSeen(MallocSizeO
     (void)ok;  // oh well
 
     *data += mallocSizeOf(this) +
              metadata().sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenMetadata) +
              profilingLabels_.lock()->sizeOfExcludingThis(mallocSizeOf) +
              jumpTables_.sizeOfMiscIncludingThis(mallocSizeOf);
 
     for (auto t : tiers())
-        segment(t).addSizeOfMisc(mallocSizeOf, code, data);
+        codeTier(t).addSizeOfMisc(mallocSizeOf, code, data);
 }
 
 size_t
 Code::serializedSize() const
 {
     return metadata().serializedSize() +
-           segment(Tier::Serialized).serializedSize();
+           codeTier(Tier::Serialized).serializedSize();
 }
 
 uint8_t*
-Code::serialize(uint8_t* cursor, const LinkData& linkData) const
+Code::serialize(uint8_t* cursor, const LinkDataTier& linkDataTier) const
 {
     MOZ_RELEASE_ASSERT(!metadata().debugEnabled);
 
     cursor = metadata().serialize(cursor);
-    cursor = segment(Tier::Serialized).serialize(cursor, linkData.linkData(Tier::Serialized));
+    cursor = codeTier(Tier::Serialized).serialize(cursor, linkDataTier);
     return cursor;
 }
 
 const uint8_t*
-Code::deserialize(const uint8_t* cursor, const SharedBytes& bytecode, const LinkData& linkData,
-                  Metadata& metadata)
+Code::deserialize(const uint8_t* cursor, const SharedBytes& bytecode,
+                  const LinkDataTier& linkDataTier, Metadata& metadata)
 {
     cursor = metadata.deserialize(cursor);
     if (!cursor)
         return nullptr;
 
-    UniqueModuleSegment moduleSegment = js::MakeUnique<ModuleSegment>();
-    if (!moduleSegment)
+    auto codeTier = js::MakeUnique<CodeTier>(Tier::Serialized);
+    if (!codeTier)
         return nullptr;
-
-    cursor = moduleSegment->deserialize(cursor, *bytecode, linkData.linkData(Tier::Serialized),
-                                        metadata);
+    cursor = codeTier->deserialize(cursor, bytecode, metadata, linkDataTier);
     if (!cursor)
         return nullptr;
 
-    segment1_ = takeOwnership(Move(moduleSegment));
+    tier1_ = takeOwnership(Move(codeTier));
     metadata_ = &metadata;
 
-    if (!jumpTables_.init(CompileMode::Once, *segment1_,
-                          metadata.metadata(Tier::Serialized).codeRanges))
+    if (!jumpTables_.init(CompileMode::Once, tier1_->segment(), tier1_->metadata().codeRanges))
        return nullptr;
 
     return cursor;
 }
--- a/js/src/wasm/WasmCode.h
+++ b/js/src/wasm/WasmCode.h
@@ -27,18 +27,17 @@
 namespace js {
 
 struct AsmJSMetadata;
 class WasmInstanceObject;
 
 namespace wasm {
 
 struct LinkDataTier;
 struct MetadataTier;
-class LinkData;
-class Metadata;
+struct Metadata;
 
 // ShareableBytes is a reference-counted Vector of bytes.
 
 struct ShareableBytes : ShareableBase<ShareableBytes>
 {
     // Vector is 'final', so instead make Vector a member and add boilerplate.
     Bytes bytes;
 
     ShareableBytes() = default;
@@ -59,48 +58,50 @@ struct FreeCode {
     uint32_t codeLength;
     FreeCode() : codeLength(0) {}
     explicit FreeCode(uint32_t codeLength) : codeLength(codeLength) {}
     void operator()(uint8_t* codeBytes);
 };
 
 using UniqueCodeBytes = UniquePtr<uint8_t, FreeCode>;
 
+class Code;
+class CodeTier;
 class ModuleSegment;
 
 // CodeSegment contains common helpers for determining the base and length of a
 // code segment and if a pc belongs to this segment. It is inherited by:
 // - ModuleSegment, i.e. the code segment of a Module, generated
 //   eagerly when a Module is instanciated.
 // - LazyCodeSegment, i.e. the code segment of entry stubs that are lazily
 //   generated.
 
 class CodeSegment
 {
   protected:
     static UniqueCodeBytes AllocateCodeBytes(uint32_t codeLength);
 
     // A back reference to the owning code.
-    const Code* code_;
+    const CodeTier* codeTier_;
 
     UniqueCodeBytes bytes_;
     uint32_t length_;
 
     enum class Kind {
         LazyStubs,
         Module
     } kind_;
 
     bool registerInProcessMap();
 
   private:
     bool registered_;
 
  public:
     CodeSegment()
-      : code_(nullptr),
+      : codeTier_(nullptr),
        length_(0),
        kind_(Kind::Module),
        registered_(false)
     {}
 
     ~CodeSegment();
 
     bool isLazyStubs() const { return kind_ == Kind::LazyStubs; }
@@ -109,21 +110,21 @@ class CodeSegment
     uint8_t* base() const { return bytes_.get(); }
     uint32_t length() const { return length_; }
 
     bool containsCodePC(const void* pc) const {
         return pc >= base() && pc < (base() + length_);
     }
 
-    void initCode(const Code* code) {
-        MOZ_ASSERT(!code_);
-        code_ = code;
+    void initCodeTier(const CodeTier* codeTier) {
+        MOZ_ASSERT(!codeTier_);
+        codeTier_ = codeTier;
     }
 
-    const Code& code() const { MOZ_ASSERT(code_); return *code_; }
+    const Code& code() const;
 };
 
 // A wasm ModuleSegment owns the allocated executable code for a wasm module.
 
 typedef UniquePtr<ModuleSegment> UniqueModuleSegment;
 typedef UniquePtr<const ModuleSegment> UniqueConstModuleSegment;
 
 class ModuleSegment : public CodeSegment
@@ -137,24 +138,26 @@ class ModuleSegment : public CodeSegment
     uint8_t* unalignedAccessCode_;
     uint8_t* trapCode_;
 
     bool initialize(Tier tier,
                     UniqueCodeBytes bytes,
                     uint32_t codeLength,
                     const ShareableBytes& bytecode,
                     const LinkDataTier& linkData,
-                    const Metadata& metadata);
+                    const Metadata& metadata,
+                    const CodeRangeVector& codeRanges);
 
     static UniqueModuleSegment create(Tier tier,
                                       UniqueCodeBytes bytes,
                                       uint32_t codeLength,
                                       const ShareableBytes& bytecode,
                                       const LinkDataTier& linkData,
-                                      const Metadata& metadata);
+                                      const Metadata& metadata,
+                                      const CodeRangeVector& codeRanges);
 
   public:
     ModuleSegment(const ModuleSegment&) = delete;
     void operator=(const ModuleSegment&) = delete;
 
     ModuleSegment()
      : CodeSegment(),
        tier_(Tier(-1)),
        interruptCode_(nullptr),
@@ -162,37 +165,40 @@ class ModuleSegment : public CodeSegment
        unalignedAccessCode_(nullptr),
        trapCode_(nullptr)
     {}
 
     static UniqueModuleSegment create(Tier tier,
                                       jit::MacroAssembler& masm,
                                       const ShareableBytes& bytecode,
                                       const LinkDataTier& linkData,
-                                      const Metadata& metadata);
+                                      const Metadata& metadata,
+                                      const CodeRangeVector& codeRanges);
 
     static UniqueModuleSegment create(Tier tier,
                                       const Bytes& unlinkedBytes,
                                       const ShareableBytes& bytecode,
                                       const LinkDataTier& linkData,
-                                      const Metadata& metadata);
+                                      const Metadata& metadata,
+                                      const CodeRangeVector& codeRanges);
 
     Tier tier() const { return tier_; }
 
     uint8_t* interruptCode() const { return interruptCode_; }
     uint8_t* outOfBoundsCode() const { return outOfBoundsCode_; }
     uint8_t* unalignedAccessCode() const { return unalignedAccessCode_; }
     uint8_t* trapCode() const { return trapCode_; }
 
     // Structured clone support:
 
     size_t serializedSize() const;
-    uint8_t* serialize(uint8_t* cursor, const LinkDataTier& linkData) const;
+    uint8_t* serialize(uint8_t* cursor, const LinkDataTier& linkDataTier) const;
     const uint8_t* deserialize(const uint8_t* cursor, const ShareableBytes& bytecode,
-                               const LinkDataTier& linkData, const Metadata& metadata);
+                               const LinkDataTier& linkDataTier, const Metadata& metadata,
+                               const CodeRangeVector& codeRanges);
 
     void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code, size_t* data) const;
 };
 
 // A FuncExport represents a single function definition inside a wasm Module
 // that has been exported one or more times. A FuncExport represents an
 // internal entry point that can be called via function definition index by
 // Instance::callExport(). To allow O(log(n)) lookup of a FuncExport by
@@ -236,16 +242,21 @@ class FuncExport
         MOZ_ASSERT(pod.codeRangeIndex_ != UINT32_MAX);
         return pod.codeRangeIndex_;
     }
     uint32_t interpEntryOffset() const {
         MOZ_ASSERT(pod.interpEntryOffset_ != UINT32_MAX);
         return pod.interpEntryOffset_;
     }
 
+    bool clone(const FuncExport& src) {
+        mozilla::PodAssign(&pod, &src.pod);
+        return sig_.clone(src.sig_);
+    }
+
     WASM_DECLARE_SERIALIZABLE(FuncExport)
 };
 
 typedef Vector<FuncExport, 0, SystemAllocPolicy> FuncExportVector;
 
 // An FuncImport contains the runtime metadata needed to implement a call to an
 // imported function. Each function import has two call stubs: an optimized path
 // into JIT code and a slow path into the generic C++ js::Invoke and these
@@ -291,16 +302,21 @@ class FuncImport
     }
     uint32_t interpExitCodeOffset() const {
         return pod.interpExitCodeOffset_;
     }
     uint32_t jitExitCodeOffset() const {
         return pod.jitExitCodeOffset_;
     }
 
+    bool clone(const FuncImport& src) {
+        mozilla::PodAssign(&pod, &src.pod);
+        return sig_.clone(src.sig_);
+    }
+
     WASM_DECLARE_SERIALIZABLE(FuncImport)
 };
 
 typedef Vector<FuncImport, 0, SystemAllocPolicy> FuncImportVector;
 
 // A wasm module can either use no memory, a unshared memory (ArrayBuffer) or
 // shared memory (SharedArrayBuffer).
@@ -375,89 +391,43 @@ struct MetadataCacheablePod
         memoryUsage(MemoryUsage::None),
         minMemoryLength(0),
         globalDataLength(0)
     {}
 };
 
 typedef uint8_t ModuleHash[8];
 
-struct MetadataTier
-{
-    explicit MetadataTier(Tier tier) : tier(tier) {}
-
-    const Tier tier;
-
-    MemoryAccessVector memoryAccesses;
-    CodeRangeVector codeRanges;
-    CallSiteVector callSites;
-    TrapSiteVectorArray trapSites;
-    FuncImportVector funcImports;
-    FuncExportVector funcExports;
-
-    // Debug information, not serialized.
-    Uint32Vector debugTrapFarJumpOffsets;
-    Uint32Vector debugFuncToCodeRange;
-
-    FuncExport& lookupFuncExport(uint32_t funcIndex);
-    const FuncExport& lookupFuncExport(uint32_t funcIndex) const;
-
-    WASM_DECLARE_SERIALIZABLE(MetadataTier);
-};
-
-typedef UniquePtr<MetadataTier> UniqueMetadataTier;
-
-class Metadata : public ShareableBase<Metadata>, public MetadataCacheablePod
+struct Metadata : public ShareableBase<Metadata>, public MetadataCacheablePod
 {
-  protected:
-    UniqueMetadataTier metadata1_;
-    mutable UniqueMetadataTier metadata2_;  // Access only when hasTier2() is true
-    mutable Atomic<bool> hasTier2_;
-
-  public:
-    explicit Metadata(UniqueMetadataTier tier, ModuleKind kind = ModuleKind::Wasm)
-      : MetadataCacheablePod(kind),
-        metadata1_(Move(tier)),
-        debugEnabled(false),
-        debugHash()
-    {}
-    virtual ~Metadata() {}
-
-    MetadataCacheablePod& pod() { return *this; }
-    const MetadataCacheablePod& pod() const { return *this; }
-
-    void commitTier2() const;
-    bool hasTier2() const { return hasTier2_; }
-    void setTier2(UniqueMetadataTier metadata) const;
-    Tiers tiers() const;
-
-    const MetadataTier& metadata(Tier t) const;
-    MetadataTier& metadata(Tier t);
-
-    UniquePtr<MetadataTier> takeMetadata(Tier tier) {
-        MOZ_ASSERT(!hasTier2());
-        MOZ_ASSERT(metadata1_->tier == tier);
-        return Move(metadata1_);
-    }
-
     SigWithIdVector sigIds;
     GlobalDescVector globals;
     TableDescVector tables;
     NameInBytecodeVector funcNames;
     CustomSectionVector customSections;
     CacheableChars filename;
     CacheableChars baseURL;
     CacheableChars sourceMapURL;
 
     // Debug-enabled code is not serialized.
     bool debugEnabled;
     FuncArgTypesVector debugFuncArgTypes;
     FuncReturnTypesVector debugFuncReturnTypes;
     ModuleHash debugHash;
 
+    explicit Metadata(ModuleKind kind = ModuleKind::Wasm)
+      : MetadataCacheablePod(kind),
+        debugEnabled(false),
+        debugHash()
+    {}
+    virtual ~Metadata() {}
+
+    MetadataCacheablePod& pod() { return *this; }
+    const MetadataCacheablePod& pod() const { return *this; }
+
     bool usesMemory() const { return memoryUsage != MemoryUsage::None; }
     bool usesSharedMemory() const { return memoryUsage == MemoryUsage::Shared; }
 
     // AsmJSMetadata derives Metadata iff isAsmJS(). Mostly this distinction is
     // encapsulated within AsmJS.cpp, but the additional virtual functions allow
     // asm.js to override wasm behavior in the handful of cases that can't be
     // easily encapsulated by AsmJS.cpp.
@@ -480,16 +450,93 @@ class Metadata : public ShareableBase<Me
     virtual bool getFuncName(const Bytes* maybeBytecode, uint32_t funcIndex, UTF8Bytes* name) const;
 
     WASM_DECLARE_SERIALIZABLE_VIRTUAL(Metadata);
 };
 
 typedef RefPtr<Metadata> MutableMetadata;
 typedef RefPtr<const Metadata> SharedMetadata;
 
+struct MetadataTier
+{
+    explicit MetadataTier(Tier tier) : tier(tier) {}
+
+    const Tier tier;
+
+    MemoryAccessVector memoryAccesses;
+    CodeRangeVector codeRanges;
+    CallSiteVector callSites;
+    TrapSiteVectorArray trapSites;
+    FuncImportVector funcImports;
+    FuncExportVector funcExports;
+
+    // Debug information, not serialized.
+    Uint32Vector debugTrapFarJumpOffsets;
+    Uint32Vector debugFuncToCodeRange;
+
+    FuncExport& lookupFuncExport(uint32_t funcIndex);
+    const FuncExport& lookupFuncExport(uint32_t funcIndex) const;
+
+    bool clone(const MetadataTier& src);
+
+    WASM_DECLARE_SERIALIZABLE(MetadataTier);
+};
+
+using UniqueMetadataTier = UniquePtr<MetadataTier>;
+
+// CodeTier contains all the data related to a given compilation tier. It is
+// built during module generation and then immutably stored in a Code.
+
+class CodeTier
+{
+    const Tier tier_;
+    const Code* code_;
+    UniqueMetadataTier metadata_;
+    UniqueConstModuleSegment segment_;
+
+    UniqueConstModuleSegment takeOwnership(UniqueModuleSegment segment) const {
+        segment->initCodeTier(this);
+        return UniqueConstModuleSegment(segment.release());
+    }
+
+  public:
+    explicit CodeTier(Tier tier)
+      : tier_(tier),
+        code_(nullptr),
+        metadata_(nullptr),
+        segment_(nullptr)
+    {}
+
+    CodeTier(Tier tier, UniqueMetadataTier metadata, UniqueModuleSegment segment)
+      : tier_(tier),
+        code_(nullptr),
+        metadata_(Move(metadata)),
+        segment_(takeOwnership(Move(segment)))
+    {}
+
+    void initCode(const Code* code) {
+        MOZ_ASSERT(!code_);
+        code_ = code;
+    }
+
+    Tier tier() const { return tier_; }
+    const MetadataTier& metadata() const { return *metadata_.get(); }
+    const ModuleSegment& segment() const { return *segment_.get(); }
+    const Code& code() const { return *code_; }
+
+    size_t serializedSize() const;
+    uint8_t* serialize(uint8_t* cursor, const LinkDataTier& linkData) const;
+    const uint8_t* deserialize(const uint8_t* cursor, const SharedBytes& bytecode,
+                               Metadata& metadata, const LinkDataTier& linkData);
+    void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, size_t* data) const;
+};
+
+typedef UniquePtr<CodeTier> UniqueCodeTier;
+typedef UniquePtr<const CodeTier> UniqueConstCodeTier;
+
 // Jump tables to take tiering into account, when calling either from wasm to
 // wasm (through rabaldr) or from jit to wasm (jit entry).
 
 class JumpTables
 {
     using TablePointer = mozilla::UniquePtr<void*[], JS::FreePolicy>;
 
     CompileMode mode_;
@@ -538,50 +585,59 @@ class JumpTables
 // Code objects own executable code and the metadata that describe it. A single
 // Code object is normally shared between a module and all its instances.
 //
 // profilingLabels_ is lazily initialized, but behind a lock.
 
 class Code : public ShareableBase<Code>
 {
-    UniqueConstModuleSegment segment1_;
-    mutable UniqueConstModuleSegment segment2_;  // Access only when hasTier2() is true
+    UniqueConstCodeTier tier1_;
+    mutable UniqueConstCodeTier tier2_;  // Access only when hasTier2() is true
+    mutable Atomic<bool> hasTier2_;
     SharedMetadata metadata_;
     ExclusiveData<CacheableCharsVector> profilingLabels_;
     JumpTables jumpTables_;
 
-    UniqueConstModuleSegment takeOwnership(UniqueModuleSegment segment) const {
-        segment->initCode(this);
-        return UniqueConstModuleSegment(segment.release());
+    UniqueConstCodeTier takeOwnership(UniqueCodeTier codeTier) const {
+        codeTier->initCode(this);
+        return UniqueConstCodeTier(codeTier.release());
     }
 
  public:
     Code();
 
-    Code(UniqueModuleSegment tier, const Metadata& metadata, JumpTables&& maybeJumpTables);
+    Code(UniqueCodeTier tier, const Metadata& metadata, JumpTables&& maybeJumpTables);
 
     void setTieringEntry(size_t i, void* target) const { jumpTables_.setTieringEntry(i, target); }
     void** tieringJumpTable() const { return jumpTables_.tiering(); }
 
     void setJitEntry(size_t i, void* target) const { jumpTables_.setJitEntry(i, target); }
     void** getAddressOfJitEntry(size_t i) const { return jumpTables_.getAddressOfJitEntry(i); }
     uint32_t getFuncIndex(JSFunction* fun) const;
 
-    bool hasTier2() const { return metadata_->hasTier2(); }
-    void setTier2(UniqueModuleSegment segment) const;
+    void setTier2(UniqueCodeTier tier2) const;
+    void commitTier2() const;
+
+    bool hasTier2() const { return hasTier2_; }
     Tiers tiers() const;
     bool hasTier(Tier t) const;
 
     Tier stableTier() const;    // This is stable during a run
     Tier bestTier() const;      // This may transition from Baseline -> Ion at any time
 
-    const ModuleSegment& segment(Tier tier) const;
-    const MetadataTier& metadata(Tier tier) const { return metadata_->metadata(tier); }
+    const CodeTier& codeTier(Tier tier) const;
     const Metadata& metadata() const { return *metadata_; }
 
+    const ModuleSegment& segment(Tier iter) const {
+        return codeTier(iter).segment();
+    }
+    const MetadataTier& metadata(Tier iter) const {
+        return codeTier(iter).metadata();
+    }
+
     // Metadata lookup functions:
 
     const CallSite* lookupCallSite(void* returnAddress) const;
     const CodeRange* lookupRange(void* pc) const;
     const MemoryAccess* lookupMemoryAccess(void* pc) const;
     bool containsCodePC(const void* pc) const;
     bool lookupTrap(void* pc, Trap* trap, BytecodeOffset* bytecode) const;
@@ -599,19 +655,19 @@ class Code : public ShareableBase<Code>
                             size_t* code,
                             size_t* data) const;
 
     // A Code object is serialized as the length and bytes of the machine code
     // after statically unlinking it; the Code is then later recreated from the
     // machine code and other parts.
 
     size_t serializedSize() const;
-    uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
+    uint8_t* serialize(uint8_t* cursor, const LinkDataTier& linkDataTier) const;
     const uint8_t* deserialize(const uint8_t* cursor, const SharedBytes& bytecode,
-                               const LinkData& linkData, Metadata& metadata);
+                               const LinkDataTier& linkDataTier, Metadata& metadata);
 };
 
 typedef RefPtr<const Code> SharedCode;
 typedef RefPtr<Code> MutableCode;
 
 }  // namespace wasm
 }  // namespace js
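The Code/CodeTier/CodeSegment chain above uses a recurring ownership idiom: a parent takes ownership of a child behind a UniquePtr-to-const, wiring up the child's back-pointer exactly once before the child is frozen. A small standalone sketch of that idiom (simplified names, not the SpiderMonkey types):

    #include <cassert>
    #include <memory>

    struct Parent;

    struct Child {
        const Parent* parent = nullptr;

        // Back-pointer may be set exactly once, before the child is frozen.
        void initParent(const Parent* p) {
            assert(!parent);
            parent = p;
        }
    };

    struct Parent {
        std::unique_ptr<const Child> child;

        // Mirrors Code::takeOwnership / CodeTier::takeOwnership: set the
        // back-reference, then hold the child behind a const pointer so it
        // is immutable from here on.
        void takeOwnership(std::unique_ptr<Child> c) {
            c->initParent(this);
            child = std::move(c);
        }
    };

    int main() {
        Parent p;
        p.takeOwnership(std::make_unique<Child>());
        assert(p.child->parent == &p);
        return 0;
    }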
--- a/js/src/wasm/WasmDebug.h
+++ b/js/src/wasm/WasmDebug.h
@@ -27,20 +27,17 @@
 namespace js {
 
 class Debugger;
 class WasmBreakpoint;
 class WasmBreakpointSite;
 class WasmInstanceObject;
 
 namespace wasm {
 
-struct LinkDataTier;
 struct MetadataTier;
-class FrameIterator;
-class LinkData;
 
 // The generated source location for the AST node/expression. The offset field refers
 // an offset in an binary format file.
 
 struct ExprLoc
 {
     uint32_t lineno;
     uint32_t column;
--- a/js/src/wasm/WasmGenerator.cpp
+++ b/js/src/wasm/WasmGenerator.cpp
@@ -154,27 +154,21 @@ ModuleGenerator::allocateGlobalBytes(uin
     return true;
 }
 
 bool
 ModuleGenerator::init(Metadata* maybeAsmJSMetadata)
 {
     // Perform fallible metadata, linkdata, assumption allocations.
 
+    MOZ_ASSERT(isAsmJS() == !!maybeAsmJSMetadata);
     if (maybeAsmJSMetadata) {
-        MOZ_ASSERT(isAsmJS());
-        metadataTier_ = &maybeAsmJSMetadata->metadata(tier());
         metadata_ = maybeAsmJSMetadata;
     } else {
-        MOZ_ASSERT(!isAsmJS());
-        auto metadataTier = js::MakeUnique<MetadataTier>(tier());
-        if (!metadataTier)
-            return false;
-        metadataTier_ = metadataTier.get();
-        metadata_ = js_new<Metadata>(Move(metadataTier));
+        metadata_ = js_new<Metadata>();
         if (!metadata_)
             return false;
     }
 
     if (compileArgs_->scriptedCaller.filename) {
         metadata_->filename = DuplicateString(compileArgs_->scriptedCaller.filename.get());
         if (!metadata_->filename)
             return false;
@@ -187,20 +181,23 @@ ModuleGenerator::init(Metadata* maybeAsm
     }
 
     if (compileArgs_->responseURLs.sourceMapURL) {
         metadata_->sourceMapURL = DuplicateString(compileArgs_->responseURLs.sourceMapURL.get());
         if (!metadata_->sourceMapURL)
             return false;
     }
 
-    if (!linkData_.initTier1(tier(), *metadata_))
+    linkDataTier_ = js::MakeUnique<LinkDataTier>(tier());
+    if (!linkDataTier_)
         return false;
 
-    linkDataTier_ = &linkData_.linkData(tier());
+    metadataTier_ = js::MakeUnique<MetadataTier>(tier());
+    if (!metadataTier_)
+        return false;
 
     if (!assumptions_.clone(compileArgs_->assumptions))
         return false;
 
     // The funcToCodeRange_ maps function indices to code-range indices and all
     // elements will be initialized by the time module generation is finished.
 
     if (!funcToCodeRange_.appendN(BAD_CODE_RANGE, env_->funcSigs.length()))
@@ -967,17 +964,18 @@ ModuleGenerator::finish(const ShareableB
     // All functions and stubs have been compiled, finish linking and metadata.
 
     if (!finishCode())
         return nullptr;
 
     if (!finishMetadata(bytecode))
         return nullptr;
 
-    return ModuleSegment::create(tier(), masm_, bytecode, *linkDataTier_, *metadata_);
+    return ModuleSegment::create(tier(), masm_, bytecode, *linkDataTier_, *metadata_,
+                                 metadataTier_->codeRanges);
 }
 
 SharedModule
 ModuleGenerator::finishModule(const ShareableBytes& bytecode)
 {
     MOZ_ASSERT(mode() == CompileMode::Once || mode() == CompileMode::Tier1);
 
     UniqueModuleSegment moduleSegment = finish(bytecode);
@@ -995,24 +993,28 @@ ModuleGenerator::finishModule(const Shar
         if (!bytes.resize(masm_.bytesNeeded()))
             return nullptr;
         masm_.executableCopy(bytes.begin(), /* flushICache = */ false);
         maybeDebuggingBytes = js::MakeUnique<Bytes>(Move(bytes));
         if (!maybeDebuggingBytes)
             return nullptr;
     }
 
-    SharedCode code = js_new<Code>(Move(moduleSegment), *metadata_, Move(jumpTables));
+    auto codeTier = js::MakeUnique<CodeTier>(tier(), Move(metadataTier_), Move(moduleSegment));
+    if (!codeTier)
+        return nullptr;
+
+    SharedCode code = js_new<Code>(Move(codeTier), *metadata_, Move(jumpTables));
     if (!code)
         return nullptr;
 
     SharedModule module(js_new<Module>(Move(assumptions_),
                                        *code,
                                        Move(maybeDebuggingBytes),
-                                       Move(linkData_),
+                                       LinkData(Move(linkDataTier_)),
                                        Move(env_->imports),
                                        Move(env_->exports),
                                        Move(env_->dataSegments),
                                        Move(env_->elemSegments),
                                        bytecode));
     if (!module)
         return nullptr;
 
@@ -1031,14 +1033,15 @@ ModuleGenerator::finishTier2(Module& mod
     if (cancelled_ && *cancelled_)
         return false;
 
     UniqueModuleSegment moduleSegment = finish(module.bytecode());
     if (!moduleSegment)
         return false;
 
-    module.finishTier2(linkData_.takeLinkData(tier()),
-                       metadata_->takeMetadata(tier()),
-                       Move(moduleSegment),
-                       env_);
+    auto tier2 = js::MakeUnique<CodeTier>(tier(), Move(metadataTier_), Move(moduleSegment));
+    if (!tier2)
+        return false;
+
+    module.finishTier2(Move(linkDataTier_), Move(tier2), env_);
     return true;
 }
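With this change the generator owns its per-tier parts outright (UniqueLinkDataTier, UniqueMetadataTier) and moves them into the finished product, instead of holding raw pointers into structures owned elsewhere. A standalone sketch of that builder hand-off, with illustrative names rather than the real ModuleGenerator API:

    #include <cassert>
    #include <memory>
    #include <utility>

    struct TierMetadata { int tier = 0; };
    struct TierLinkData { int tier = 0; };

    struct Product {
        std::unique_ptr<TierMetadata> metadata;
        std::unique_ptr<TierLinkData> linkData;
    };

    class Generator {
        // Owned by the generator only while compiling.
        std::unique_ptr<TierMetadata> metadataTier_ = std::make_unique<TierMetadata>();
        std::unique_ptr<TierLinkData> linkDataTier_ = std::make_unique<TierLinkData>();

      public:
        // Mirrors the shape of ModuleGenerator::finishModule: ownership leaves
        // the generator, so the members are null afterwards and cannot be
        // reused by accident.
        Product finish() {
            Product p;
            p.metadata = std::move(metadataTier_);
            p.linkData = std::move(linkDataTier_);
            return p;
        }
    };

    int main() {
        Generator g;
        Product p = g.finish();
        assert(p.metadata && p.linkData);
        return 0;
    }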
--- a/js/src/wasm/WasmGenerator.h
+++ b/js/src/wasm/WasmGenerator.h
@@ -154,19 +154,18 @@ class MOZ_STACK_CLASS ModuleGenerator
     // Constant parameters
     SharedCompileArgs const         compileArgs_;
     UniqueChars* const              error_;
     const Atomic<bool>* const       cancelled_;
     ModuleEnvironment* const        env_;
 
     // Data that is moved into the result of finish()
     Assumptions                     assumptions_;
-    LinkDataTier*                   linkDataTier_; // Owned by linkData_
-    LinkData                        linkData_;
-    MetadataTier*                   metadataTier_; // Owned by metadata_
+    UniqueLinkDataTier              linkDataTier_;
+    UniqueMetadataTier              metadataTier_;
     MutableMetadata                 metadata_;
 
     // Data scoped to the ModuleGenerator's lifetime
     ExclusiveCompileTaskState       taskState_;
     LifoAlloc                       lifo_;
     jit::JitContext                 jcx_;
     jit::TempAllocator              masmAlloc_;
     jit::MacroAssembler             masm_;
--- a/js/src/wasm/WasmInstance.cpp
+++ b/js/src/wasm/WasmInstance.cpp
@@ -393,17 +393,17 @@ Instance::Instance(JSContext* cx,
     code_(code),
     debug_(Move(debug)),
     tlsData_(Move(tlsDataIn)),
     memory_(memory),
     tables_(Move(tables)),
     enterFrameTrapsEnabled_(false)
 {
 #ifdef DEBUG
-    for (auto t : metadata().tiers())
+    for (auto t : code_->tiers())
         MOZ_ASSERT(funcImports.length() == metadata(t).funcImports.length());
 #endif
     MOZ_ASSERT(tables_.length() == metadata().tables.length());
 
     tlsData()->memoryBase = memory ? memory->buffer().dataPointerEither().unwrap() : nullptr;
 #ifndef WASM_HUGE_MEMORY
     tlsData()->boundsCheckLimit = memory ? memory->buffer().wasmBoundsCheckLimit() : 0;
 #endif
--- a/js/src/wasm/WasmModule.cpp
+++ b/js/src/wasm/WasmModule.cpp
@@ -110,102 +110,71 @@ LinkDataTier::sizeOfExcludingThis(Malloc
 {
     return internalLinks.sizeOfExcludingThis(mallocSizeOf) +
            symbolicLinks.sizeOfExcludingThis(mallocSizeOf);
 }
 
 void
 LinkData::setTier2(UniqueLinkDataTier linkData) const
 {
-    MOZ_RELEASE_ASSERT(linkData->tier == Tier::Ion && linkData1_->tier != Tier::Ion);
+    MOZ_RELEASE_ASSERT(linkData->tier == Tier::Ion && linkData1_->tier == Tier::Baseline);
     MOZ_RELEASE_ASSERT(!linkData2_.get());
     linkData2_ = Move(linkData);
 }
 
-Tiers
-LinkData::tiers() const
-{
-    if (hasTier2())
-        return Tiers(linkData1_->tier, linkData2_->tier);
-    return Tiers(linkData1_->tier);
-}
-
 const LinkDataTier&
 LinkData::linkData(Tier tier) const
 {
     switch (tier) {
       case Tier::Baseline:
         if (linkData1_->tier == Tier::Baseline)
             return *linkData1_;
         MOZ_CRASH("No linkData at this tier");
       case Tier::Ion:
         if (linkData1_->tier == Tier::Ion)
             return *linkData1_;
-        if (hasTier2())
+        if (linkData2_)
             return *linkData2_;
         MOZ_CRASH("No linkData at this tier");
       default:
         MOZ_CRASH();
     }
 }
 
-LinkDataTier&
-LinkData::linkData(Tier tier)
-{
-    switch (tier) {
-      case Tier::Baseline:
-        if (linkData1_->tier == Tier::Baseline)
-            return *linkData1_;
-        MOZ_CRASH("No linkData at this tier");
-      case Tier::Ion:
-        if (linkData1_->tier == Tier::Ion)
-            return *linkData1_;
-        if (hasTier2())
-            return *linkData2_;
-        MOZ_CRASH("No linkData at this tier");
-      default:
-        MOZ_CRASH();
-    }
-}
-
-bool
-LinkData::initTier1(Tier tier, const Metadata& metadata)
-{
-    MOZ_ASSERT(!linkData1_);
-    metadata_ = &metadata;
-    linkData1_ = js::MakeUnique<LinkDataTier>(tier);
-    return linkData1_ != nullptr;
-}
-
 size_t
 LinkData::serializedSize() const
 {
     return linkData(Tier::Serialized).serializedSize();
 }
 
 uint8_t*
 LinkData::serialize(uint8_t* cursor) const
 {
     cursor = linkData(Tier::Serialized).serialize(cursor);
     return cursor;
 }
 
 const uint8_t*
 LinkData::deserialize(const uint8_t* cursor)
 {
-    (cursor = linkData(Tier::Serialized).deserialize(cursor));
+    MOZ_ASSERT(!linkData1_);
+    linkData1_ = js::MakeUnique<LinkDataTier>(Tier::Serialized);
+    if (!linkData1_)
+        return nullptr;
+    cursor = linkData1_->deserialize(cursor);
     return cursor;
 }
 
 size_t
 LinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     size_t sum = 0;
-    for (auto t : tiers())
-        sum += linkData(t).sizeOfExcludingThis(mallocSizeOf);
+    sum += linkData1_->sizeOfExcludingThis(mallocSizeOf);
+    if (linkData2_)
+        sum += linkData2_->sizeOfExcludingThis(mallocSizeOf);
     return sum;
 }
 
 class Module::Tier2GeneratorTaskImpl : public Tier2GeneratorTask
 {
     SharedModule module_;
     SharedCompileArgs compileArgs_;
     Atomic<bool> cancelled_;
@@ -272,31 +241,30 @@ Module::notifyCompilationListeners()
         tiering.notify_all(/* inactive */);
     }
 
     for (RefPtr<JS::WasmModuleListener>& listener : listeners)
         listener->onCompilationComplete();
 }
 
 void
-Module::finishTier2(UniqueLinkDataTier linkData2, UniqueMetadataTier metadata2,
-                    UniqueModuleSegment code2, ModuleEnvironment* env2)
+Module::finishTier2(UniqueLinkDataTier linkData2, UniqueCodeTier tier2, ModuleEnvironment* env2)
 {
     // Install the data in the data structures. They will not be visible yet.
 
-    metadata().setTier2(Move(metadata2));
+    MOZ_ASSERT(!code().hasTier2());
     linkData().setTier2(Move(linkData2));
-    code().setTier2(Move(code2));
+    code().setTier2(Move(tier2));
     for (uint32_t i = 0; i < elemSegments_.length(); i++)
         elemSegments_[i].setTier2(Move(env2->elemSegments[i].elemCodeRangeIndices(Tier::Ion)));
 
     // Now that all the code and metadata is valid, make tier 2 code visible and
     // unblock anyone waiting on it.
 
-    metadata().commitTier2();
+    code().commitTier2();
     notifyCompilationListeners();
 
     // And we update the jump vector.
 
     uint8_t* base = code().segment(Tier::Ion).base();
     for (auto cr : metadata(Tier::Ion).codeRanges) {
         // These are racy writes that we just want to be visible, atomically,
         // eventually. All hardware we care about will do this right. But
@@ -405,17 +373,17 @@ Module::compiledSerialize(uint8_t* compi
     uint8_t* cursor = compiledBegin;
     cursor = assumptions_.serialize(cursor);
     cursor = linkData_.serialize(cursor);
     cursor = SerializeVector(cursor, imports_);
     cursor = SerializeVector(cursor, exports_);
     cursor = SerializePodVector(cursor, dataSegments_);
     cursor = SerializeVector(cursor, elemSegments_);
-    cursor = code_->serialize(cursor, linkData_);
+    cursor = code_->serialize(cursor, linkData_.linkData(Tier::Serialized));
     MOZ_RELEASE_ASSERT(cursor == compiledBegin + compiledSize);
 }
 
 /* static */ bool
 Module::assumptionsMatch(const Assumptions& current, const uint8_t* compiledBegin, size_t remain)
 {
     Assumptions cached;
     if (!cached.deserialize(compiledBegin, remain))
@@ -438,29 +406,22 @@ Module::deserialize(const uint8_t* bytec
     Assumptions assumptions;
     const uint8_t* cursor = assumptions.deserialize(compiledBegin, compiledSize);
     if (!cursor)
         return nullptr;
 
     MutableMetadata metadata(maybeMetadata);
     if (!metadata) {
-        auto tierMetadata = js::MakeUnique<MetadataTier>(Tier::Ion);
-        if (!tierMetadata)
-            return nullptr;
-
-        metadata = js_new<Metadata>(Move(tierMetadata));
+        metadata = js_new<Metadata>();
         if (!metadata)
             return nullptr;
     }
 
     LinkData linkData;
-    if (!linkData.initTier1(Tier::Serialized, *metadata))
-        return nullptr;
-
     cursor = linkData.deserialize(cursor);
     if (!cursor)
         return nullptr;
 
     ImportVector imports;
     cursor = DeserializeVector(cursor, &imports);
     if (!cursor)
         return nullptr;
@@ -476,17 +437,17 @@ Module::deserialize(const uint8_t* bytec
         return nullptr;
 
     ElemSegmentVector elemSegments;
     cursor = DeserializeVector(cursor, &elemSegments);
     if (!cursor)
         return nullptr;
 
     MutableCode code = js_new<Code>();
-    cursor = code->deserialize(cursor, bytecode, linkData, *metadata);
+    cursor = code->deserialize(cursor, bytecode, linkData.linkData(Tier::Serialized), *metadata);
     if (!cursor)
         return nullptr;
 
     MOZ_RELEASE_ASSERT(cursor == compiledBegin + compiledSize);
     MOZ_RELEASE_ASSERT(!!maybeMetadata == code->metadata().isAsmJS());
 
     return js_new<Module>(Move(assumptions),
                           *code,
@@ -566,17 +527,17 @@ wasm::DeserializeModule(PRFileDesc* byte
             return nullptr;
 
         return Module::deserialize(bytecodeMapping.get(), bytecodeInfo.size,
                                    compiledMapping.get(), compiledInfo.size);
     }
 
     // Since the compiled file's assumptions don't match, we must recompile from
     // bytecode. The bytecode file format is simply that of a .wasm (see
-    // Module::serialize).
+    // Module::bytecodeSerialize).
 
     MutableBytes bytecode = js_new<ShareableBytes>();
     if (!bytecode || !bytecode->bytes.initLengthUninitialized(bytecodeInfo.size))
         return nullptr;
 
     memcpy(bytecode->bytes.begin(), bytecodeMapping.get(), bytecodeInfo.size);
 
     ScriptedCaller scriptedCaller;
@@ -1161,31 +1122,41 @@ Module::instantiate(JSContext* cx,
     SharedCode code(code_);
 
     if (metadata().debugEnabled) {
         // The first time through, use the pre-linked code in the module but
         // mark it as busy. Subsequently, instantiate the copy of the code
         // bytes that we keep around for debugging instead, because the debugger
         // may patch the pre-linked code at any time.
         if (!codeIsBusy_.compareExchange(false, true)) {
-            auto moduleSegment = ModuleSegment::create(Tier::Baseline,
-                                                       *unlinkedCodeForDebugging_,
-                                                       *bytecode_,
-                                                       linkData_.linkData(Tier::Baseline),
-                                                       metadata());
-            if (!moduleSegment) {
+            Tier tier = Tier::Baseline;
+            auto segment = ModuleSegment::create(tier,
+                                                 *unlinkedCodeForDebugging_,
+                                                 *bytecode_,
+                                                 linkData(tier),
+                                                 metadata(),
+                                                 metadata(tier).codeRanges);
+            if (!segment) {
                 ReportOutOfMemory(cx);
                 return false;
             }
 
-            JumpTables jumpTables;
-            if (!jumpTables.init(CompileMode::Once, *moduleSegment, metadata(Tier::Baseline).codeRanges))
+            UniqueMetadataTier metadataTier = js::MakeUnique<MetadataTier>(tier);
+            if (!metadataTier || !metadataTier->clone(metadata(tier)))
                 return false;
 
-            code = js_new<Code>(Move(moduleSegment), metadata(), Move(jumpTables));
+            auto codeTier = js::MakeUnique<CodeTier>(tier, Move(metadataTier), Move(segment));
+            if (!codeTier)
+                return false;
+
+            JumpTables jumpTables;
+            if (!jumpTables.init(CompileMode::Once, moduleSegment(tier), metadata(tier).codeRanges))
+                return false;
+
+            code = js_new<Code>(Move(codeTier), metadata(), Move(jumpTables));
             if (!code) {
                 ReportOutOfMemory(cx);
                 return false;
             }
         }
     }
 
     // To support viewing the source of an instance (Instance::createText), the
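The finishTier2/commitTier2 pair above follows a publish-then-commit protocol: all tier-2 data is installed first, and only then is a single atomic flag flipped, so concurrent readers observe either no tier 2 or a complete one. A simplified standalone sketch of that protocol (illustrative types; the patch uses mozilla::Atomic, whereas this sketch spells out release/acquire ordering explicitly):

    #include <atomic>
    #include <cassert>
    #include <memory>

    struct TierCode { int quality; };

    struct TieredCode {
        std::unique_ptr<TierCode> tier1 = std::make_unique<TierCode>(TierCode{1});
        std::unique_ptr<TierCode> tier2;       // written before hasTier2 is set
        std::atomic<bool> hasTier2{false};

        // Compilation thread: install tier 2, then publish it, mirroring
        // Code::setTier2 followed by Code::commitTier2.
        void finishTier2(std::unique_ptr<TierCode> t2) {
            tier2 = std::move(t2);                            // not yet visible
            hasTier2.store(true, std::memory_order_release);  // publish
        }

        // Readers only dereference tier2 after observing the flag, as in
        // Code::bestTier.
        const TierCode& bestTier() const {
            if (hasTier2.load(std::memory_order_acquire))
                return *tier2;
            return *tier1;
        }
    };

    int main() {
        TieredCode code;
        assert(code.bestTier().quality == 1);
        code.finishTier2(std::make_unique<TierCode>(TierCode{2}));
        assert(code.bestTier().quality == 2);
        return 0;
    }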
--- a/js/src/wasm/WasmModule.h
+++ b/js/src/wasm/WasmModule.h
@@ -74,35 +74,25 @@ struct LinkDataTier : LinkDataTierCachea
 
     WASM_DECLARE_SERIALIZABLE(LinkData)
 };
 
 typedef UniquePtr<LinkDataTier> UniqueLinkDataTier;
 
 class LinkData
 {
-    SharedMetadata metadata_;
-
-    UniqueLinkDataTier linkData1_;          // Always present
-    mutable UniqueLinkDataTier linkData2_;  // Access only if hasTier2() is true
+    UniqueLinkDataTier linkData1_;          // Always present
+    mutable UniqueLinkDataTier linkData2_;  // Access only if hasTier2() is true
 
   public:
-    bool initTier1(Tier tier, const Metadata& metadata);
+    LinkData() {}
+    explicit LinkData(UniqueLinkDataTier linkData) : linkData1_(Move(linkData)) {}
 
-    bool hasTier2() const { return metadata_->hasTier2(); }
     void setTier2(UniqueLinkDataTier linkData) const;
-    Tiers tiers() const;
 
     const LinkDataTier& linkData(Tier tier) const;
-    LinkDataTier& linkData(Tier tier);
-
-    UniquePtr<LinkDataTier> takeLinkData(Tier tier) {
-        MOZ_ASSERT(!hasTier2());
-        MOZ_ASSERT(linkData1_->tier == tier);
-        return Move(linkData1_);
-    }
 
     WASM_DECLARE_SERIALIZABLE(LinkData)
 };
 
 // Contains the locked tiering state of a Module: whether there is an active
 // background tier-2 compilation in progress and, if so, the list of listeners
 // waiting for the tier-2 compilation to complete.
 
@@ -215,18 +205,17 @@ class Module : public JS::WasmModule
     // Tier-2 compilation may be initiated after the Module is constructed at
     // most once, ideally before any client can attempt to serialize the Module.
     // When tier-2 compilation completes, ModuleGenerator calls finishTier2()
     // from a helper thread, passing tier-variant data which will be installed
     // and made visible.
 
     void startTier2(const CompileArgs& args);
-    void finishTier2(UniqueLinkDataTier linkData2, UniqueMetadataTier metadata2,
-                     UniqueModuleSegment code2, ModuleEnvironment* env2);
+    void finishTier2(UniqueLinkDataTier linkData2, UniqueCodeTier tier2, ModuleEnvironment* env2);
     void blockOnTier2Complete() const;
 
     // JS API and JS::WasmModule implementation:
 
     size_t bytecodeSerializedSize() const override;
     void bytecodeSerialize(uint8_t* bytecodeBegin, size_t bytecodeSize) const override;
 
     bool compilationComplete() const override;
     bool notifyWhenCompilationComplete(JS::WasmModuleListener* listener) override;