Bug 1313180 - Baldr: move around a few things (r=bbouvier)
authorLuke Wagner <luke@mozilla.com>
Fri, 04 Nov 2016 17:05:56 -0500
changeset 347926 700fccbb5044182632fea0a2b0cb7500eaae7097
parent 347925 c3b3c5505c198e1ef743eeb1ce2d6e27d4ad9c89
child 347927 a938e94dc04db48f56a39f4a798977d015a5050e
push id10298
push userraliiev@mozilla.com
push dateMon, 14 Nov 2016 12:33:03 +0000
treeherdermozilla-aurora@7e29173b1641 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersbbouvier
bugs1313180
milestone52.0a1
Bug 1313180 - Baldr: move around a few things (r=bbouvier) MozReview-Commit-ID: CYcuLOVCjcC
js/src/jit/CodeGenerator.cpp
js/src/jit/Linker.cpp
js/src/jit/shared/Assembler-shared.h
js/src/wasm/WasmGenerator.cpp
js/src/wasm/WasmGenerator.h
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -163,17 +163,17 @@ CodeGenerator::CodeGenerator(MIRGenerato
   , ionScriptLabels_(gen->alloc())
   , scriptCounts_(nullptr)
   , simdRefreshTemplatesDuringLink_(0)
 {
 }
 
 CodeGenerator::~CodeGenerator()
 {
-    MOZ_ASSERT_IF(!gen->compilingWasm(), masm.numWasmSymbolicAccesses() == 0);
+    MOZ_ASSERT_IF(!gen->compilingWasm(), masm.numSymbolicAccesses() == 0);
     js_delete(scriptCounts_);
 }
 
 typedef bool (*StringToNumberFn)(ExclusiveContext*, JSString*, double*);
 static const VMFunction StringToNumberInfo =
     FunctionInfo<StringToNumberFn>(StringToNumber, "StringToNumber");
 
 void
--- a/js/src/jit/Linker.cpp
+++ b/js/src/jit/Linker.cpp
@@ -10,17 +10,17 @@
 
 namespace js {
 namespace jit {
 
 template <AllowGC allowGC>
 JitCode*
 Linker::newCode(JSContext* cx, CodeKind kind, bool hasPatchableBackedges /* = false */)
 {
-    MOZ_ASSERT(masm.numWasmSymbolicAccesses() == 0);
+    MOZ_ASSERT(masm.numSymbolicAccesses() == 0);
     MOZ_ASSERT_IF(hasPatchableBackedges, kind == ION_CODE);
 
     gc::AutoSuppressGC suppressGC(cx);
     if (masm.oom())
         return fail(cx);
 
     ExecutablePool* pool;
     size_t bytesNeeded = masm.bytesNeeded() + sizeof(JitCode*) + CodeAlignment;
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -665,16 +665,17 @@ class CodeLocationLabel
 
 namespace wasm {
 
 // As an invariant across architectures, within wasm code:
 //   $sp % WasmStackAlignment = (sizeof(wasm::Frame) + masm.framePushed) % WasmStackAlignment
 // Thus, wasm::Frame represents the bytes pushed after the call (which occurred
 // with a WasmStackAlignment-aligned StackPointer) that are not included in
 // masm.framePushed.
+
 struct Frame
 {
     // The caller's saved frame pointer. In non-profiling mode, internal
     // wasm-to-wasm calls don't update fp and thus don't save the caller's
     // frame pointer; the space is reserved, however, so that profiling mode can
     // reuse the same function body without recompiling.
     uint8_t* callerFP;
 
@@ -684,25 +685,31 @@ struct Frame
 };
 
 static_assert(sizeof(Frame) == 2 * sizeof(void*), "?!");
 static const uint32_t FrameBytesAfterReturnAddress = sizeof(void*);
 
 // Represents an instruction to be patched and the intended pointee. These
 // links are accumulated in the MacroAssembler, but patching is done outside
 // the MacroAssembler (in Module::staticallyLink).
+
 struct SymbolicAccess
 {
     SymbolicAccess(jit::CodeOffset patchAt, SymbolicAddress target)
       : patchAt(patchAt), target(target) {}
 
     jit::CodeOffset patchAt;
     SymbolicAddress target;
 };
 
+typedef Vector<SymbolicAccess, 0, SystemAllocPolicy> SymbolicAccessVector;
+
+// Describes a single wasm or asm.js memory access for the purpose of generating
+// code and metadata.
+
 class MemoryAccessDesc
 {
     uint32_t offset_;
     uint32_t align_;
     Scalar::Type type_;
     unsigned numSimdElems_;
     jit::MemoryBarrierBits barrierBefore_;
     jit::MemoryBarrierBits barrierAfter_;
@@ -825,17 +832,17 @@ class AssemblerShared
 {
     wasm::CallSiteAndTargetVector callSites_;
     wasm::TrapSiteVector trapSites_;
     wasm::TrapFarJumpVector trapFarJumps_;
     wasm::MemoryAccessVector memoryAccesses_;
     wasm::MemoryPatchVector memoryPatches_;
     wasm::BoundsCheckVector boundsChecks_;
     wasm::GlobalAccessVector globalAccesses_;
-    Vector<wasm::SymbolicAccess, 0, SystemAllocPolicy> wasmSymbolicAccesses_;
+    wasm::SymbolicAccessVector symbolicAccesses_;
 
   protected:
     Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
 
     bool enoughMemory_;
     bool embedsNurseryPointers_;
 
   public:
@@ -908,19 +915,19 @@ class AssemblerShared
     wasm::MemoryPatchVector&& extractMemoryPatches() { return Move(memoryPatches_); }
 
     void append(wasm::BoundsCheck check) { enoughMemory_ &= boundsChecks_.append(check); }
     wasm::BoundsCheckVector&& extractBoundsChecks() { return Move(boundsChecks_); }
 
     void append(wasm::GlobalAccess access) { enoughMemory_ &= globalAccesses_.append(access); }
     const wasm::GlobalAccessVector& globalAccesses() const { return globalAccesses_; }
 
-    void append(wasm::SymbolicAccess link) { enoughMemory_ &= wasmSymbolicAccesses_.append(link); }
-    size_t numWasmSymbolicAccesses() const { return wasmSymbolicAccesses_.length(); }
-    wasm::SymbolicAccess wasmSymbolicAccess(size_t i) const { return wasmSymbolicAccesses_[i]; }
+    void append(wasm::SymbolicAccess access) { enoughMemory_ &= symbolicAccesses_.append(access); }
+    size_t numSymbolicAccesses() const { return symbolicAccesses_.length(); }
+    wasm::SymbolicAccess symbolicAccess(size_t i) const { return symbolicAccesses_[i]; }
 
     static bool canUseInSingleByteInstruction(Register reg) { return true; }
 
     void addCodeLabel(CodeLabel label) {
         propagateOOM(codeLabels_.append(label));
     }
     size_t numCodeLabels() const {
         return codeLabels_.length();
@@ -959,20 +966,20 @@ class AssemblerShared
         for (; i < boundsChecks_.length(); i++)
             boundsChecks_[i].offsetBy(delta);
 
         i = globalAccesses_.length();
         enoughMemory_ &= globalAccesses_.appendAll(other.globalAccesses_);
         for (; i < globalAccesses_.length(); i++)
             globalAccesses_[i].patchAt.offsetBy(delta);
 
-        i = wasmSymbolicAccesses_.length();
-        enoughMemory_ &= wasmSymbolicAccesses_.appendAll(other.wasmSymbolicAccesses_);
-        for (; i < wasmSymbolicAccesses_.length(); i++)
-            wasmSymbolicAccesses_[i].patchAt.offsetBy(delta);
+        i = symbolicAccesses_.length();
+        enoughMemory_ &= symbolicAccesses_.appendAll(other.symbolicAccesses_);
+        for (; i < symbolicAccesses_.length(); i++)
+            symbolicAccesses_[i].patchAt.offsetBy(delta);
 
         i = codeLabels_.length();
         enoughMemory_ &= codeLabels_.appendAll(other.codeLabels_);
         for (; i < codeLabels_.length(); i++)
             codeLabels_[i].offsetBy(delta);
 
         return !oom();
     }
--- a/js/src/wasm/WasmGenerator.cpp
+++ b/js/src/wasm/WasmGenerator.cpp
@@ -385,16 +385,30 @@ ModuleGenerator::finishTask(IonCompileTa
 
     freeTasks_.infallibleAppend(task);
     return true;
 }
 
 bool
 ModuleGenerator::finishFuncExports()
 {
+    // In addition to all the functions that were explicitly exported, any
+    // element of an exported table is also exported.
+
+    for (ElemSegment& elems : elemSegments_) {
+        if (shared_->tables[elems.tableIndex].external) {
+            for (uint32_t funcIndex : elems.elemFuncIndices) {
+                if (funcIsImport(funcIndex))
+                    continue;
+                if (!exportedFuncs_.put(funcIndex))
+                    return false;
+            }
+        }
+    }
+
     // ModuleGenerator::exportedFuncs_ is an unordered HashSet. The
     // FuncExportVector stored in Metadata needs to be stored sorted by
     // function index to allow O(log(n)) lookup at runtime.
 
     Uint32Vector sorted;
     if (!sorted.reserve(exportedFuncs_.count()))
         return false;
 
@@ -550,18 +564,18 @@ ModuleGenerator::finishCodegen()
 bool
 ModuleGenerator::finishLinkData(Bytes& code)
 {
     // Inflate the global bytes up to page size so that the total bytes are a
     // page size (as required by the allocator functions).
     linkData_.globalDataLength = AlignBytes(linkData_.globalDataLength, gc::SystemPageSize());
 
     // Add links to absolute addresses identified symbolically.
-    for (size_t i = 0; i < masm_.numWasmSymbolicAccesses(); i++) {
-        SymbolicAccess src = masm_.wasmSymbolicAccess(i);
+    for (size_t i = 0; i < masm_.numSymbolicAccesses(); i++) {
+        SymbolicAccess src = masm_.symbolicAccess(i);
         if (!linkData_.symbolicLinks[src.target].append(src.patchAt.offset()))
             return false;
     }
 
     // Relative link metadata: absolute addresses that refer to another point within
     // the asm.js module.
 
     // CodeLabels are used for switch cases and loads from floating-point /
@@ -848,33 +862,16 @@ ModuleGenerator::setDataSegments(DataSeg
 }
 
 bool
 ModuleGenerator::startFuncDefs()
 {
     MOZ_ASSERT(!startedFuncDefs_);
     MOZ_ASSERT(!finishedFuncDefs_);
 
-    // Now that it is known whether tables are internal or external, mark the
-    // elements of any external table as exported since they may be called from
-    // outside the module.
-
-    for (ElemSegment& elems : elemSegments_) {
-        if (!shared_->tables[elems.tableIndex].external)
-            continue;
-
-        for (uint32_t funcIndex : elems.elemFuncIndices) {
-            if (funcIsImport(funcIndex))
-                continue;
-
-            if (!exportedFuncs_.put(funcIndex))
-                return false;
-        }
-    }
-
     // The wasmCompilationInProgress atomic ensures that there is only one
     // parallel compilation in progress at a time. In the special case of
     // asm.js, where the ModuleGenerator itself can be on a helper thread, this
     // avoids the possibility of deadlock since at most 1 helper thread will be
     // blocking on other helper threads and there are always >1 helper threads.
     // With wasm, this restriction could be relaxed by moving the worklist state
     // out of HelperThreadState since each independent compilation needs its own
     // worklist pair. Alternatively, the deadlock could be avoided by having the
@@ -1077,21 +1074,16 @@ ModuleGenerator::initSigTableElems(uint3
 }
 
 SharedModule
 ModuleGenerator::finish(const ShareableBytes& bytecode)
 {
     MOZ_ASSERT(!activeFuncDef_);
     MOZ_ASSERT(finishedFuncDefs_);
 
-    // Now that all asm.js tables have been created and the compiler threads are
-    // done, shrink the (no longer shared) tables vector down to size.
-    if (isAsmJS() && !shared_->tables.resize(numTables_))
-        return nullptr;
-
     if (!finishFuncExports())
         return nullptr;
 
     if (!finishCodegen())
         return nullptr;
 
     // Round up the code size to page size since this is eventually required by
     // the executable-code allocator and for setting memory protection.
@@ -1135,16 +1127,21 @@ ModuleGenerator::finish(const ShareableB
     // so realloc them down to size.
     metadata_->memoryAccesses.podResizeToFit();
     metadata_->memoryPatches.podResizeToFit();
     metadata_->boundsChecks.podResizeToFit();
     metadata_->codeRanges.podResizeToFit();
     metadata_->callSites.podResizeToFit();
     metadata_->callThunks.podResizeToFit();
 
+    // For asm.js, the tables vector is over-allocated (to avoid resize during
+    // parallel compilation). Shrink it back down to fit.
+    if (isAsmJS() && !metadata_->tables.resize(numTables_))
+        return nullptr;
+
     // Assert CodeRanges are sorted.
 #ifdef DEBUG
     uint32_t lastEnd = 0;
     for (const CodeRange& codeRange : metadata_->codeRanges) {
         MOZ_ASSERT(codeRange.begin() >= lastEnd);
         lastEnd = codeRange.end();
     }
 #endif
--- a/js/src/wasm/WasmGenerator.h
+++ b/js/src/wasm/WasmGenerator.h
@@ -111,22 +111,22 @@ class MOZ_STACK_CLASS ModuleGenerator
     IonCompileTaskPtrVector         freeTasks_;
 
     // Assertions
     DebugOnly<FunctionGenerator*>   activeFuncDef_;
     DebugOnly<bool>                 startedFuncDefs_;
     DebugOnly<bool>                 finishedFuncDefs_;
     DebugOnly<uint32_t>             numFinishedFuncDefs_;
 
-    MOZ_MUST_USE bool finishOutstandingTask();
     bool funcIsImport(uint32_t funcIndex) const;
     bool funcIsCompiled(uint32_t funcIndex) const;
     const CodeRange& funcCodeRange(uint32_t funcIndex) const;
     MOZ_MUST_USE bool patchCallSites(TrapExitOffsetArray* maybeTrapExits = nullptr);
     MOZ_MUST_USE bool finishTask(IonCompileTask* task);
+    MOZ_MUST_USE bool finishOutstandingTask();
     MOZ_MUST_USE bool finishFuncExports();
     MOZ_MUST_USE bool finishCodegen();
     MOZ_MUST_USE bool finishLinkData(Bytes& code);
     MOZ_MUST_USE bool addFuncImport(const Sig& sig, uint32_t globalDataOffset);
     MOZ_MUST_USE bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOff);
     MOZ_MUST_USE bool allocateGlobal(GlobalDesc* global);
 
   public: