author | Jan de Mooij <jdemooij@mozilla.com>
date | Mon, 16 Sep 2019 15:06:29 +0000
changeset 493563 | f5bc71d6ee11838d067fd128a4436153378d7c7f
parent 493562 | 90aeb73dadaf90adec6fce1d02fd1520b1d73704
child 493564 | 2591b1d2429d5ac5afb3a98e5d19e7499e70dd4d
push id | 95553
push user | jdemooij@mozilla.com
push date | Tue, 17 Sep 2019 10:58:26 +0000
treeherder | autoland@f5bc71d6ee11
reviewers | tcampbell, lth
bugs | 1575153
milestone | 71.0a1
first release with | nightly linux32, linux64, mac, win32, win64
last release without | nightly linux32, linux64, mac, win32, win64
--- a/js/src/irregexp/NativeRegExpMacroAssembler.cpp
+++ b/js/src/irregexp/NativeRegExpMacroAssembler.cpp
@@ -500,17 +500,17 @@ NativeRegExpMacroAssembler::GenerateCode
   // If any of the code above needed to exit with an exception.
   masm.bind(&exit_with_exception_label_);
 
   // Exit with an error result to signal thrown exception.
   masm.movePtr(ImmWord(RegExpRunStatus_Error), temp0);
   masm.jump(&return_temp0);
 }
 
-  Linker linker(masm, "RegExp");
+  Linker linker(masm);
   JitCode* code = linker.newCode(cx, CodeKind::RegExp);
   if (!code) return RegExpCode();
 
 #ifdef JS_ION_PERF
   writePerfSpewerJitCodeProfile(code, "RegExp");
 #endif
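This is the first instance of a mechanical change repeated at every Linker call site in this patch: the constructor loses its name argument, which existed only to label the Linker's AutoFlushICache member (removed in js/src/jit/Linker.h below). A minimal before/after sketch of the pattern (illustrative, not a line from the patch):

    // Before: the string only named the AutoFlushICache scope for spew output.
    Linker linker(masm, "SomeStub");

    // After: the Linker just finishes the assembler; no flush bookkeeping.
    Linker linker(masm);

The perf-spewer labels (e.g. writePerfSpewerJitCodeProfile(code, "RegExp")) are unaffected, so profiling output keeps its stub names.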
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -185,17 +185,17 @@ JitCode* BaselineCacheIRCompiler::compil
   // Done emitting the main IC code. Now emit the failure paths.
   for (size_t i = 0; i < failurePaths.length(); i++) {
     if (!emitFailurePath(i)) {
       return nullptr;
     }
     EmitStubGuardFailure(masm);
   }
 
-  Linker linker(masm, "getStubCode");
+  Linker linker(masm);
   Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Baseline));
   if (!newStubCode) {
     cx_->recoverFromOutOfMemory();
     return nullptr;
   }
 
   return newStubCode;
 }
--- a/js/src/jit/BaselineCodeGen.cpp
+++ b/js/src/jit/BaselineCodeGen.cpp
@@ -215,17 +215,17 @@ MethodStatus BaselineCompiler::compile()
   if (!emitEpilogue()) {
     return Method_Error;
   }
 
   if (!emitOutOfLinePostBarrierSlot()) {
     return Method_Error;
   }
 
-  Linker linker(masm, "Baseline");
+  Linker linker(masm);
   if (masm.oom()) {
     ReportOutOfMemory(cx);
     return Method_Error;
   }
 
   JitCode* code = linker.newCode(cx, CodeKind::Baseline);
   if (!code) {
     return Method_Error;
@@ -7084,17 +7084,17 @@ bool BaselineInterpreterGenerator::gener
   if (!emitOutOfLinePostBarrierSlot()) {
     return false;
   }
 
   emitOutOfLineCodeCoverageInstrumentation();
 
   {
-    Linker linker(masm, "BaselineInterpreter");
+    Linker linker(masm);
     if (masm.oom()) {
       ReportOutOfMemory(cx);
       return false;
     }
 
     JitCode* code = linker.newCode(cx, CodeKind::Other);
     if (!code) {
       return false;
@@ -7245,17 +7245,17 @@ JitCode* JitRuntime::generateDebugTrapHa
     masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
                   &skipProfilingInstrumentation);
     masm.profilerExitFrame();
     masm.bind(&skipProfilingInstrumentation);
   }
 
   masm.ret();
 
-  Linker linker(masm, "DebugTrapHandler");
+  Linker linker(masm);
   JitCode* handlerCode = linker.newCode(cx, CodeKind::Other);
   if (!handlerCode) {
     return nullptr;
   }
 
 #ifdef JS_ION_PERF
   writePerfSpewerJitCodeProfile(handlerCode, "DebugTrapHandler");
 #endif
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -1193,17 +1193,17 @@ JitCode* ICStubCompiler::getStubCode() {
   // Compile new stubcode.
   JitContext jctx(cx, nullptr);
   StackMacroAssembler masm;
   InitMacroAssemblerForICStub(masm);
 
   if (!generateStubCode(masm)) {
     return nullptr;
   }
-  Linker linker(masm, "getStubCode");
+  Linker linker(masm);
   Rooted<JitCode*> newStubCode(cx, linker.newCode(cx, CodeKind::Baseline));
   if (!newStubCode) {
     return nullptr;
   }
 
   // Cache newly compiled stubcode.
   if (!realm->putStubCode(cx, stubKey, newStubCode)) {
     return nullptr;
@@ -4145,17 +4145,17 @@ bool JitRuntime::generateBaselineICFallb
     if (!compiler.emit_##kind()) {                                 \
       return false;                                                \
     }                                                              \
     fallbackCode.initOffset(BaselineICFallbackKind::kind, offset); \
   }
   IC_BASELINE_FALLBACK_CODE_KIND_LIST(EMIT_CODE)
 #undef EMIT_CODE
 
-  Linker linker(masm, "BaselineICFallback");
+  Linker linker(masm);
   JitCode* code = linker.newCode(cx, CodeKind::Other);
   if (!code) {
     return false;
   }
 
 #ifdef JS_ION_PERF
   writePerfSpewerJitCodeProfile(code, "BaselineICFallback");
 #endif
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -2692,17 +2692,17 @@ JitCode* JitRealm::generateRegExpMatcher
   masm.jump(&matchResultJoin);
 
   // Use an undefined value to signal to the caller that the OOL stub needs to
   // be called.
   masm.bind(&oolEntry);
   masm.moveValue(UndefinedValue(), result);
   masm.ret();
 
-  Linker linker(masm, "RegExpMatcherStub");
+  Linker linker(masm);
   JitCode* code = linker.newCode(cx, CodeKind::Other);
   if (!code) {
     return nullptr;
   }
 
 #ifdef JS_ION_PERF
   writePerfSpewerJitCodeProfile(code, "RegExpMatcherStub");
 #endif
@@ -2873,17 +2873,17 @@ JitCode* JitRealm::generateRegExpSearche
   masm.bind(&notFound);
   masm.move32(Imm32(RegExpSearcherResultNotFound), result);
   masm.ret();
 
   masm.bind(&oolEntry);
   masm.move32(Imm32(RegExpSearcherResultFailed), result);
   masm.ret();
 
-  Linker linker(masm, "RegExpSearcherStub");
+  Linker linker(masm);
   JitCode* code = linker.newCode(cx, CodeKind::Other);
   if (!code) {
     return nullptr;
   }
 
 #ifdef JS_ION_PERF
   writePerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
 #endif
@@ -3011,17 +3011,17 @@ JitCode* JitRealm::generateRegExpTesterS
   masm.bind(&oolEntry);
   masm.move32(Imm32(RegExpTesterResultFailed), result);
 
   masm.bind(&done);
   masm.freeStack(sizeof(irregexp::InputOutputData));
   masm.ret();
 
-  Linker linker(masm, "RegExpTesterStub");
+  Linker linker(masm);
   JitCode* code = linker.newCode(cx, CodeKind::Other);
   if (!code) {
     return nullptr;
   }
 
 #ifdef JS_ION_PERF
   writePerfSpewerJitCodeProfile(code, "RegExpTesterStub");
 #endif
@@ -8863,17 +8863,17 @@ JitCode* JitRealm::generateStringConcatS
   masm.pop(temp2);
   masm.pop(temp1);
 
   masm.bind(&failure);
   masm.movePtr(ImmPtr(nullptr), output);
   masm.ret();
 
-  Linker linker(masm, "StringConcatStub");
+  Linker linker(masm);
   JitCode* code = linker.newCode(cx, CodeKind::Other);
 
 #ifdef JS_ION_PERF
   writePerfSpewerJitCodeProfile(code, "StringConcatStub");
 #endif
 #ifdef MOZ_VTUNE
   vtune::MarkStub(code, "StringConcatStub");
 #endif
@@ -10758,17 +10758,17 @@ bool CodeGenerator::link(JSContext* cx,
     return false;
   }
 
   auto guardIonScript = mozilla::MakeScopeExit([&ionScript] {
     // Use js_free instead of IonScript::Destroy: the cache list is still
    // uninitialized.
     js_free(ionScript);
   });
 
-  Linker linker(masm, "IonLink");
+  Linker linker(masm);
   JitCode* code = linker.newCode(cx, CodeKind::Ion);
   if (!code) {
     return false;
   }
 
   // Encode native to bytecode map if profiling is enabled.
   if (isProfilerInstrumentationEnabled()) {
     // Generate native-to-bytecode main table.
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -312,17 +312,17 @@ bool JitRuntime::generateTrampolines(JSC
   JitSpew(JitSpew_Codegen, "# Emitting profiler exit frame tail stub");
   Label profilerExitTail;
   generateProfilerExitFrameTailStub(masm, &profilerExitTail);
 
   JitSpew(JitSpew_Codegen, "# Emitting exception tail stub");
   void* handler = JS_FUNC_TO_DATA_PTR(void*, jit::HandleException);
   generateExceptionTailStub(masm, handler, &profilerExitTail);
 
-  Linker linker(masm, "Trampolines");
+  Linker linker(masm);
   trampolineCode_ = linker.newCode(cx, CodeKind::Other);
   if (!trampolineCode_) {
     return false;
   }
 
 #ifdef JS_ION_PERF
   writePerfSpewerJitCodeProfile(trampolineCode_, "Trampolines");
 #endif
@@ -2830,170 +2830,16 @@ void jit::ForbidCompilation(JSContext* c
   if (script->hasIonScript()) {
     Invalidate(cx, script, false);
   }
 
   script->disableIon();
 }
 
-AutoFlushICache* JSContext::autoFlushICache() const { return autoFlushICache_; }
-
-void JSContext::setAutoFlushICache(AutoFlushICache* afc) {
-  autoFlushICache_ = afc;
-}
-
-// Set the range for the merging of flushes. The flushing is deferred until the
-// end of the AutoFlushICache context. Subsequent flushing within this range
-// is also deferred. This is only expected to be defined once for each
-// AutoFlushICache context. It assumes the range to be flushed is within an
-// AutoFlushICache context.
-void AutoFlushICache::setRange(uintptr_t start, size_t len) {
-#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
-    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-  AutoFlushICache* afc = TlsContext.get()->autoFlushICache();
-  MOZ_ASSERT(afc);
-  MOZ_ASSERT(!afc->start_);
-  JitSpewCont(JitSpew_CacheFlush, "(%" PRIxPTR " %zx):", start, len);
-
-  uintptr_t stop = start + len;
-  afc->start_ = start;
-  afc->stop_ = stop;
-#endif
-}
-
-// Flush the instruction cache.
-//
-// If called within a dynamic AutoFlushICache context and if the range is
-// already pending flushing for this AutoFlushICache context then the request
-// is ignored with the understanding that it will be flushed on exit from the
-// AutoFlushICache context. Otherwise the range is flushed immediately.
-//
-// Updates outside the current code object are typically the exception so they
-// are flushed immediately rather than attempting to merge them.
-//
-// For efficiency it is expected that all large ranges will be flushed within
-// an AutoFlushICache, so check. If this assertion is hit then it does not
-// necessarily indicate a program fault but it might indicate a lost
-// opportunity to merge cache flushing. It can be corrected by wrapping the
-// call in an AutoFlushICache context.
-//
-// Note this can be called without a TLS JSContext defined so this case needs
-// to be guarded against. E.g. when patching instructions from the exception
-// handler on MacOS running the ARM simulator.
-void AutoFlushICache::flush(uintptr_t start, size_t len) {
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || \
-    defined(JS_CODEGEN_NONE)
-  // Nothing
-#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
-    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-  JSContext* cx = TlsContext.get();
-  AutoFlushICache* afc = cx ? cx->autoFlushICache() : nullptr;
-  if (!afc) {
-    JitSpewCont(JitSpew_CacheFlush, "#");
-    jit::FlushICache((void*)start, len);
-    MOZ_ASSERT(len <= 32);
-    return;
-  }
-
-  uintptr_t stop = start + len;
-  if (start >= afc->start_ && stop <= afc->stop_) {
-    // Update is within the pending flush range, so defer to the end of the
-    // context.
-    JitSpewCont(JitSpew_CacheFlush, afc->inhibit_ ? "-" : "=");
-    return;
-  }
-
-  JitSpewCont(JitSpew_CacheFlush, afc->inhibit_ ? "x" : "*");
-  jit::FlushICache((void*)start, len);
-#else
-  MOZ_CRASH("Unresolved porting API - AutoFlushICache::flush");
-#endif
-}
-
-// Flag the current dynamic AutoFlushICache as inhibiting flushing. Useful in
-// error paths where the changes are being abandoned.
-void AutoFlushICache::setInhibit() {
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || \
-    defined(JS_CODEGEN_NONE)
-  // Nothing
-#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
-    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-  AutoFlushICache* afc = TlsContext.get()->autoFlushICache();
-  MOZ_ASSERT(afc);
-  MOZ_ASSERT(afc->start_);
-  JitSpewCont(JitSpew_CacheFlush, "I");
-  afc->inhibit_ = true;
-#else
-  MOZ_CRASH("Unresolved porting API - AutoFlushICache::setInhibit");
-#endif
-}
-
-// The common use case is merging cache flushes when preparing a code object.
-// In this case the entire range of the code object is being flushed and as
-// the code is patched smaller redundant flushes could occur. The design
-// allows an AutoFlushICache dynamic thread local context to be declared in
-// which the range of the code object can be set, which defers flushing until
-// the end of this dynamic context. Flushes within this code range are also
-// deferred, avoiding redundant flushing. Flushing outside this code range is
-// not affected and proceeds immediately.
-//
-// In some cases flushing is not necessary, such as when compiling a wasm
-// module which is flushed again when dynamically linked, and also in error
-// paths that abandon the code. Flushing within the set code range can be
-// inhibited within the AutoFlushICache dynamic context by setting an inhibit
-// flag.
-//
-// The JS compiler can be re-entered while within an AutoFlushICache dynamic
-// context and it is assumed that code being assembled or patched is not
-// executed before the exit of the respective AutoFlushICache dynamic context.
-//
-AutoFlushICache::AutoFlushICache(const char* nonce, bool inhibit)
-#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
-    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-    : start_(0),
-      stop_(0),
-#  ifdef JS_JITSPEW
-      name_(nonce),
-#  endif
-      inhibit_(inhibit)
-#endif
-{
-#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
-    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-  JSContext* cx = TlsContext.get();
-  AutoFlushICache* afc = cx->autoFlushICache();
-  if (afc) {
-    JitSpew(JitSpew_CacheFlush, "<%s,%s%s ", nonce, afc->name_,
-            inhibit ? " I" : "");
-  } else {
-    JitSpewCont(JitSpew_CacheFlush, "<%s%s ", nonce, inhibit ? " I" : "");
-  }
-
-  prev_ = afc;
-  cx->setAutoFlushICache(this);
-#endif
-}
-
-AutoFlushICache::~AutoFlushICache() {
-#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
-    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-  JSContext* cx = TlsContext.get();
-  MOZ_ASSERT(cx->autoFlushICache() == this);
-
-  if (!inhibit_ && start_) {
-    jit::FlushICache((void*)start_, size_t(stop_ - start_));
-  }
-
-  JitSpewCont(JitSpew_CacheFlush, "%s%s>", name_, start_ ? "" : " U");
-  JitSpewFin(JitSpew_CacheFlush);
-  cx->setAutoFlushICache(prev_);
-#endif
-}
-
 size_t jit::SizeOfIonData(JSScript* script,
                           mozilla::MallocSizeOf mallocSizeOf) {
   size_t result = 0;
 
   if (script->hasIonScript()) {
     result += script->ionScript()->sizeOfIncludingThis(mallocSizeOf);
   }
--- a/js/src/jit/IonCacheIRCompiler.cpp
+++ b/js/src/jit/IonCacheIRCompiler.cpp
@@ -546,17 +546,17 @@ JitCode* IonCacheIRCompiler::compile() {
     Register scratch = ic_->scratchRegisterForEntryJump();
     CodeOffset offset = masm.movWithPatch(ImmWord(-1), scratch);
     masm.jump(Address(scratch, 0));
     if (!nextCodeOffsets_.append(offset)) {
       return nullptr;
     }
   }
 
-  Linker linker(masm, "getStubCode");
+  Linker linker(masm);
   Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Ion));
   if (!newStubCode) {
     cx_->recoverFromOutOfMemory();
     return nullptr;
   }
 
   for (CodeOffset offset : nextCodeOffsets_) {
     Assembler::PatchDataWithValueCheck(CodeLocationLabel(newStubCode, offset),
--- a/js/src/jit/IonCode.h
+++ b/js/src/jit/IonCode.h
@@ -612,37 +612,16 @@ struct IonScriptCounts {
              blocks_[i].sizeOfExcludingThis(mallocSizeOf);
     }
     return size;
   }
 };
 
 struct VMFunction;
 
-struct AutoFlushICache {
- private:
-#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
-    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-  uintptr_t start_;
-  uintptr_t stop_;
-#  ifdef JS_JITSPEW
-  const char* name_;
-#  endif
-  bool inhibit_;
-  AutoFlushICache* prev_;
-#endif
-
- public:
-  static void setRange(uintptr_t p, size_t len);
-  static void flush(uintptr_t p, size_t len);
-  static void setInhibit();
-  ~AutoFlushICache();
-  explicit AutoFlushICache(const char* nonce, bool inhibit = false);
-};
-
 }  // namespace jit
 
 namespace gc {
 
 inline bool IsMarked(JSRuntime* rt, const jit::VMFunction*) {
   // VMFunction are only static objects which are used by WeakMaps as keys.
   // It is considered as a root object which is always marked.
   return true;
--- a/js/src/jit/Linker.h
+++ b/js/src/jit/Linker.h
@@ -15,29 +15,25 @@
 #include "vm/Realm.h"
 
 namespace js {
 namespace jit {
 
 class Linker {
   MacroAssembler& masm;
   mozilla::Maybe<AutoWritableJitCodeFallible> awjcf;
-  AutoFlushICache afc;
 
   JitCode* fail(JSContext* cx) {
     ReportOutOfMemory(cx);
     return nullptr;
   }
 
  public:
   // Construct a linker with a rooted macro assembler.
-  explicit Linker(MacroAssembler& masm, const char* name)
-      : masm(masm), afc(name) {
-    masm.finish();
-  }
+  explicit Linker(MacroAssembler& masm) : masm(masm) { masm.finish(); }
 
   // Create a new JitCode object and populate it with the contents of the
   // macro assembler buffer.
   //
   // This method cannot GC. Errors are reported to the context.
   JitCode* newCode(JSContext* cx, CodeKind kind);
 };
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -516,22 +516,19 @@ bool Assembler::swapBuffer(wasm::Bytes&
   MOZ_ASSERT(bytes.empty());
   if (!bytes.resize(bytesNeeded())) {
     return false;
   }
   m_buffer.executableCopy(bytes.begin());
   return true;
 }
 
-void Assembler::executableCopy(uint8_t* buffer, bool flushICache) {
+void Assembler::executableCopy(uint8_t* buffer) {
   MOZ_ASSERT(isFinished);
   m_buffer.executableCopy(buffer);
-  if (flushICache) {
-    AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
-  }
 }
 
 class RelocationIterator {
   CompactBufferReader reader_;
   // Offset in bytes.
   uint32_t offset_;
 
  public:
@@ -704,22 +701,16 @@ static void TraceOneDataRelocation(JSTra
   if (ptr != prior) {
     if (awjc.isNothing()) {
       awjc.emplace(code);
     }
 
     MacroAssemblerARM::ma_mov_patch(Imm32(int32_t(ptr)), dest,
                                     Assembler::Always, rs, iter);
-
-    // L_LDR won't cause any instructions to be updated.
-    if (rs != Assembler::L_LDR) {
-      AutoFlushICache::flush(uintptr_t(iter.cur()), 4);
-      AutoFlushICache::flush(uintptr_t(iter.next()), 4);
-    }
   }
 }
 
 /* static */
 void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
                                      CompactBufferReader& reader) {
   mozilla::Maybe<AutoWritableJitCode> awjc;
   while (reader.more()) {
@@ -2340,18 +2331,16 @@ uint32_t Assembler::PatchWrite_NearCallS
 void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
                                     CodeLocationLabel toCall) {
   Instruction* inst = (Instruction*)start.raw();
   // Overwrite whatever instruction used to be here with a call. Since the
   // destination is in the same function, it will be within range of the
   // 24 << 2 byte bl instruction.
   uint8_t* dest = toCall.raw();
   new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst), Always);
-  // Ensure everyone sees the code that was just written into memory.
-  AutoFlushICache::flush(uintptr_t(inst), 4);
 }
 
 void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
                                         PatchedImmPtr newValue,
                                         PatchedImmPtr expectedValue) {
   Instruction* ptr = reinterpret_cast<Instruction*>(label.raw());
 
   Register dest;
@@ -2364,23 +2353,16 @@ void Assembler::PatchDataWithValueCheck(
   }
 
   // Patch over actual instructions.
   {
     InstructionIterator iter(ptr);
     MacroAssembler::ma_mov_patch(Imm32(int32_t(newValue.value)), dest, Always,
                                  rs, iter);
   }
-
-  // L_LDR won't cause any instructions to be updated.
-  if (rs != L_LDR) {
-    InstructionIterator iter(ptr);
-    AutoFlushICache::flush(uintptr_t(iter.cur()), 4);
-    AutoFlushICache::flush(uintptr_t(iter.next()), 4);
-  }
 }
 
 void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
                                         ImmPtr newValue, ImmPtr expectedValue) {
   PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
                           PatchedImmPtr(expectedValue.value));
 }
 
@@ -2549,17 +2531,16 @@ void Assembler::ToggleToJmp(CodeLocation
   uint32_t* ptr = (uint32_t*)inst_.raw();
 
   DebugOnly<Instruction*> inst = (Instruction*)inst_.raw();
   MOZ_ASSERT(inst->is<InstCMP>());
 
   // Zero bits 20-27, then set 24-27 to be correct for a branch.
   // 20-23 will be part of the B's immediate, and should be 0.
   *ptr = (*ptr & ~(0xff << 20)) | (0xa0 << 20);
-  AutoFlushICache::flush(uintptr_t(ptr), 4);
 }
 
 void Assembler::ToggleToCmp(CodeLocationLabel inst_) {
   uint32_t* ptr = (uint32_t*)inst_.raw();
 
   DebugOnly<Instruction*> inst = (Instruction*)inst_.raw();
   MOZ_ASSERT(inst->is<InstBImm>());
 
@@ -2569,18 +2550,16 @@ void Assembler::ToggleToCmp(CodeLocation
   // Also make sure that the CMP is valid. Part of having a valid CMP is that
   // all of the bits describing the destination in most ALU instructions are
   // all unset (looks like it is encoding r0).
   MOZ_ASSERT(toRD(*inst) == r0);
 
   // Zero out bits 20-27, then set them to be correct for a compare.
   *ptr = (*ptr & ~(0xff << 20)) | (0x35 << 20);
-
-  AutoFlushICache::flush(uintptr_t(ptr), 4);
 }
 
 void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
   InstructionIterator iter(reinterpret_cast<Instruction*>(inst_.raw()));
   MOZ_ASSERT(iter.cur()->is<InstMovW>() || iter.cur()->is<InstLDR>());
 
   if (iter.cur()->is<InstMovW>()) {
     // If it looks like the start of a movw/movt sequence, then make sure we
@@ -2599,18 +2578,16 @@ void Assembler::ToggleCall(CodeLocationL
   Instruction* inst = iter.cur();
 
   if (enabled) {
     *inst = InstBLXReg(ScratchRegister, Always);
   } else {
     *inst = InstNOP();
   }
-
-  AutoFlushICache::flush(uintptr_t(inst), 4);
 }
 
 size_t Assembler::ToggledCallSize(uint8_t* code) {
   InstructionIterator iter(reinterpret_cast<Instruction*>(code));
   MOZ_ASSERT(iter.cur()->is<InstMovW>() || iter.cur()->is<InstLDR>());
 
   if (iter.cur()->is<InstMovW>()) {
     // If it looks like the start of a movw/movt sequence, then make sure we
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -1692,17 +1692,17 @@ class Assembler : public AssemblerShared
   void comment(const char* msg) {
 #ifdef JS_DISASM_ARM
     spew_.spew("; %s", msg);
 #endif
   }
 
   // Copy the assembly code to the given buffer, and perform any pending
   // relocations relying on the target address.
-  void executableCopy(uint8_t* buffer, bool flushICache = true);
+  void executableCopy(uint8_t* buffer);
 
   // Actual assembly emitting functions.
 
   // Since I can't think of a reasonable default for the mode, I'm going to
   // leave it as a required argument.
   void startDataTransferM(LoadStore ls, Register rm, DTMMode mode,
                           DTMWriteBack update = NoWriteBack,
                           Condition c = Always) {
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -4300,25 +4300,23 @@ CodeOffset MacroAssembler::nopPatchableT
 }
 
 void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
   uint8_t* inst = call - 4;
   MOZ_ASSERT(reinterpret_cast<Instruction*>(inst)->is<InstBLImm>() ||
              reinterpret_cast<Instruction*>(inst)->is<InstNOP>());
   new (inst) InstBLImm(BOffImm(target - inst), Assembler::Always);
-  AutoFlushICache::flush(uintptr_t(inst), 4);
 }
 
 void MacroAssembler::patchCallToNop(uint8_t* call) {
   uint8_t* inst = call - 4;
   MOZ_ASSERT(reinterpret_cast<Instruction*>(inst)->is<InstBLImm>() ||
              reinterpret_cast<Instruction*>(inst)->is<InstNOP>());
   new (inst) InstNOP();
-  AutoFlushICache::flush(uintptr_t(inst), 4);
 }
 
 void MacroAssembler::pushReturnAddress() { push(lr); }
 
 void MacroAssembler::popReturnAddress() { pop(lr); }
 
 // ===============================================================
 // ABI function calls.
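After this hunk, the ARM patch helpers write instructions but no longer flush them, so correctness depends on the caller owning the flush. A hedged sketch of what a caller-side flush could look like, reusing the jit::FlushICache primitive that the removed Ion.cpp code called (the wrapper function is hypothetical, not part of this patch):

    // Hypothetical wrapper: patch a nop into a call, then flush the
    // patched instruction explicitly instead of relying on the helper.
    static void PatchNopToCallAndFlush(uint8_t* call, uint8_t* target) {
      js::jit::MacroAssembler::patchNopToCall(call, target);
      js::jit::FlushICache(call - 4, 4);  // the rewritten bl instruction
    }

Most code paths touched by this patch instead end in ExecutableAllocator::makeExecutableAndFlushICache, which flushes the entire region at once (see the test and wasm hunks below).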
--- a/js/src/jit/arm64/Assembler-arm64.cpp
+++ b/js/src/jit/arm64/Assembler-arm64.cpp
@@ -144,17 +144,17 @@ BufferOffset Assembler::emitExtendedJump
   if (oom()) {
     return BufferOffset();
   }
 
   return tableOffset;
 }
 
-void Assembler::executableCopy(uint8_t* buffer, bool flushICache) {
+void Assembler::executableCopy(uint8_t* buffer) {
   // Copy the code and all constant pools into the output buffer.
   armbuffer_.executableCopy(buffer);
 
   // Patch any relative jumps that target code outside the buffer.
   // The extended jump table may be used for distant jumps.
   for (size_t i = 0; i < pendingJumps_.length(); i++) {
     RelativePatch& rp = pendingJumps_[i];
@@ -178,20 +178,16 @@ void Assembler::executableCopy(uint8_t*
       entry->data = target;
     }
   } else {
     // Currently a two-instruction call, it should be possible to optimize
     // this into a single instruction call + nop in some instances, but this
    // will work.
    }
  }
-
-  if (flushICache) {
-    AutoFlushICache::setRange(uintptr_t(buffer), armbuffer_.size());
-  }
 }
 
 BufferOffset Assembler::immPool(ARMRegister dest, uint8_t* value,
                                 vixl::LoadLiteralOp op, const LiteralDoc& doc,
                                 ARMBuffer::PoolEntry* pe) {
   uint32_t inst = op | Rt(dest);
   const size_t numInst = 1;
   const unsigned sizeOfPoolEntryInBytes = 4;
@@ -335,18 +331,16 @@ void Assembler::PatchWrite_NearCall(Code
   Instruction* dest = (Instruction*)start.raw();
   ptrdiff_t relTarget = (Instruction*)toCall.raw() - dest;
   ptrdiff_t relTarget00 = relTarget >> 2;
   MOZ_RELEASE_ASSERT((relTarget & 0x3) == 0);
   MOZ_RELEASE_ASSERT(vixl::IsInt26(relTarget00));
 
   // printf("patching %p with call to %p\n", start.raw(), toCall.raw());
   bl(dest, relTarget00);
-
-  AutoFlushICache::flush(uintptr_t(dest), 4);
 }
 
 void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
                                         PatchedImmPtr newValue,
                                         PatchedImmPtr expected) {
   Instruction* i = (Instruction*)label.raw();
   void** pValue = i->LiteralAddress<void**>();
   MOZ_ASSERT(*pValue == expected.value);
@@ -363,18 +357,16 @@ void Assembler::ToggleToJmp(CodeLocation
   Instruction* i = (Instruction*)inst_.raw();
   MOZ_ASSERT(i->IsAddSubImmediate());
 
   // Refer to instruction layout in ToggleToCmp().
   int imm19 = (int)i->Bits(23, 5);
   MOZ_ASSERT(vixl::IsInt19(imm19));
 
   b(i, imm19, Always);
-
-  AutoFlushICache::flush(uintptr_t(i), 4);
 }
 
 void Assembler::ToggleToCmp(CodeLocationLabel inst_) {
   Instruction* i = (Instruction*)inst_.raw();
   MOZ_ASSERT(i->IsCondB());
 
   int imm19 = i->ImmCondBranch();
   // bit 23 is reserved, and the simulator throws an assertion when this
   // happens
@@ -388,18 +380,16 @@ void Assembler::ToggleToCmp(CodeLocation
   // 10:21 - ImmAddSub. (OK!)
   // 5:9 - First source register (Rn). (OK!)
   // 0:4 - Destination Register. Must be xzr.
 
   // From the above, there is a safe 19-bit contiguous region from 5:23.
   Emit(i, vixl::ThirtyTwoBits | vixl::AddSubImmediateFixed | vixl::SUB |
              Flags(vixl::SetFlags) | Rd(vixl::xzr) |
              (imm19 << vixl::Rn_offset));
-
-  AutoFlushICache::flush(uintptr_t(i), 4);
 }
 
 void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
   const Instruction* first = reinterpret_cast<Instruction*>(inst_.raw());
   Instruction* load;
   Instruction* call;
 
   // There might be a constant pool at the very first instruction.
@@ -441,19 +431,16 @@ void Assembler::ToggleCall(CodeLocationL
     // Transform this to:
     //   ldr x17, [pc, offset]
     //   blr x17
     int32_t offset = (int)load->ImmPCRawOffset();
     MOZ_ASSERT(vixl::IsInt19(offset));
     ldr(load, ScratchReg2_64, int32_t(offset));
     blr(call, ScratchReg2_64);
   }
-
-  AutoFlushICache::flush(uintptr_t(first), 4);
-  AutoFlushICache::flush(uintptr_t(call), 8);
 }
 
 // Patches loads generated by MacroAssemblerCompat::mov(CodeLabel*, Register).
 // The loading code is implemented in movePatchablePtr().
 void Assembler::UpdateLoad64Value(Instruction* inst0, uint64_t value) {
   MOZ_ASSERT(inst0->IsLDR());
   uint64_t* literal = inst0->LiteralAddress<uint64_t*>();
   *literal = value;
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -200,17 +200,17 @@ class Assembler : public vixl::Assembler
   bool appendRawCode(const uint8_t* code, size_t numBytes);
   bool reserve(size_t size);
   bool swapBuffer(wasm::Bytes& bytes);
 
   // Emit the jump table, returning the BufferOffset to the first entry in the
   // table.
   BufferOffset emitExtendedJumpTable();
   BufferOffset ExtendedJumpTable_;
-  void executableCopy(uint8_t* buffer, bool flushICache = true);
+  void executableCopy(uint8_t* buffer);
 
   BufferOffset immPool(ARMRegister dest, uint8_t* value, vixl::LoadLiteralOp op,
                        const LiteralDoc& doc, ARMBuffer::PoolEntry* pe = nullptr);
   BufferOffset immPool64(ARMRegister dest, uint64_t value,
                          ARMBuffer::PoolEntry* pe = nullptr);
   BufferOffset fImmPool(ARMFPRegister dest, uint8_t* value,
                         vixl::LoadLiteralOp op, const LiteralDoc& doc);
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -677,17 +677,16 @@ CodeOffset MacroAssembler::callWithPatch
 void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
   Instruction* inst = getInstructionAt(BufferOffset(callerOffset - 4));
   MOZ_ASSERT(inst->IsBL());
   ptrdiff_t relTarget = (int)calleeOffset - ((int)callerOffset - 4);
   ptrdiff_t relTarget00 = relTarget >> 2;
   MOZ_RELEASE_ASSERT((relTarget & 0x3) == 0);
   MOZ_RELEASE_ASSERT(vixl::IsInt26(relTarget00));
   bl(inst, relTarget00);
-  AutoFlushICache::flush(uintptr_t(inst), 4);
 }
 
 CodeOffset MacroAssembler::farJumpWithPatch() {
   vixl::UseScratchRegisterScope temps(this);
   const ARMRegister scratch = temps.AcquireX();
   const ARMRegister scratch2 = temps.AcquireX();
 
   AutoForbidPoolsAndNops afp(this,
@@ -734,25 +733,23 @@ CodeOffset MacroAssembler::nopPatchableT
   return CodeOffset(currentOffset());
 }
 
 void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
   uint8_t* inst = call - 4;
   Instruction* instr = reinterpret_cast<Instruction*>(inst);
   MOZ_ASSERT(instr->IsBL() || instr->IsNOP());
   bl(instr, (target - inst) >> 2);
-  AutoFlushICache::flush(uintptr_t(inst), 4);
 }
 
 void MacroAssembler::patchCallToNop(uint8_t* call) {
   uint8_t* inst = call - 4;
   Instruction* instr = reinterpret_cast<Instruction*>(inst);
   MOZ_ASSERT(instr->IsBL() || instr->IsNOP());
   nop(instr);
-  AutoFlushICache::flush(uintptr_t(inst), 4);
 }
 
 void MacroAssembler::pushReturnAddress() {
   MOZ_RELEASE_ASSERT(!sp.Is(GetStackPointer64()), "Not valid");
   push(lr);
 }
 
 void MacroAssembler::popReturnAddress() {
--- a/js/src/jit/mips-shared/Assembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
@@ -1735,29 +1735,25 @@ InstImm AssemblerMIPSShared::invertBranc
 }
 
 void AssemblerMIPSShared::ToggleToJmp(CodeLocationLabel inst_) {
   InstImm* inst = (InstImm*)inst_.raw();
 
   MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_andi >> OpcodeShift));
   // We converted beq to andi, so now we restore it.
   inst->setOpcode(op_beq);
-
-  AutoFlushICache::flush(uintptr_t(inst), 4);
 }
 
 void AssemblerMIPSShared::ToggleToCmp(CodeLocationLabel inst_) {
   InstImm* inst = (InstImm*)inst_.raw();
 
   // toggledJump is always used for short jumps.
   MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_beq >> OpcodeShift));
   // Replace "beq $zero, $zero, offset" with "andi $zero, $zero, offset"
   inst->setOpcode(op_andi);
-
-  AutoFlushICache::flush(uintptr_t(inst), 4);
 }
 
 void AssemblerMIPSShared::UpdateLuiOriValue(Instruction* inst0,
                                             Instruction* inst1,
                                             uint32_t value) {
   MOZ_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
   MOZ_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -854,17 +854,17 @@ class AssemblerMIPSShared : public Assem
  protected:
   bool isFinished;
 
  public:
   void finish();
   bool appendRawCode(const uint8_t* code, size_t numBytes);
   bool reserve(size_t size);
   bool swapBuffer(wasm::Bytes& bytes);
-  void executableCopy(void* buffer, bool flushICache = true);
+  void executableCopy(void* buffer);
   void copyJumpRelocationTable(uint8_t* dest);
   void copyDataRelocationTable(uint8_t* dest);
 
   // Size of the instruction stream, in bytes.
   size_t size() const;
   // Size of the jump relocation table, in bytes.
   size_t jumpRelocationTableBytes() const;
   size_t dataRelocationTableBytes() const;
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -1495,23 +1495,21 @@ CodeOffset MacroAssembler::nopPatchableT
   return CodeOffset(currentOffset());
 }
 
 void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
 #ifdef JS_CODEGEN_MIPS64
   Instruction* inst = (Instruction*)call - 6 /* six nops */;
   Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)target);
   inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
-  AutoFlushICache::flush(uintptr_t(inst), 6 * 4);
 #else
   Instruction* inst = (Instruction*)call - 4 /* four nops */;
   Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
                                      (uint32_t)target);
   inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
-  AutoFlushICache::flush(uintptr_t(inst), 4 * 4);
 #endif
 }
 
 void MacroAssembler::patchCallToNop(uint8_t* call) {
 #ifdef JS_CODEGEN_MIPS64
   Instruction* inst = (Instruction*)call - 6 /* six nops */;
 #else
   Instruction* inst = (Instruction*)call - 4 /* four nops */;
@@ -1519,19 +1517,16 @@ void MacroAssembler::patchCallToNop(uint
   inst[0].makeNop();
   inst[1].makeNop();
   inst[2].makeNop();
   inst[3].makeNop();
 #ifdef JS_CODEGEN_MIPS64
   inst[4].makeNop();
   inst[5].makeNop();
-  AutoFlushICache::flush(uintptr_t(inst), 6 * 4);
-#else
-  AutoFlushICache::flush(uintptr_t(inst), 4 * 4);
 #endif
 }
 
 void MacroAssembler::pushReturnAddress() { push(ra); }
 
 void MacroAssembler::popReturnAddress() { pop(ra); }
 
 // ===============================================================
--- a/js/src/jit/mips32/Assembler-mips32.cpp
+++ b/js/src/jit/mips32/Assembler-mips32.cpp
@@ -106,23 +106,19 @@ uint32_t js::jit::RZ(FloatRegister r) {
   return r.id() << RZShift;
 }
 
 uint32_t js::jit::SA(FloatRegister r) {
   MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
   return r.id() << SAShift;
 }
 
-void Assembler::executableCopy(uint8_t* buffer, bool flushICache) {
+void Assembler::executableCopy(uint8_t* buffer) {
   MOZ_ASSERT(isFinished);
   m_buffer.executableCopy(buffer);
-
-  if (flushICache) {
-    AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
-  }
 }
 
 uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
   Instruction* inst = (Instruction*)instPtr;
   return Assembler::ExtractLuiOriValue(inst, inst->next());
 }
 
 static JitCode* CodeFromJump(Instruction* jump) {
@@ -148,17 +144,16 @@ static void TraceOneDataRelocation(JSTra
   // No barrier needed since these are constants.
   TraceManuallyBarrieredGenericPointerEdge(
       trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
   if (ptr != prior) {
     if (awjc.isNothing()) {
       awjc.emplace(code);
     }
     AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(), uint32_t(ptr));
-    AutoFlushICache::flush(uintptr_t(inst), 8);
   }
 }
 
 /* static */
 void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
                                      CompactBufferReader& reader) {
   mozilla::Maybe<AutoWritableJitCode> awjc;
   while (reader.more()) {
@@ -303,19 +298,16 @@ void Assembler::PatchWrite_NearCall(Code
   // Always use long jump for two reasons:
   // - Jump has to be the same size because of PatchWrite_NearCallSize.
   // - Return address has to be at the end of replaced block.
   // Short jump wouldn't be more efficient.
   Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
                                      (uint32_t)dest);
   inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
   inst[3] = InstNOP();
-
-  // Ensure everyone sees the code that was just written into memory.
-  AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
 }
 
 uint32_t Assembler::ExtractLuiOriValue(Instruction* inst0,
                                        Instruction* inst1) {
   InstImm* i0 = (InstImm*)inst0;
   InstImm* i1 = (InstImm*)inst1;
   MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
   MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
@@ -343,18 +335,16 @@ void Assembler::PatchDataWithValueCheck(
   // Extract old Value
   DebugOnly<uint32_t> value =
       Assembler::ExtractLuiOriValue(&inst[0], &inst[1]);
   MOZ_ASSERT(value == uint32_t(expectedValue.value));
 
   // Replace with new value
   AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(),
                                          uint32_t(newValue.value));
-
-  AutoFlushICache::flush(uintptr_t(inst), 8);
 }
 
 uint32_t Assembler::ExtractInstructionImmediate(uint8_t* code) {
   InstImm* inst = (InstImm*)code;
   return Assembler::ExtractLuiOriValue(inst, inst->next());
 }
 
 void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
@@ -368,11 +358,9 @@ void Assembler::ToggleCall(CodeLocationL
   if (enabled) {
     InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
     *i2 = jalr;
   } else {
     InstNOP nop;
     *i2 = nop;
   }
-
-  AutoFlushICache::flush(uintptr_t(i2), 4);
 }
--- a/js/src/jit/mips32/Assembler-mips32.h
+++ b/js/src/jit/mips32/Assembler-mips32.h
@@ -190,17 +190,17 @@ class Assembler : public AssemblerMIPSSh
                                    CompactBufferReader& reader);
   static void TraceDataRelocations(JSTracer* trc, JitCode* code,
                                    CompactBufferReader& reader);
 
   void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
 
   // Copy the assembly code to the given buffer, and perform any pending
   // relocations relying on the target address.
-  void executableCopy(uint8_t* buffer, bool flushICache = true);
+  void executableCopy(uint8_t* buffer);
 
   static uint32_t PatchWrite_NearCallSize();
 
   static uint32_t ExtractLuiOriValue(Instruction* inst0, Instruction* inst1);
   static void WriteLuiOriInstructions(Instruction* inst, Instruction* inst1,
                                       Register reg, uint32_t value);
 
   static void PatchWrite_NearCall(CodeLocationLabel start,
--- a/js/src/jit/mips64/Assembler-mips64.cpp
+++ b/js/src/jit/mips64/Assembler-mips64.cpp
@@ -70,23 +70,19 @@ uint32_t js::jit::RZ(FloatRegister r) {
   return r.id() << RZShift;
 }
 
 uint32_t js::jit::SA(FloatRegister r) {
   MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
   return r.id() << SAShift;
 }
 
-void Assembler::executableCopy(uint8_t* buffer, bool flushICache) {
+void Assembler::executableCopy(uint8_t* buffer) {
   MOZ_ASSERT(isFinished);
   m_buffer.executableCopy(buffer);
-
-  if (flushICache) {
-    AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
-  }
 }
 
 uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
   Instruction* inst = (Instruction*)instPtr;
   return Assembler::ExtractLoad64Value(inst);
 }
 
 static JitCode* CodeFromJump(Instruction* jump) {
@@ -126,17 +122,16 @@ static void TraceOneDataRelocation(JSTra
         trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
   }
   if (ptr != prior) {
     if (awjc.isNothing()) {
       awjc.emplace(code);
     }
     Assembler::UpdateLoad64Value(inst, uint64_t(ptr));
-    AutoFlushICache::flush(uintptr_t(inst), 6 * sizeof(uint32_t));
   }
 }
 
 /* static */
 void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
                                      CompactBufferReader& reader) {
   mozilla::Maybe<AutoWritableJitCode> awjc;
   while (reader.more()) {
@@ -244,19 +239,16 @@ void Assembler::PatchWrite_NearCall(Code
   // Overwrite whatever instruction used to be here with a call.
   // Always use long jump for two reasons:
   // - Jump has to be the same size because of PatchWrite_NearCallSize.
   // - Return address has to be at the end of replaced block.
   // Short jump wouldn't be more efficient.
   Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest);
   inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
   inst[5] = InstNOP();
-
-  // Ensure everyone sees the code that was just written into memory.
-  AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
 }
 
 uint64_t Assembler::ExtractLoad64Value(Instruction* inst0) {
   InstImm* i0 = (InstImm*)inst0;
   InstImm* i1 = (InstImm*)i0->next();
   InstReg* i2 = (InstReg*)i1->next();
   InstImm* i3 = (InstImm*)i2->next();
   InstImm* i5 = (InstImm*)i3->next()->next();
@@ -332,18 +324,16 @@ void Assembler::PatchDataWithValueCheck(
   Instruction* inst = (Instruction*)label.raw();
 
   // Extract old Value
   DebugOnly<uint64_t> value = Assembler::ExtractLoad64Value(inst);
   MOZ_ASSERT(value == uint64_t(expectedValue.value));
 
   // Replace with new value
   Assembler::UpdateLoad64Value(inst, uint64_t(newValue.value));
-
-  AutoFlushICache::flush(uintptr_t(inst), 6 * sizeof(uint32_t));
 }
 
 uint64_t Assembler::ExtractInstructionImmediate(uint8_t* code) {
   InstImm* inst = (InstImm*)code;
   return Assembler::ExtractLoad64Value(inst);
 }
 
 void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
@@ -360,11 +350,9 @@ void Assembler::ToggleCall(CodeLocationL
   if (enabled) {
     MOZ_ASSERT(i4->extractOpcode() != ((uint32_t)op_lui >> OpcodeShift));
     InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
     *i4 = jalr;
   } else {
     InstNOP nop;
     *i4 = nop;
   }
-
-  AutoFlushICache::flush(uintptr_t(i4), sizeof(uint32_t));
 }
--- a/js/src/jit/mips64/Assembler-mips64.h
+++ b/js/src/jit/mips64/Assembler-mips64.h
@@ -210,17 +210,17 @@ class Assembler : public AssemblerMIPSSh
                                    CompactBufferReader& reader);
   static void TraceDataRelocations(JSTracer* trc, JitCode* code,
                                    CompactBufferReader& reader);
 
   void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
 
   // Copy the assembly code to the given buffer, and perform any pending
   // relocations relying on the target address.
-  void executableCopy(uint8_t* buffer, bool flushICache = true);
+  void executableCopy(uint8_t* buffer);
 
   static uint32_t PatchWrite_NearCallSize();
 
   static uint64_t ExtractLoad64Value(Instruction* inst0);
   static void UpdateLoad64Value(Instruction* inst0, uint64_t value);
   static void WriteLoad64Instructions(Instruction* inst0, Register reg,
                                       uint64_t value);
--- a/js/src/jit/shared/AtomicOperations-shared-jit.cpp
+++ b/js/src/jit/shared/AtomicOperations-shared-jit.cpp
@@ -867,19 +867,18 @@ bool InitializeJittedAtomics() {
                                        MemCheckKind::MakeUndefined);
   if (!code) {
     return false;
   }
 
   // Zero the padding.
   memset(code + codeLength, 0, roundedCodeLength - codeLength);
 
-  // Copy the code into place but do not flush, as the flush path requires a
-  // JSContext* we do not have.
-  masm.executableCopy(code, /* flushICache = */ false);
+  // Copy the code into place.
+  masm.executableCopy(code);
 
   // Reprotect the whole region to avoid having separate RW and RX mappings.
   if (!ExecutableAllocator::makeExecutableAndFlushICache(
           code, roundedCodeLength)) {
     DeallocateExecutableMemory(code, roundedCodeLength);
     return false;
   }
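This hunk shows the replacement discipline in full: copy the code while the pages are still writable, then reprotect and flush in one shot. The sequence, condensed into a standalone sketch (buffer name and size variable are illustrative):

    // Copy-then-protect: no icache flush is needed while the pages are RW.
    uint8_t* code = (uint8_t*)AllocateExecutableMemory(
        size, ProtectionSetting::Writable, MemCheckKind::MakeUndefined);
    if (!code) {
      return false;
    }
    masm.executableCopy(code);  // now a plain copy, no flush side effect

    // Flip the region to RX and flush the icache for all of it at once.
    if (!ExecutableAllocator::makeExecutableAndFlushICache(code, size)) {
      DeallocateExecutableMemory(code, size);
      return false;
    }

Folding the flush into the protection change also removes the old awkwardness noted in the deleted comment: the flush path needed a JSContext* that this code does not have.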
--- a/js/src/jit/x64/Assembler-x64.cpp
+++ b/js/src/jit/x64/Assembler-x64.cpp
@@ -193,17 +193,17 @@ void Assembler::finish() {
     masm.ud2();
     MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == 8);
     masm.immediate64(0);
     MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == SizeOfExtendedJump);
     MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == SizeOfJumpTableEntry);
   }
 }
 
-void Assembler::executableCopy(uint8_t* buffer, bool flushICache) {
+void Assembler::executableCopy(uint8_t* buffer) {
   AssemblerX86Shared::executableCopy(buffer);
 
   for (size_t i = 0; i < jumps_.length(); i++) {
     RelativePatch& rp = jumps_[i];
     uint8_t* src = buffer + rp.offset;
     if (!rp.target) {
       // The patch target is nullptr for jumps that have been linked to
       // a label within the same code block, but may be repatched later
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -335,17 +335,17 @@ class Assembler : public AssemblerX86Sha
                                    CompactBufferReader& reader);
 
   // The buffer is about to be linked, make sure any constant pools or excess
   // bookkeeping has been flushed to the instruction stream.
   void finish();
 
   // Copy the assembly code to the given buffer, and perform any pending
   // relocations relying on the target address.
-  void executableCopy(uint8_t* buffer, bool flushICache = true);
+  void executableCopy(uint8_t* buffer);
 
   // Actual assembly emitting functions.
 
   void push(const ImmGCPtr ptr) {
     movq(ptr, ScratchReg);
     push(ScratchReg);
   }
   void push(const ImmWord ptr) {
--- a/js/src/jit/x86/Assembler-x86.cpp
+++ b/js/src/jit/x86/Assembler-x86.cpp
@@ -42,17 +42,17 @@ ABIArg ABIArgGenerator::next(MIRType typ
       stackOffset_ += Simd128DataSize;
       break;
     default:
       MOZ_CRASH("Unexpected argument type");
   }
   return current_;
 }
 
-void Assembler::executableCopy(uint8_t* buffer, bool flushICache) {
+void Assembler::executableCopy(uint8_t* buffer) {
   AssemblerX86Shared::executableCopy(buffer);
 
   for (RelativePatch& rp : jumps_) {
     X86Encoding::SetRel32(buffer + rp.offset, rp.target);
   }
 }
 
 class RelocationIterator {
   CompactBufferReader reader_;
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -249,17 +249,17 @@ class Assembler : public AssemblerX86Sha
   using AssemblerX86Shared::vmovsd;
   using AssemblerX86Shared::vmovss;
 
   static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
                                    CompactBufferReader& reader);
 
   // Copy the assembly code to the given buffer, and perform any pending
   // relocations relying on the target address.
-  void executableCopy(uint8_t* buffer, bool flushICache = true);
+  void executableCopy(uint8_t* buffer);
 
   // Actual assembly emitting functions.
 
   void push(ImmGCPtr ptr) {
     masm.push_i32(int32_t(ptr.value));
     writeDataRelocation(ptr);
   }
   void push(const ImmWord imm) { push(Imm32(imm.value)); }
--- a/js/src/jsapi-tests/testJitMacroAssembler.cpp
+++ b/js/src/jsapi-tests/testJitMacroAssembler.cpp
@@ -39,17 +39,17 @@ static bool Execute(JSContext* cx, Macro
   LiveRegisterSet save(regs.asLiveSet());
   masm.PopRegsInMask(save);
   masm.ret();  // Add return statement to be sure.
 
   if (masm.oom()) {
     return false;
   }
 
-  Linker linker(masm, "Test");
+  Linker linker(masm);
   JitCode* code = linker.newCode(cx, CodeKind::Other);
   if (!code) {
     return false;
   }
   if (!ExecutableAllocator::makeExecutableAndFlushICache(code->raw(),
                                                          code->bufferSize())) {
     return false;
   }
--- a/js/src/jsapi-tests/testJitMoveEmitterCycles-mips32.cpp
+++ b/js/src/jsapi-tests/testJitMoveEmitterCycles-mips32.cpp
@@ -116,30 +116,29 @@ static constexpr js::jit::FloatRegister
     28, js::jit::FloatRegister::Double);
 static constexpr js::jit::FloatRegister double15(
     30, js::jit::FloatRegister::Double);
 
 static js::jit::JitCode* linkAndAllocate(JSContext* cx,
                                          js::jit::MacroAssembler* masm) {
   using namespace js;
   using namespace js::jit;
-  Linker l(*masm, "test");
+  Linker l(*masm);
   return l.newCode(cx, CodeKind::Ion);
 }
 
 #  define TRY(x) \
     if (!(x)) return false;
 
 BEGIN_TEST(testJitMoveEmitterCycles_simple) {
   using namespace js;
   using namespace js::jit;
   LifoAlloc lifo(LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
   TempAllocator alloc(&lifo);
   JitContext jc(cx, &alloc);
-  AutoFlushICache afc("test");
   StackMacroAssembler masm;
   MoveEmitter mover(masm);
   MoveResolver mr;
   mr.setAllocator(alloc);
   Simulator* sim = Simulator::Current();
   TRY(mr.addMove(MoveOperand(double0), MoveOperand(double2), MoveOp::DOUBLE));
   sim->setFpuRegisterDouble(double0.id(), 2.0);
@@ -170,17 +169,16 @@ BEGIN_TEST(testJitMoveEmitterCycles_simp
 }
 END_TEST(testJitMoveEmitterCycles_simple)
 
 BEGIN_TEST(testJitMoveEmitterCycles_autogen) {
   using namespace js;
   using namespace js::jit;
   LifoAlloc lifo(LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
   TempAllocator alloc(&lifo);
   JitContext jc(cx, &alloc);
-  AutoFlushICache afc("test");
   StackMacroAssembler masm;
   MoveEmitter mover(masm);
   MoveResolver mr;
   mr.setAllocator(alloc);
   Simulator* sim = Simulator::Current();
   sim->setFpuRegisterDouble(double9.id(), 9.0);
   TRY(mr.addMove(MoveOperand(single24), MoveOperand(single25),
                  MoveOp::FLOAT32));
@@ -263,17 +261,16 @@ BEGIN_TEST(testJitMoveEmitterCycles_auto
 END_TEST(testJitMoveEmitterCycles_autogen)
 
 BEGIN_TEST(testJitMoveEmitterCycles_autogen2) {
   using namespace js;
   using namespace js::jit;
   LifoAlloc lifo(LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
   TempAllocator alloc(&lifo);
   JitContext jc(cx, &alloc);
-  AutoFlushICache afc("test");
   StackMacroAssembler masm;
   MoveEmitter mover(masm);
   MoveResolver mr;
   mr.setAllocator(alloc);
   Simulator* sim = Simulator::Current();
   TRY(mr.addMove(MoveOperand(double10), MoveOperand(double0), MoveOp::DOUBLE));
   sim->setFpuRegisterDouble(double10.id(), 10.0);
   TRY(mr.addMove(MoveOperand(single15), MoveOperand(single3), MoveOp::FLOAT32));
@@ -369,17 +366,16 @@ BEGIN_TEST(testJitMoveEmitterCycles_auto
 END_TEST(testJitMoveEmitterCycles_autogen2)
 
 BEGIN_TEST(testJitMoveEmitterCycles_autogen3) {
   using namespace js;
   using namespace js::jit;
   LifoAlloc lifo(LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
   TempAllocator alloc(&lifo);
   JitContext jc(cx, &alloc);
-  AutoFlushICache afc("test");
   StackMacroAssembler masm;
   MoveEmitter mover(masm);
   MoveResolver mr;
   mr.setAllocator(alloc);
   Simulator* sim = Simulator::Current();
   TRY(mr.addMove(MoveOperand(single0), MoveOperand(single21), MoveOp::FLOAT32));
   sim->setFpuRegisterFloat(single0.id(), 0.0f);
   TRY(mr.addMove(MoveOperand(single2), MoveOperand(single26), MoveOp::FLOAT32));
--- a/js/src/jsapi-tests/testJitMoveEmitterCycles.cpp
+++ b/js/src/jsapi-tests/testJitMoveEmitterCycles.cpp
@@ -52,17 +52,17 @@ static constexpr js::jit::FloatRegister
 static constexpr js::jit::FloatRegister s29(29, js::jit::VFPRegister::Single);
 static constexpr js::jit::FloatRegister s30(30, js::jit::VFPRegister::Single);
 static constexpr js::jit::FloatRegister s31(31, js::jit::VFPRegister::Single);
 
 static js::jit::JitCode* linkAndAllocate(JSContext* cx,
                                          js::jit::MacroAssembler* masm) {
   using namespace js;
   using namespace js::jit;
-  Linker l(*masm, "test");
+  Linker l(*masm);
   return l.newCode(cx, CodeKind::Ion);
 }
 
 #  define TRY(x) \
     if (!(x)) return false;
 
 BEGIN_TEST(testJitMoveEmitterCycles_simple) {
   using namespace js;
--- a/js/src/vm/JSContext.cpp
+++ b/js/src/vm/JSContext.cpp
@@ -1239,17 +1239,16 @@ JSContext::JSContext(JSRuntime* runtime,
       hasAutoUnsafeCallWithABI(this, false),
 #endif
 #ifdef JS_SIMULATOR
       simulator_(this, nullptr),
 #endif
 #ifdef JS_TRACE_LOGGING
       traceLogger(nullptr),
 #endif
-      autoFlushICache_(this, nullptr),
       dtoaState(this, nullptr),
       suppressGC(this, 0),
       gcSweeping(this, false),
 #ifdef DEBUG
       isTouchingGrayThings(this, false),
       noNurseryAllocationCheck(this, 0),
       disableStrictProxyCheckingCount(this, 0),
 #endif
@@ -1293,18 +1292,17 @@ JSContext::JSContext(JSRuntime* runtime,
       jobQueue(this, nullptr),
       internalJobQueue(this),
       canSkipEnqueuingJobs(this, false),
       promiseRejectionTrackerCallback(this, nullptr),
       promiseRejectionTrackerCallbackData(this, nullptr),
 #ifdef JS_STRUCTURED_SPEW
       structuredSpewer_(),
 #endif
-      insideDebuggerEvaluationWithOnNativeCallHook(this, nullptr)
-{
+      insideDebuggerEvaluationWithOnNativeCallHook(this, nullptr) {
   MOZ_ASSERT(static_cast<JS::RootingContext*>(this) ==
              JS::RootingContext::get(this));
 }
 
 JSContext::~JSContext() {
   // Clear the ContextKind first, so that ProtectedData checks will allow us
   // to destroy this context even if the runtime is already gone.
   kind_ = ContextKind::HelperThread;
--- a/js/src/vm/JSContext.h
+++ b/js/src/vm/JSContext.h
@@ -518,24 +518,17 @@ struct JSContext : public JS::RootingCon
   js::jit::Simulator* simulator() const;
   uintptr_t* addressOfSimulatorStackLimit();
 #endif
 
 #ifdef JS_TRACE_LOGGING
   js::UnprotectedData<js::TraceLoggerThread*> traceLogger;
 #endif
 
- private:
-  /* Pointer to the current AutoFlushICache. */
-  js::ContextData<js::jit::AutoFlushICache*> autoFlushICache_;
-
- public:
-  js::jit::AutoFlushICache* autoFlushICache() const;
-  void setAutoFlushICache(js::jit::AutoFlushICache* afc);
-
   // State used by util/DoubleToString.cpp.
   js::ContextData<DtoaState*> dtoaState;
 
   /*
    * When this flag is non-zero, any attempt to GC will be skipped. It is used
    * to suppress GC when reporting an OOM (see ReportOutOfMemory) and in
    * debugging facilities that cannot tolerate a GC and would rather OOM
    * immediately, such as utilities exposed to GDB. Setting this flag is
@@ -1247,16 +1240,17 @@ class MOZ_RAII AutoKeepAtoms {
  public:
   explicit inline AutoKeepAtoms(JSContext* cx MOZ_GUARD_OBJECT_NOTIFIER_PARAM);
   inline ~AutoKeepAtoms();
 };
 
 class MOZ_RAII AutoNoteDebuggerEvaluationWithOnNativeCallHook {
   JSContext* cx;
   Debugger* oldValue;
 
+ public:
   AutoNoteDebuggerEvaluationWithOnNativeCallHook(JSContext* cx, Debugger* dbg)
       : cx(cx), oldValue(cx->insideDebuggerEvaluationWithOnNativeCallHook) {
     cx->insideDebuggerEvaluationWithOnNativeCallHook = dbg;
   }
 
   ~AutoNoteDebuggerEvaluationWithOnNativeCallHook() {
     cx->insideDebuggerEvaluationWithOnNativeCallHook = oldValue;
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -98,17 +98,16 @@ extern MOZ_COLD void ReportOverRecursed(
 class Activation;
 class ActivationIterator;
 
 namespace jit {
 
 class JitRuntime;
 class JitActivation;
 struct PcScriptCache;
-struct AutoFlushICache;
 class CompileRuntime;
 
 #ifdef JS_SIMULATOR_ARM64
 typedef vixl::Simulator Simulator;
 #elif defined(JS_SIMULATOR)
 class Simulator;
 #endif
 }  // namespace jit
--- a/js/src/wasm/WasmBuiltins.cpp
+++ b/js/src/wasm/WasmBuiltins.cpp
@@ -1199,17 +1199,17 @@ bool wasm::EnsureBuiltinThunksInitialize
   thunks->codeSize = allocSize;
   thunks->codeBase = (uint8_t*)AllocateExecutableMemory(
       allocSize, ProtectionSetting::Writable, MemCheckKind::MakeUndefined);
   if (!thunks->codeBase) {
     return false;
   }
 
-  masm.executableCopy(thunks->codeBase, /* flushICache = */ false);
+  masm.executableCopy(thunks->codeBase);
   memset(thunks->codeBase + masm.bytesNeeded(), 0,
          allocSize - masm.bytesNeeded());
 
   masm.processCodeLabels(thunks->codeBase);
   PatchDebugSymbolicAccesses(thunks->codeBase, masm);
 
   MOZ_ASSERT(masm.callSites().empty());
   MOZ_ASSERT(masm.callSiteTargets().empty());
--- a/js/src/wasm/WasmCode.cpp
+++ b/js/src/wasm/WasmCode.cpp
@@ -342,18 +342,17 @@ UniqueModuleSegment ModuleSegment::creat
                                           const LinkData& linkData) {
   uint32_t codeLength = masm.bytesNeeded();
 
   UniqueCodeBytes codeBytes = AllocateCodeBytes(codeLength);
   if (!codeBytes) {
     return nullptr;
   }
 
-  // We'll flush the icache after static linking, in initialize().
-  masm.executableCopy(codeBytes.get(), /* flushICache = */ false);
+  masm.executableCopy(codeBytes.get());
 
   return js::MakeUnique<ModuleSegment>(tier, std::move(codeBytes), codeLength,
                                        linkData);
 }
 
 /* static */
 UniqueModuleSegment ModuleSegment::create(Tier tier, const Bytes& unlinkedBytes,
                                           const LinkData& linkData) {
@@ -709,17 +708,17 @@ bool LazyStubTier::createMany(const Uint
   *stubSegmentIndex = lastStubSegmentIndex_;
 
   size_t interpRangeIndex;
   uint8_t* codePtr = nullptr;
   if (!segment->addStubs(codeLength, funcExportIndices, funcExports, codeRanges,
                          &codePtr, &interpRangeIndex))
     return false;
 
-  masm.executableCopy(codePtr, /* flushICache = */ false);
+  masm.executableCopy(codePtr);
   PatchDebugSymbolicAccesses(codePtr, masm);
   memset(codePtr + masm.bytesNeeded(), 0, codeLength - masm.bytesNeeded());
 
   for (const CodeLabel& label : masm.codeLabels()) {
     Assembler::Bind(codePtr, label);
   }
 
   if (!ExecutableAllocator::makeExecutableAndFlushICache(codePtr,
                                                          codeLength)) {
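The wasm hunks follow the same consolidation. ModuleSegment::create() previously passed flushICache = false because static linking patches the bytes again afterwards; now executableCopy() never flushes, and the one real flush happens when the segment is made executable, exactly as in the createMany() hunk above. Condensed from that hunk (details elided):

    masm.executableCopy(codePtr);  // copy while the pages are writable
    // ... bind code labels, patch symbolic accesses, zero the padding ...
    if (!ExecutableAllocator::makeExecutableAndFlushICache(codePtr,
                                                           codeLength)) {
      return false;  // single reprotect-to-RX plus icache flush
    }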
--- a/js/src/wasm/WasmCraneliftCompile.cpp
+++ b/js/src/wasm/WasmCraneliftCompile.cpp
@@ -422,17 +422,17 @@ bool wasm::CraneliftCompileFunctions(con
   if (jitSpew) {
     // The disassembler uses the jitspew for output, so re-enable now.
     EnableChannel(js::jit::JitSpew_Codegen);
 
     uint32_t totalCodeSize = masm.currentOffset();
     uint8_t* codeBuf = (uint8_t*)js_malloc(totalCodeSize);
     if (codeBuf) {
-      masm.executableCopy(codeBuf, totalCodeSize);
+      masm.executableCopy(codeBuf);
 
       const CodeRangeVector& codeRanges = code->codeRanges;
       MOZ_ASSERT(codeRanges.length() >= inputs.length());
 
       // Within the current batch, functions' code ranges have been added in
       // the same order as the inputs.
       size_t firstCodeRangeIndex = codeRanges.length() - inputs.length();
--- a/js/src/wasm/WasmDebug.cpp
+++ b/js/src/wasm/WasmDebug.cpp
@@ -107,17 +107,16 @@ bool DebugState::incrementStepperCount(J
   if (!stepperCounters_.add(p, funcIndex, 1)) {
     ReportOutOfMemory(cx);
     return false;
   }
 
   AutoWritableJitCode awjc(
       cx->runtime(), code_->segment(Tier::Debug).base() + codeRange.begin(),
       codeRange.end() - codeRange.begin());
-  AutoFlushICache afc("Code::incrementStepperCount");
 
   for (const CallSite& callSite : callSites(Tier::Debug)) {
     if (callSite.kind() != CallSite::Breakpoint) {
       continue;
     }
     uint32_t offset = callSite.returnAddressOffset();
     if (codeRange.begin() <= offset && offset <= codeRange.end()) {
       toggleDebugTrap(offset, true);
@@ -138,17 +137,16 @@ bool DebugState::decrementStepperCount(J
     return true;
   }
 
   stepperCounters_.remove(p);
 
   AutoWritableJitCode awjc(
       fop->runtime(), code_->segment(Tier::Debug).base() + codeRange.begin(),
       codeRange.end() - codeRange.begin());
-  AutoFlushICache afc("Code::decrementStepperCount");
 
   for (const CallSite& callSite : callSites(Tier::Debug)) {
     if (callSite.kind() != CallSite::Breakpoint) {
       continue;
     }
     uint32_t offset = callSite.returnAddressOffset();
     if (codeRange.begin() <= offset && offset <= codeRange.end()) {
       bool enabled = breakpointSites_.has(offset);
@@ -176,19 +174,16 @@ void DebugState::toggleBreakpointTrap(JS
       code_->lookupFuncRange(codeSegment.base() + debugTrapOffset);
   MOZ_ASSERT(codeRange);
 
   if (stepperCounters_.lookup(codeRange->funcIndex())) {
     return;  // no need to toggle when step mode is enabled
   }
 
   AutoWritableJitCode awjc(rt, codeSegment.base(), codeSegment.length());
-  AutoFlushICache afc("Code::toggleBreakpointTrap");
-  AutoFlushICache::setRange(uintptr_t(codeSegment.base()),
-                            codeSegment.length());
   toggleDebugTrap(debugTrapOffset, enabled);
 }
 
 WasmBreakpointSite* DebugState::getBreakpointSite(uint32_t offset) const {
   WasmBreakpointSiteMap::Ptr p = breakpointSites_.lookup(offset);
   if (!p) {
     return nullptr;
   }
@@ -299,19 +294,16 @@ void DebugState::adjustEnterAndLeaveFram
   bool stillEnabled = enterAndLeaveFrameTrapsCounter_ > 0;
   if (wasEnabled == stillEnabled) {
     return;
   }
 
   const ModuleSegment& codeSegment = code_->segment(Tier::Debug);
   AutoWritableJitCode awjc(cx->runtime(), codeSegment.base(),
                            codeSegment.length());
-  AutoFlushICache afc("Code::adjustEnterAndLeaveFrameTrapsState");
-  AutoFlushICache::setRange(uintptr_t(codeSegment.base()),
-                            codeSegment.length());
   for (const CallSite& callSite : callSites(Tier::Debug)) {
     if (callSite.kind() != CallSite::EnterFrame &&
         callSite.kind() != CallSite::LeaveFrame) {
       continue;
     }
     toggleDebugTrap(callSite.returnAddressOffset(), stillEnabled);
   }
 }
--- a/js/src/wasm/WasmGenerator.cpp
+++ b/js/src/wasm/WasmGenerator.cpp
@@ -1205,17 +1205,17 @@ SharedModule ModuleGenerator::finishModu
     MOZ_ASSERT(mode() == CompileMode::Once);
     MOZ_ASSERT(tier() == Tier::Debug);
 
     debugUnlinkedCode = js::MakeUnique<Bytes>();
     if (!debugUnlinkedCode || !debugUnlinkedCode->resize(masm_.bytesNeeded())) {
       return nullptr;
     }
 
-    masm_.executableCopy(debugUnlinkedCode->begin(), /* flushICache = */ false);
+    masm_.executableCopy(debugUnlinkedCode->begin());
 
     debugLinkData = std::move(linkData_);
     debugBytecode = &bytecode;
   }
 
   // All the components are finished, so create the complete Module and start
   // tier-2 compilation if requested.