From 2556e6d00985f78a5b364fc1927cbbddd7c667c8 Mon Sep 17 00:00:00 2001 From: Tamas Szentpeteri Date: Mon, 1 Jul 2024 12:21:46 +0300 Subject: [PATCH] Backed out 17 changesets (bug 1898153) as requested by Aryx for causing wasm crashes. Backed out changeset 6b2961327cb5 (bug 1898153) Backed out changeset ce76b2fd1fdd (bug 1898153) Backed out changeset 0d18640a68ae (bug 1898153) Backed out changeset ff59e2597abb (bug 1898153) Backed out changeset 3e72652d9496 (bug 1898153) Backed out changeset 3b80e07c38bd (bug 1898153) Backed out changeset 77d395f4c562 (bug 1898153) Backed out changeset b438aa6a0a81 (bug 1898153) Backed out changeset 78c1ed882b98 (bug 1898153) Backed out changeset 38d3fe022100 (bug 1898153) Backed out changeset ca74887894e6 (bug 1898153) Backed out changeset 7e13f23fb1c9 (bug 1898153) Backed out changeset 23f8fa8e529f (bug 1898153) Backed out changeset 621c4254adf7 (bug 1898153) Backed out changeset 97276733ce7a (bug 1898153) Backed out changeset 1de85a4ce7ae (bug 1898153) Backed out changeset 7f2ace29e124 (bug 1898153) --- js/src/builtin/TestingFunctions.cpp | 24 +- js/src/jit/CacheIR.cpp | 11 +- js/src/jit/CacheIROps.yaml | 2 +- js/src/jit/CodeGenerator.cpp | 2 +- js/src/jit/MIR-wasm.cpp | 4 +- js/src/jit/WarpCacheIRTranspiler.cpp | 2 +- js/src/jit/arm/Simulator-arm.cpp | 4 +- js/src/vm/MutexIDs.h | 4 +- js/src/wasm/WasmBuiltins.cpp | 36 +- js/src/wasm/WasmBuiltins.h | 7 +- js/src/wasm/WasmCode.cpp | 1112 +++++++++++++------------- js/src/wasm/WasmCode.h | 833 ++++++------------- js/src/wasm/WasmCodegenTypes.h | 12 - js/src/wasm/WasmDebug.cpp | 33 +- js/src/wasm/WasmDebug.h | 15 +- js/src/wasm/WasmFrameIter.cpp | 24 +- js/src/wasm/WasmGenerator.cpp | 675 +++++++--------- js/src/wasm/WasmGenerator.h | 62 +- js/src/wasm/WasmInstance-inl.h | 8 + js/src/wasm/WasmInstance.cpp | 183 +++-- js/src/wasm/WasmInstance.h | 3 + js/src/wasm/WasmJS.cpp | 87 +- js/src/wasm/WasmJS.h | 6 +- js/src/wasm/WasmModule.cpp | 123 ++- js/src/wasm/WasmModule.h | 19 +- js/src/wasm/WasmPI.cpp | 8 +- js/src/wasm/WasmProcess.cpp | 263 +++++- js/src/wasm/WasmProcess.h | 14 +- js/src/wasm/WasmRealm.cpp | 17 +- js/src/wasm/WasmSerialize.cpp | 223 +++--- js/src/wasm/WasmSerialize.h | 1 - js/src/wasm/WasmSignalHandlers.cpp | 34 +- js/src/wasm/WasmStubs.cpp | 102 +-- js/src/wasm/WasmStubs.h | 8 +- js/src/wasm/WasmSummarizeInsn.h | 2 +- js/src/wasm/WasmTable.cpp | 15 +- js/src/wasm/WasmTypeDecls.h | 4 +- 37 files changed, 1968 insertions(+), 2014 deletions(-) diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctions.cpp index 9072c7893678a..38b76f18fd1e3 100644 --- a/js/src/builtin/TestingFunctions.cpp +++ b/js/src/builtin/TestingFunctions.cpp @@ -1700,6 +1700,7 @@ static bool WasmExtractCode(JSContext* cx, unsigned argc, Value* vp) { } wasm::Tier tier = module->module().code().stableTier(); + ; if (args.length() > 1 && !ConvertToTier(cx, args[1], module->module().code(), &tier)) { args.rval().setNull(); @@ -1771,11 +1772,14 @@ static bool DisassembleNative(JSContext* cx, unsigned argc, Value* vp) { js::wasm::Instance& inst = fun->wasmInstance(); const js::wasm::Code& code = inst.code(); + js::wasm::Tier tier = code.bestTier(); + + const js::wasm::MetadataTier& meta = inst.metadata(tier); + + const js::wasm::CodeSegment& segment = code.segment(tier); const uint32_t funcIndex = code.getFuncIndex(&*fun); - const js::wasm::CodeBlock& codeBlock = inst.code().funcCodeBlock(funcIndex); - const js::wasm::CodeSegment& segment = *codeBlock.segment; - const js::wasm::FuncExport& func = 
codeBlock.lookupFuncExport(funcIndex); - const js::wasm::CodeRange& codeRange = codeBlock.codeRange(func); + const js::wasm::FuncExport& func = meta.lookupFuncExport(funcIndex); + const js::wasm::CodeRange& codeRange = meta.codeRange(func); jit_begin = segment.base() + codeRange.begin(); jit_end = segment.base() + codeRange.end(); @@ -1883,6 +1887,11 @@ static bool ComputeTier(JSContext* cx, const wasm::Code& code, return false; } + if (!code.hasTier(*tier)) { + JS_ReportErrorASCII(cx, "function missing selected tier"); + return false; + } + return true; } @@ -1915,18 +1924,13 @@ static bool WasmDisassembleFunction(JSContext* cx, const HandleFunction& func, HandleValue tierSelection, bool asString, MutableHandleValue rval) { wasm::Instance& instance = wasm::ExportedFunctionToInstance(func); - uint32_t funcIndex = wasm::ExportedFunctionToFuncIndex(func); wasm::Tier tier; if (!ComputeTier(cx, instance.code(), tierSelection, &tier)) { return false; } - if (!instance.code().funcHasTier(funcIndex, tier)) { - JS_ReportErrorASCII(cx, "function missing selected tier"); - return false; - } - + uint32_t funcIndex = wasm::ExportedFunctionToFuncIndex(func); return DisassembleIt( cx, asString, rval, [&](void (*captureText)(const char*)) { instance.disassembleExport(cx, funcIndex, tier, captureText); diff --git a/js/src/jit/CacheIR.cpp b/js/src/jit/CacheIR.cpp index 721986d5749e9..839e0af9a1152 100644 --- a/js/src/jit/CacheIR.cpp +++ b/js/src/jit/CacheIR.cpp @@ -11461,11 +11461,12 @@ AttachDecision CallIRGenerator::tryAttachWasmCall(HandleFunction calleeFunc) { } wasm::Instance& inst = wasm::ExportedFunctionToInstance(calleeFunc); - uint32_t funcIndex = wasm::ExportedFunctionToFuncIndex(calleeFunc); - const wasm::CodeBlock& codeBlock = inst.code().funcCodeBlock(funcIndex); - const wasm::FuncExport& funcExport = codeBlock.lookupFuncExport(funcIndex); - const wasm::FuncType& sig = - wasm::ExportedFunctionToTypeDef(calleeFunc).funcType(); + uint32_t funcIndex = inst.code().getFuncIndex(calleeFunc); + + auto bestTier = inst.code().bestTier(); + const wasm::FuncExport& funcExport = + inst.metadata(bestTier).lookupFuncExport(funcIndex); + const wasm::FuncType& sig = inst.codeMeta().getFuncExportType(funcExport); MOZ_ASSERT(!IsInsideNursery(inst.object())); MOZ_ASSERT(sig.canHaveJitEntry(), "Function should allow a Wasm JitEntry"); diff --git a/js/src/jit/CacheIROps.yaml b/js/src/jit/CacheIROps.yaml index f626c29f85c3e..ffe825349fdfb 100644 --- a/js/src/jit/CacheIROps.yaml +++ b/js/src/jit/CacheIROps.yaml @@ -1993,7 +1993,7 @@ argc: Int32Id flags: CallFlagsImm argcFixed: UInt32Imm - funcType: RawPointerField + funcExport: RawPointerField instance: ObjectField - name: GuardWasmArg diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp index 25dec9faaa475..09e922b58045b 100644 --- a/js/src/jit/CodeGenerator.cpp +++ b/js/src/jit/CodeGenerator.cpp @@ -21433,7 +21433,7 @@ void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase* lir) { MIonToWasmCall* mir = lir->mir(); const wasm::FuncExport& funcExport = mir->funcExport(); const wasm::FuncType& sig = - mir->instance()->code().getFuncExportType(funcExport); + mir->instance()->codeMeta().getFuncExportType(funcExport); WasmABIArgGenerator abi; for (size_t i = 0; i < lir->numOperands(); i++) { diff --git a/js/src/jit/MIR-wasm.cpp b/js/src/jit/MIR-wasm.cpp index b2516c9930755..96fa63032c01e 100644 --- a/js/src/jit/MIR-wasm.cpp +++ b/js/src/jit/MIR-wasm.cpp @@ -858,7 +858,7 @@ MIonToWasmCall* MIonToWasmCall::New(TempAllocator& alloc, 
WasmInstanceObject* instanceObj, const wasm::FuncExport& funcExport) { const wasm::FuncType& funcType = - instanceObj->instance().code().getFuncExportType(funcExport); + instanceObj->instance().codeMeta().getFuncExportType(funcExport); const wasm::ValTypeVector& results = funcType.results(); MIRType resultType = MIRType::Value; // At the JS boundary some wasm types must be represented as a Value, and in @@ -879,7 +879,7 @@ MIonToWasmCall* MIonToWasmCall::New(TempAllocator& alloc, #ifdef DEBUG bool MIonToWasmCall::isConsistentFloat32Use(MUse* use) const { const wasm::FuncType& funcType = - instance()->code().getFuncExportType(funcExport_); + instance()->codeMeta().getFuncExportType(funcExport_); return funcType.args()[use->index()].kind() == wasm::ValType::F32; } #endif diff --git a/js/src/jit/WarpCacheIRTranspiler.cpp b/js/src/jit/WarpCacheIRTranspiler.cpp index ab0a1904efbf3..e9c1e2ffd5683 100644 --- a/js/src/jit/WarpCacheIRTranspiler.cpp +++ b/js/src/jit/WarpCacheIRTranspiler.cpp @@ -5992,7 +5992,7 @@ bool WarpCacheIRTranspiler::emitCallWasmFunction( auto* wasmInstanceObj = &instanceObject->as(); const wasm::FuncExport* funcExport = wasmFuncExportField(funcExportOffset); const wasm::FuncType& sig = - wasmInstanceObj->instance().code().getFuncExportType(*funcExport); + wasmInstanceObj->instance().codeMeta().getFuncExportType(*funcExport); if (!updateCallInfo(callee, flags)) { return false; diff --git a/js/src/jit/arm/Simulator-arm.cpp b/js/src/jit/arm/Simulator-arm.cpp index 4859a412b9552..3c72f05572e0f 100644 --- a/js/src/jit/arm/Simulator-arm.cpp +++ b/js/src/jit/arm/Simulator-arm.cpp @@ -2432,14 +2432,14 @@ void Simulator::softwareInterrupt(SimInstruction* instr) { } void Simulator::canonicalizeNaN(double* value) { - if (!wasm::CodeExists && !wasm::LookupCodeBlock(get_pc_as()) && + if (!wasm::CodeExists && !wasm::LookupCodeSegment(get_pc_as()) && FPSCR_default_NaN_mode_) { *value = JS::CanonicalizeNaN(*value); } } void Simulator::canonicalizeNaN(float* value) { - if (!wasm::CodeExists && !wasm::LookupCodeBlock(get_pc_as()) && + if (!wasm::CodeExists && !wasm::LookupCodeSegment(get_pc_as()) && FPSCR_default_NaN_mode_) { *value = JS::CanonicalizeNaN(*value); } diff --git a/js/src/vm/MutexIDs.h b/js/src/vm/MutexIDs.h index ce961a7a1fb6e..dc0e5b3c2cc56 100644 --- a/js/src/vm/MutexIDs.h +++ b/js/src/vm/MutexIDs.h @@ -23,7 +23,7 @@ _(WellKnownParserAtomsInit, 100) \ \ _(WasmInitBuiltinThunks, 250) \ - _(WasmCodeProtected, 250) \ + _(WasmLazyStubsTier1, 250) \ _(WasmLazyStubsTier2, 251) \ \ _(StoreBuffer, 275) \ @@ -64,7 +64,7 @@ _(SharedImmutableStringsCache, 600) \ _(IrregexpLazyStatic, 600) \ _(ThreadId, 600) \ - _(WasmCodeBlockMap, 600) \ + _(WasmCodeSegmentMap, 600) \ _(VTuneLock, 600) \ _(ShellTelemetry, 600) \ _(ShellUseCounters, 600) diff --git a/js/src/wasm/WasmBuiltins.cpp b/js/src/wasm/WasmBuiltins.cpp index 38c0ee15d7826..1da6f9964e7c2 100644 --- a/js/src/wasm/WasmBuiltins.cpp +++ b/js/src/wasm/WasmBuiltins.cpp @@ -639,13 +639,14 @@ static WasmExceptionObject* GetOrWrapWasmException(JitActivation* activation, return nullptr; } -static const wasm::TryNote* FindNonDelegateTryNote( - const wasm::Code& code, const uint8_t* pc, const CodeBlock** codeBlock) { - const wasm::TryNote* tryNote = code.lookupTryNote((void*)pc, codeBlock); +static const wasm::TryNote* FindNonDelegateTryNote(const wasm::Code& code, + const uint8_t* pc, + Tier* tier) { + const wasm::TryNote* tryNote = code.lookupTryNote((void*)pc, tier); while (tryNote && tryNote->isDelegate()) { - pc = 
(*codeBlock)->segment->base() + tryNote->delegateOffset(); - const wasm::TryNote* delegateTryNote = - code.lookupTryNote((void*)pc, codeBlock); + const wasm::CodeTier& codeTier = code.codeTier(*tier); + pc = codeTier.segment().base() + tryNote->delegateOffset(); + const wasm::TryNote* delegateTryNote = code.lookupTryNote((void*)pc, tier); MOZ_RELEASE_ASSERT(delegateTryNote == nullptr || delegateTryNote->tryBodyBegin() < tryNote->tryBodyBegin()); @@ -709,11 +710,10 @@ bool wasm::HandleThrow(JSContext* cx, WasmFrameIter& iter, // Only look for an exception handler if there's a catchable exception. if (wasmExn) { + Tier tier; const wasm::Code& code = iter.instance()->code(); const uint8_t* pc = iter.resumePCinCurrentFrame(); - const wasm::CodeBlock* codeBlock = nullptr; - const wasm::TryNote* tryNote = - FindNonDelegateTryNote(code, pc, &codeBlock); + const wasm::TryNote* tryNote = FindNonDelegateTryNote(code, pc, &tier); if (tryNote) { #ifdef ENABLE_WASM_TAIL_CALLS @@ -736,7 +736,7 @@ bool wasm::HandleThrow(JSContext* cx, WasmFrameIter& iter, rfe->stackPointer = (uint8_t*)(rfe->framePointer - tryNote->landingPadFramePushed()); rfe->target = - codeBlock->segment->base() + tryNote->landingPadEntryPoint(); + iter.instance()->codeBase(tier) + tryNote->landingPadEntryPoint(); // Make sure to clear trapping state if we got here due to a trap. if (activation->isWasmTrapping()) { @@ -965,12 +965,14 @@ static void* BoxValue_Anyref(Value* rawVal) { return result.get().forCompiledCode(); } -static int32_t CoerceInPlace_JitEntry(int funcIndex, Instance* instance, +static int32_t CoerceInPlace_JitEntry(int funcExportIndex, Instance* instance, Value* argv) { JSContext* cx = TlsContext.get(); // Cold code const Code& code = instance->code(); - const FuncType& funcType = code.getFuncExportType(funcIndex); + const FuncExport& fe = + code.metadata(code.stableTier()).funcExports[funcExportIndex]; + const FuncType& funcType = code.codeMeta().getFuncExportType(fe); for (size_t i = 0; i < funcType.args().length(); i++) { HandleValue arg = HandleValue::fromMarkedLocation(&argv[i]); @@ -1904,12 +1906,6 @@ Mutex initBuiltinThunks(mutexid::WasmInitBuiltinThunks); Atomic builtinThunks; bool wasm::EnsureBuiltinThunksInitialized() { - AutoMarkJitCodeWritableForThread writable; - return EnsureBuiltinThunksInitialized(writable); -} - -bool wasm::EnsureBuiltinThunksInitialized( - AutoMarkJitCodeWritableForThread& writable) { LockGuard guard(initBuiltinThunks); if (builtinThunks) { return true; @@ -2014,6 +2010,8 @@ bool wasm::EnsureBuiltinThunksInitialized( return false; } + AutoMarkJitCodeWritableForThread writable; + masm.executableCopy(thunks->codeBase); memset(thunks->codeBase + masm.bytesNeeded(), 0, allocSize - masm.bytesNeeded()); @@ -2150,7 +2148,7 @@ void* wasm::MaybeGetBuiltinThunk(JSFunction* f, const FuncType& funcType) { } bool wasm::LookupBuiltinThunk(void* pc, const CodeRange** codeRange, - const uint8_t** codeBase) { + uint8_t** codeBase) { if (!builtinThunks) { return false; } diff --git a/js/src/wasm/WasmBuiltins.h b/js/src/wasm/WasmBuiltins.h index cec45a0be2455..55f9b55a9c018 100644 --- a/js/src/wasm/WasmBuiltins.h +++ b/js/src/wasm/WasmBuiltins.h @@ -25,9 +25,8 @@ namespace js { namespace jit { -class AutoMarkJitCodeWritableForThread; struct ResumeFromException; -} // namespace jit +} namespace wasm { class WasmFrameIter; @@ -304,7 +303,7 @@ bool NeedsBuiltinThunk(SymbolicAddress sym); // CodeRange is relative to. 
bool LookupBuiltinThunk(void* pc, const CodeRange** codeRange, - const uint8_t** codeBase); + uint8_t** codeBase); // EnsureBuiltinThunksInitialized() must be called, and must succeed, before // SymbolicAddressTarget() or MaybeGetBuiltinThunk(). This function creates all @@ -313,8 +312,6 @@ bool LookupBuiltinThunk(void* pc, const CodeRange** codeRange, // executable code has been released. bool EnsureBuiltinThunksInitialized(); -bool EnsureBuiltinThunksInitialized( - jit::AutoMarkJitCodeWritableForThread& writable); bool HandleThrow(JSContext* cx, WasmFrameIter& iter, jit::ResumeFromException* rfe); diff --git a/js/src/wasm/WasmCode.cpp b/js/src/wasm/WasmCode.cpp index bafc710eff00e..bd1e183ad34b9 100644 --- a/js/src/wasm/WasmCode.cpp +++ b/js/src/wasm/WasmCode.cpp @@ -29,7 +29,6 @@ #include "jit/Disassemble.h" #include "jit/ExecutableAllocator.h" -#include "jit/FlushICache.h" // for FlushExecutionContextForAllThreads #include "jit/MacroAssembler.h" #include "jit/PerfSpewer.h" #include "util/Poison.h" @@ -59,6 +58,12 @@ size_t LinkData::SymbolicLinkArray::sizeOfExcludingThis( return size; } +CodeSegment::~CodeSegment() { + if (unregisterOnDestroy_) { + UnregisterCodeSegment(this); + } +} + static uint32_t RoundupCodeLength(uint32_t codeLength) { // AllocateExecutableMemory() requires a multiple of ExecutableCodePageSize. return RoundUp(codeLength, ExecutableCodePageSize); @@ -106,6 +111,34 @@ UniqueCodeBytes wasm::AllocateCodeBytes( return UniqueCodeBytes((uint8_t*)p, FreeCode(roundedCodeLength)); } +bool CodeSegment::initialize(const CodeTier& codeTier) { + MOZ_ASSERT(!initialized()); + codeTier_ = &codeTier; + MOZ_ASSERT(initialized()); + + // In the case of tiering, RegisterCodeSegment() immediately makes this code + // segment live to access from other threads executing the containing + // module. So only call once the CodeSegment is fully initialized. + if (!RegisterCodeSegment(this)) { + return false; + } + + // This bool is only used by the destructor which cannot be called racily + // and so it is not a problem to mutate it after RegisterCodeSegment(). 
+ MOZ_ASSERT(!unregisterOnDestroy_); + unregisterOnDestroy_ = true; + return true; +} + +const Code& CodeSegment::code() const { + MOZ_ASSERT(codeTier_); + return codeTier_->code(); +} + +void CodeSegment::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code) const { + *code += RoundupCodeLength(length()); +} + void FreeCode::operator()(uint8_t* bytes) { MOZ_ASSERT(codeLength); MOZ_ASSERT(codeLength == RoundupCodeLength(codeLength)); @@ -116,13 +149,13 @@ void FreeCode::operator()(uint8_t* bytes) { DeallocateExecutableMemory(bytes, codeLength); } -bool wasm::StaticallyLink(jit::AutoMarkJitCodeWritableForThread& writable, - uint8_t* base, const LinkData& linkData, - const CodeBlock* maybeSharedStubs) { - if (!EnsureBuiltinThunksInitialized(writable)) { +bool wasm::StaticallyLink(const ModuleSegment& ms, const LinkData& linkData) { + if (!EnsureBuiltinThunksInitialized()) { return false; } + AutoMarkJitCodeWritableForThread writable; + for (LinkData::InternalLink link : linkData.internalLinks) { CodeLabel label; label.patchAt()->bind(link.patchAtOffset); @@ -130,7 +163,7 @@ bool wasm::StaticallyLink(jit::AutoMarkJitCodeWritableForThread& writable, #ifdef JS_CODELABEL_LINKMODE label.setLinkMode(static_cast(link.mode)); #endif - Assembler::Bind(base, label); + Assembler::Bind(ms.base(), label); } for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) { @@ -141,7 +174,7 @@ bool wasm::StaticallyLink(jit::AutoMarkJitCodeWritableForThread& writable, void* target = SymbolicAddressTarget(imm); for (uint32_t offset : offsets) { - uint8_t* patchAt = base + offset; + uint8_t* patchAt = ms.base() + offset; Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt), PatchedImmPtr(target), PatchedImmPtr((void*)-1)); @@ -183,7 +216,7 @@ static bool AppendToString(const char* str, UTF8Bytes* bytes) { } static void SendCodeRangesToProfiler( - const uint8_t* segmentBase, const CodeMetadata& codeMeta, + const ModuleSegment& ms, const CodeMetadata& codeMeta, const CodeMetadataForAsmJS* codeMetaForAsmJS, const CodeRangeVector& codeRanges) { bool enabled = false; @@ -200,7 +233,7 @@ static void SendCodeRangesToProfiler( continue; } - uintptr_t start = uintptr_t(segmentBase + codeRange.begin()); + uintptr_t start = uintptr_t(ms.base() + codeRange.begin()); uintptr_t size = codeRange.end() - codeRange.begin(); UTF8Bytes name; @@ -267,95 +300,179 @@ static void SendCodeRangesToProfiler( } } -bool CodeSegment::linkAndMakeExecutable( - jit::AutoMarkJitCodeWritableForThread& writable, const LinkData& linkData, - const CodeBlock* maybeSharedStubs) { - if (!StaticallyLink(writable, bytes_.get(), linkData, maybeSharedStubs)) { - return false; - } +ModuleSegment::ModuleSegment(Tier tier, UniqueCodeBytes codeBytes, + uint32_t codeLength, const LinkData& linkData) + : CodeSegment(std::move(codeBytes), codeLength, CodeSegment::Kind::Module), + tier_(tier), + trapCode_(base() + linkData.trapOffset) {} - // Optimized compilation finishes on a background thread, so we must make sure - // to flush the icaches of all the executing threads. - // Reprotect the whole region to avoid having separate RW and RX mappings. 
- return ExecutableAllocator::makeExecutableAndFlushICache( - base(), RoundupCodeLength(lengthBytes())); -} +/* static */ +UniqueModuleSegment ModuleSegment::create(Tier tier, MacroAssembler& masm, + const LinkData& linkData) { + uint32_t codeLength = masm.bytesNeeded(); -SharedCodeSegment CodeSegment::createEmpty(size_t capacityBytes) { - uint32_t codeLength = 0; - uint32_t codeCapacity = RoundupCodeLength(capacityBytes); Maybe writable; - UniqueCodeBytes codeBytes = AllocateCodeBytes(writable, codeCapacity); + UniqueCodeBytes codeBytes = AllocateCodeBytes(writable, codeLength); if (!codeBytes) { return nullptr; } - return js_new(std::move(codeBytes), codeLength, codeCapacity); + masm.executableCopy(codeBytes.get()); + + return js::MakeUnique(tier, std::move(codeBytes), codeLength, + linkData); } /* static */ -SharedCodeSegment CodeSegment::createFromMasm( - MacroAssembler& masm, const LinkData& linkData, - const CodeBlock* maybeSharedStubs) { - uint32_t codeLength = masm.bytesNeeded(); - if (codeLength == 0) { - return js_new(nullptr, 0, 0); - } +UniqueModuleSegment ModuleSegment::create(Tier tier, const Bytes& unlinkedBytes, + const LinkData& linkData) { + uint32_t codeLength = unlinkedBytes.length(); - uint32_t codeCapacity = RoundupCodeLength(codeLength); Maybe writable; - UniqueCodeBytes codeBytes = AllocateCodeBytes(writable, codeCapacity); + UniqueCodeBytes codeBytes = AllocateCodeBytes(writable, codeLength); if (!codeBytes) { return nullptr; } - masm.executableCopy(codeBytes.get()); + memcpy(codeBytes.get(), unlinkedBytes.begin(), codeLength); - SharedCodeSegment segment = - js_new(std::move(codeBytes), codeLength, codeCapacity); - if (!segment || - !segment->linkAndMakeExecutable(*writable, linkData, maybeSharedStubs)) { - return nullptr; + return js::MakeUnique(tier, std::move(codeBytes), codeLength, + linkData); +} + +bool ModuleSegment::initialize(const CodeTier& codeTier, + const LinkData& linkData, + const CodeMetadata& codeMeta, + const CodeMetadataForAsmJS* codeMetaForAsmJS, + const MetadataTier& metadataTier) { + if (!StaticallyLink(*this, linkData)) { + return false; } - return segment; + // Optimized compilation finishes on a background thread, so we must make sure + // to flush the icaches of all the executing threads. + // Reprotect the whole region to avoid having separate RW and RX mappings. + if (!ExecutableAllocator::makeExecutableAndFlushICache( + base(), RoundupCodeLength(length()))) { + return false; + } + + SendCodeRangesToProfiler(*this, codeMeta, codeMetaForAsmJS, + metadataTier.codeRanges); + + // See comments in CodeSegment::initialize() for why this must be last. 
+ return CodeSegment::initialize(codeTier); } -/* static */ -SharedCodeSegment CodeSegment::createFromBytes( - const uint8_t* unlinkedBytes, size_t unlinkedBytesLength, - const LinkData& linkData, const CodeBlock* maybeSharedStubs) { - uint32_t codeLength = unlinkedBytesLength; - if (codeLength == 0) { - return js_new(nullptr, 0, 0); - } +void ModuleSegment::addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, + size_t* code, size_t* data) const { + CodeSegment::addSizeOfMisc(mallocSizeOf, code); + *data += mallocSizeOf(this); +} + +const CodeRange* ModuleSegment::lookupRange(const void* pc) const { + return codeTier().lookupRange(pc); +} - uint32_t codeCapacity = RoundupCodeLength(codeLength); +size_t CacheableChars::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const { + return mallocSizeOf(get()); +} + +size_t MetadataTier::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const { + return funcToCodeRange.sizeOfExcludingThis(mallocSizeOf) + + codeRanges.sizeOfExcludingThis(mallocSizeOf) + + callSites.sizeOfExcludingThis(mallocSizeOf) + + tryNotes.sizeOfExcludingThis(mallocSizeOf) + + codeRangeUnwindInfos.sizeOfExcludingThis(mallocSizeOf) + + trapSites.sizeOfExcludingThis(mallocSizeOf) + + stackMaps.sizeOfExcludingThis(mallocSizeOf) + + funcImports.sizeOfExcludingThis(mallocSizeOf) + + funcExports.sizeOfExcludingThis(mallocSizeOf); +} + +UniqueLazyStubSegment LazyStubSegment::create(const CodeTier& codeTier, + size_t length) { Maybe writable; - UniqueCodeBytes codeBytes = AllocateCodeBytes(writable, codeLength); + UniqueCodeBytes codeBytes = AllocateCodeBytes(writable, length); if (!codeBytes) { return nullptr; } - memcpy(codeBytes.get(), unlinkedBytes, unlinkedBytesLength); - - SharedCodeSegment segment = - js_new(std::move(codeBytes), codeLength, codeCapacity); - if (!segment || - !segment->linkAndMakeExecutable(*writable, linkData, maybeSharedStubs)) { + auto segment = js::MakeUnique(std::move(codeBytes), length); + if (!segment || !segment->initialize(codeTier)) { return nullptr; } + return segment; } -void CodeSegment::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, - size_t* data) const { - *code += capacityBytes(); - *data += mallocSizeOf(this); +bool LazyStubSegment::hasSpace(size_t bytes) const { + MOZ_ASSERT(AlignBytesNeeded(bytes) == bytes); + return bytes <= length() && usedBytes_ <= length() - bytes; } -size_t CacheableChars::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const { - return mallocSizeOf(get()); +bool LazyStubSegment::addStubs(const CodeMetadata& codeMeta, size_t codeLength, + const Uint32Vector& funcExportIndices, + const FuncExportVector& funcExports, + const CodeRangeVector& codeRanges, + uint8_t** codePtr, + size_t* indexFirstInsertedCodeRange) { + MOZ_ASSERT(hasSpace(codeLength)); + + size_t offsetInSegment = usedBytes_; + *codePtr = base() + usedBytes_; + usedBytes_ += codeLength; + + *indexFirstInsertedCodeRange = codeRanges_.length(); + + if (!codeRanges_.reserve(codeRanges_.length() + 2 * codeRanges.length())) { + return false; + } + + size_t i = 0; + for (uint32_t funcExportIndex : funcExportIndices) { + const FuncExport& fe = funcExports[funcExportIndex]; + const FuncType& funcType = codeMeta.getFuncExportType(fe); + const CodeRange& interpRange = codeRanges[i]; + MOZ_ASSERT(interpRange.isInterpEntry()); + MOZ_ASSERT(interpRange.funcIndex() == + funcExports[funcExportIndex].funcIndex()); + + codeRanges_.infallibleAppend(interpRange); + codeRanges_.back().offsetBy(offsetInSegment); + i++; + + if (!funcType.canHaveJitEntry()) { + continue; + } + + 
const CodeRange& jitRange = codeRanges[i]; + MOZ_ASSERT(jitRange.isJitEntry()); + MOZ_ASSERT(jitRange.funcIndex() == interpRange.funcIndex()); + + codeRanges_.infallibleAppend(jitRange); + codeRanges_.back().offsetBy(offsetInSegment); + i++; + } + + return true; +} + +const CodeRange* LazyStubSegment::lookupRange(const void* pc) const { + // Do not search if the search will not find anything. There can be many + // segments, each with many entries. + if (pc < base() || pc >= base() + length()) { + return nullptr; + } + return LookupInSorted(codeRanges_, + CodeRange::OffsetInCode((uint8_t*)pc - base())); +} + +void LazyStubSegment::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, + size_t* data) const { + CodeSegment::addSizeOfMisc(mallocSizeOf, code); + *data += codeRanges_.sizeOfExcludingThis(mallocSizeOf); + *data += mallocSizeOf(this); } // When allocating a single stub to a page, we should not always place the stub @@ -384,10 +501,10 @@ static void PadCodeForSingleStub(MacroAssembler& masm) { static constexpr unsigned LAZY_STUB_LIFO_DEFAULT_CHUNK_SIZE = 8 * 1024; -bool Code::createManyLazyEntryStubs(const WriteGuard& guard, - const Uint32Vector& funcExportIndices, - const CodeBlock& tierCodeBlock, - size_t* stubBlockIndex) const { +bool LazyStubTier::createManyEntryStubs(const Uint32Vector& funcExportIndices, + const CodeMetadata& codeMeta, + const CodeTier& codeTier, + size_t* stubSegmentIndex) { MOZ_ASSERT(funcExportIndices.length()); LifoAlloc lifo(LAZY_STUB_LIFO_DEFAULT_CHUNK_SIZE); @@ -399,18 +516,19 @@ bool Code::createManyLazyEntryStubs(const WriteGuard& guard, PadCodeForSingleStub(masm); } - const FuncExportVector& funcExports = tierCodeBlock.funcExports; - uint8_t* segmentBase = tierCodeBlock.segment->base(); + const MetadataTier& metadataTier = codeTier.metadata(); + const FuncExportVector& funcExports = metadataTier.funcExports; + uint8_t* moduleSegmentBase = codeTier.segment().base(); CodeRangeVector codeRanges; DebugOnly numExpectedRanges = 0; for (uint32_t funcExportIndex : funcExportIndices) { const FuncExport& fe = funcExports[funcExportIndex]; - const FuncType& funcType = getFuncExportType(fe); + const FuncType& funcType = codeMeta.getFuncExportType(fe); // Exports that don't support a jit entry get only the interp entry. numExpectedRanges += (funcType.canHaveJitEntry() ? 
2 : 1); void* calleePtr = - segmentBase + tierCodeBlock.codeRange(fe).funcUncheckedCallEntry(); + moduleSegmentBase + metadataTier.codeRange(fe).funcUncheckedCallEntry(); Maybe callee; callee.emplace(calleePtr, ImmPtr::NoCheckToken()); if (!GenerateEntryStubs(masm, funcExportIndex, fe, funcType, callee, @@ -433,37 +551,31 @@ bool Code::createManyLazyEntryStubs(const WriteGuard& guard, return false; } - size_t codeLength = CodeSegment::AlignBytesNeeded(masm.bytesNeeded()); + size_t codeLength = LazyStubSegment::AlignBytesNeeded(masm.bytesNeeded()); - if (guard->lazySegments.length() == 0 || - !guard->lazySegments[guard->lazySegments.length() - 1]->hasSpace( - codeLength)) { - SharedCodeSegment newSegment = CodeSegment::createEmpty(codeLength); + if (!stubSegments_.length() || + !stubSegments_[lastStubSegmentIndex_]->hasSpace(codeLength)) { + size_t newSegmentSize = std::max(codeLength, ExecutableCodePageSize); + UniqueLazyStubSegment newSegment = + LazyStubSegment::create(codeTier, newSegmentSize); if (!newSegment) { return false; } - if (!guard->lazySegments.emplaceBack(std::move(newSegment))) { + lastStubSegmentIndex_ = stubSegments_.length(); + if (!stubSegments_.emplaceBack(std::move(newSegment))) { return false; } } - MOZ_ASSERT(guard->lazySegments.length() > 0); - CodeSegment* segment = - guard->lazySegments[guard->lazySegments.length() - 1].get(); + LazyStubSegment* segment = stubSegments_[lastStubSegmentIndex_].get(); + *stubSegmentIndex = lastStubSegmentIndex_; + size_t interpRangeIndex; uint8_t* codePtr = nullptr; - segment->claimSpace(codeLength, &codePtr); - size_t offsetInSegment = codePtr - segment->base(); - - UniqueCodeBlock stubCodeBlock = - MakeUnique(CodeBlockKind::LazyStubs); - if (!stubCodeBlock) { + if (!segment->addStubs(codeMeta, codeLength, funcExportIndices, funcExports, + codeRanges, &codePtr, &interpRangeIndex)) { return false; } - stubCodeBlock->segment = segment; - stubCodeBlock->codeBase = codePtr; - stubCodeBlock->codeLength = codeLength; - stubCodeBlock->codeRanges = std::move(codeRanges); { AutoMarkJitCodeWritableForThread writable; @@ -480,334 +592,204 @@ bool Code::createManyLazyEntryStubs(const WriteGuard& guard, return false; } - *stubBlockIndex = guard->blocks.length(); + // Create lazy function exports for funcIndex -> entry lookup. + if (!exports_.reserve(exports_.length() + funcExportIndices.length())) { + return false; + } - uint32_t codeRangeIndex = 0; for (uint32_t funcExportIndex : funcExportIndices) { const FuncExport& fe = funcExports[funcExportIndex]; - const FuncType& funcType = getFuncExportType(fe); + const FuncType& funcType = codeMeta.getFuncExportType(fe); - LazyFuncExport lazyExport(fe.funcIndex(), *stubBlockIndex, codeRangeIndex, - tierCodeBlock.kind); + DebugOnly cr = segment->codeRanges()[interpRangeIndex]; + MOZ_ASSERT(cr.value.isInterpEntry()); + MOZ_ASSERT(cr.value.funcIndex() == fe.funcIndex()); - // Offset the code range for the interp entry to where it landed in the - // segment. - CodeRange& interpRange = stubCodeBlock->codeRanges[codeRangeIndex]; - MOZ_ASSERT(interpRange.isInterpEntry()); - MOZ_ASSERT(interpRange.funcIndex() == fe.funcIndex()); - interpRange.offsetBy(offsetInSegment); - codeRangeIndex += 1; - - // Offset the code range for the jit entry (if any) to where it landed in - // the segment. 
- if (funcType.canHaveJitEntry()) { - CodeRange& jitRange = stubCodeBlock->codeRanges[codeRangeIndex]; - MOZ_ASSERT(jitRange.isJitEntry()); - MOZ_ASSERT(jitRange.funcIndex() == fe.funcIndex()); - codeRangeIndex += 1; - jitRange.offsetBy(offsetInSegment); - } + LazyFuncExport lazyExport(fe.funcIndex(), *stubSegmentIndex, + interpRangeIndex); size_t exportIndex; const uint32_t targetFunctionIndex = fe.funcIndex(); + MOZ_ALWAYS_FALSE(BinarySearchIf( + exports_, 0, exports_.length(), + [targetFunctionIndex](const LazyFuncExport& funcExport) { + return targetFunctionIndex - funcExport.funcIndex; + }, + &exportIndex)); + MOZ_ALWAYS_TRUE( + exports_.insert(exports_.begin() + exportIndex, std::move(lazyExport))); - if (BinarySearchIf( - guard->lazyExports, 0, guard->lazyExports.length(), - [targetFunctionIndex](const LazyFuncExport& funcExport) { - return targetFunctionIndex - funcExport.funcIndex; - }, - &exportIndex)) { - DebugOnly oldKind = - guard->lazyExports[exportIndex].funcKind; - MOZ_ASSERT(oldKind == CodeBlockKind::SharedStubs || - oldKind == CodeBlockKind::BaselineTier); - guard->lazyExports[exportIndex] = std::move(lazyExport); - } else if (!guard->lazyExports.insert( - guard->lazyExports.begin() + exportIndex, - std::move(lazyExport))) { - return false; - } - } - - // Initialization makes the code block visible to the whole process through - // the process code map. We must wait until we're no longer initializing the - // code block to do it. - if (!stubCodeBlock->initialize(*tierCodeBlock.code)) { - return false; + // Exports that don't support a jit entry get only the interp entry. + interpRangeIndex += (funcType.canHaveJitEntry() ? 2 : 1); } - return guard->blocks.append(std::move(stubCodeBlock)); + return true; } -bool Code::createOneLazyEntryStub(const WriteGuard& guard, - uint32_t funcExportIndex, - const CodeBlock& tierCodeBlock, - void** interpEntry) const { +bool LazyStubTier::createOneEntryStub(uint32_t funcExportIndex, + const CodeMetadata& codeMeta, + const CodeTier& codeTier) { Uint32Vector funcExportIndexes; if (!funcExportIndexes.append(funcExportIndex)) { return false; } - size_t stubBlockIndex; - if (!createManyLazyEntryStubs(guard, funcExportIndexes, tierCodeBlock, - &stubBlockIndex)) { + size_t stubSegmentIndex; + if (!createManyEntryStubs(funcExportIndexes, codeMeta, codeTier, + &stubSegmentIndex)) { return false; } - const CodeBlock& block = *guard->blocks[stubBlockIndex]; - const CodeSegment& segment = *block.segment; - const CodeRangeVector& codeRanges = block.codeRanges; - - const FuncExport& fe = tierCodeBlock.funcExports[funcExportIndex]; - const FuncType& funcType = getFuncExportType(fe); + const UniqueLazyStubSegment& segment = stubSegments_[stubSegmentIndex]; + const CodeRangeVector& codeRanges = segment->codeRanges(); - // We created one or two stubs, depending on the function type. - uint32_t funcEntryRanges = funcType.canHaveJitEntry() ? 
2 : 1; - MOZ_ASSERT(codeRanges.length() >= funcEntryRanges); + const FuncExport& fe = codeTier.metadata().funcExports[funcExportIndex]; + const FuncType& funcType = codeMeta.getFuncExportType(fe); - // The first created range is the interp entry - const CodeRange& interpRange = - codeRanges[codeRanges.length() - funcEntryRanges]; - MOZ_ASSERT(interpRange.isInterpEntry()); - *interpEntry = segment.base() + interpRange.begin(); - - // The second created range is the jit entry - if (funcType.canHaveJitEntry()) { - const CodeRange& jitRange = - codeRanges[codeRanges.length() - funcEntryRanges + 1]; - MOZ_ASSERT(jitRange.isJitEntry()); - jumpTables_.setJitEntry(jitRange.funcIndex(), - segment.base() + jitRange.begin()); - } - return true; -} - -bool Code::getOrCreateInterpEntry(uint32_t funcIndex, - const FuncExport** funcExport, - void** interpEntry) const { - size_t funcExportIndex; - const CodeBlock& codeBlock = funcCodeBlock(funcIndex); - *funcExport = &codeBlock.lookupFuncExport(funcIndex, &funcExportIndex); - - const FuncExport& fe = **funcExport; - if (fe.hasEagerStubs()) { - *interpEntry = codeBlock.segment->base() + fe.eagerInterpEntryOffset(); + // Exports that don't support a jit entry get only the interp entry. + if (!funcType.canHaveJitEntry()) { + MOZ_ASSERT(codeRanges.length() >= 1); + MOZ_ASSERT(codeRanges.back().isInterpEntry()); return true; } - MOZ_ASSERT(!codeMetaForAsmJS_, "only wasm can lazily export functions"); + MOZ_ASSERT(codeRanges.length() >= 2); + MOZ_ASSERT(codeRanges[codeRanges.length() - 2].isInterpEntry()); - auto guard = data_.writeLock(); - *interpEntry = lookupLazyInterpEntry(guard, funcIndex); - if (*interpEntry) { - return true; - } + const CodeRange& cr = codeRanges[codeRanges.length() - 1]; + MOZ_ASSERT(cr.isJitEntry()); - return createOneLazyEntryStub(guard, funcExportIndex, codeBlock, interpEntry); + codeTier.code().setJitEntry(cr.funcIndex(), segment->base() + cr.begin()); + return true; } -bool Code::createTier2LazyEntryStubs(const WriteGuard& guard, - const CodeBlock& tier2Code, - Maybe* outStubBlockIndex) const { - if (!guard->lazyExports.length()) { +bool LazyStubTier::createTier2(const Uint32Vector& funcExportIndices, + const CodeMetadata& codeMeta, + const CodeTier& codeTier, + Maybe* outStubSegmentIndex) { + if (!funcExportIndices.length()) { return true; } - Uint32Vector funcExportIndices; - if (!funcExportIndices.reserve(guard->lazyExports.length())) { + size_t stubSegmentIndex; + if (!createManyEntryStubs(funcExportIndices, codeMeta, codeTier, + &stubSegmentIndex)) { return false; } - for (size_t i = 0; i < guard->lazyExports.length(); i++) { - const LazyFuncExport& lfe = guard->lazyExports[i]; - MOZ_ASSERT(lfe.funcKind == CodeBlockKind::SharedStubs || - lfe.funcKind == CodeBlockKind::BaselineTier); - size_t funcExportIndex; - tier2Code.lookupFuncExport(lfe.funcIndex, &funcExportIndex); - funcExportIndices.infallibleAppend(funcExportIndex); - } - - size_t stubBlockIndex; - if (!createManyLazyEntryStubs(guard, funcExportIndices, tier2Code, - &stubBlockIndex)) { - return false; - } - - outStubBlockIndex->emplace(stubBlockIndex); + outStubSegmentIndex->emplace(stubSegmentIndex); return true; } -bool Code::finishCompleteTier2(const LinkData& linkData, - UniqueCodeBlock tier2Code) const { - MOZ_RELEASE_ASSERT(bestTier() == Tier::Baseline && - tier2Code->tier() == Tier::Optimized); - // Acquire the write guard before we start mutating anything. We hold this - // for the minimum amount of time necessary. 
- { - auto guard = data_.writeLock(); - - // Grab the tier2 pointer before moving it into the block vector. This - // ensures we maintain the invariant that tier2_ is never read if hasTier2_ - // is false. - CodeBlock* tier2CodePointer = tier2Code.get(); - - // Publish this code to the process wide map. - if (!tier2Code->initialize(*this) || - !guard->blocks.append(std::move(tier2Code)) || - !blockMap_.insert(tier2CodePointer)) { - return false; - } - - // Before we can make tier-2 live, we need to compile tier2 versions of any - // extant tier1 lazy stubs (otherwise, tiering would break the assumption - // that any extant exported wasm function has had a lazy entry stub already - // compiled for it). - // - // Also see doc block for stubs in WasmJS.cpp. - Maybe stub2Index; - if (!createTier2LazyEntryStubs(guard, *tier2CodePointer, &stub2Index)) { - return false; - } - - // Initializing the code above will have flushed the icache for all cores. - // However, there could still be stale data in the execution pipeline of - // other cores on some platforms. Force an execution context flush on all - // threads to fix this before we commit the code. - // - // This is safe due to the check in `PlatformCanTier` in WasmCompile.cpp - jit::FlushExecutionContextForAllThreads(); - - // Now that we can't fail or otherwise abort tier2, make it live. - tier2_ = tier2CodePointer; - hasTier2_ = true; - MOZ_ASSERT(hasTier2()); - - // Update jump vectors with pointers to tier-2 lazy entry stubs, if any. - if (stub2Index) { - const CodeBlock& block = *guard->blocks[*stub2Index]; - const CodeSegment& segment = *block.segment; - for (const CodeRange& cr : block.codeRanges) { - if (!cr.isJitEntry()) { - continue; - } - jumpTables_.setJitEntry(cr.funcIndex(), segment.base() + cr.begin()); - } - } +void LazyStubTier::setJitEntries(const Maybe& stubSegmentIndex, + const Code& code) { + if (!stubSegmentIndex) { + return; } - - // And we update the jump vectors with pointers to tier-2 functions and eager - // stubs. Callers will continue to invoke tier-1 code until, suddenly, they - // will invoke tier-2 code. This is benign. - const CodeBlock& optimizedTierCode = completeTierCodeBlock(Tier::Optimized); - uint8_t* base = optimizedTierCode.segment->base(); - for (const CodeRange& cr : optimizedTierCode.codeRanges) { - // These are racy writes that we just want to be visible, atomically, - // eventually. All hardware we care about will do this right. But - // we depend on the compiler not splitting the stores hidden inside the - // set*Entry functions. 
- if (cr.isFunction()) { - jumpTables_.setTieringEntry(cr.funcIndex(), base + cr.funcTierEntry()); - } else if (cr.isJitEntry()) { - jumpTables_.setJitEntry(cr.funcIndex(), base + cr.begin()); + const UniqueLazyStubSegment& segment = stubSegments_[*stubSegmentIndex]; + for (const CodeRange& cr : segment->codeRanges()) { + if (!cr.isJitEntry()) { + continue; } + code.setJitEntry(cr.funcIndex(), segment->base() + cr.begin()); } - return true; } -void* Code::lookupLazyInterpEntry(const WriteGuard& guard, - uint32_t funcIndex) const { +bool LazyStubTier::hasEntryStub(uint32_t funcIndex) const { + size_t match; + return BinarySearchIf( + exports_, 0, exports_.length(), + [funcIndex](const LazyFuncExport& funcExport) { + return funcIndex - funcExport.funcIndex; + }, + &match); +} + +void* LazyStubTier::lookupInterpEntry(uint32_t funcIndex) const { size_t match; if (!BinarySearchIf( - guard->lazyExports, 0, guard->lazyExports.length(), + exports_, 0, exports_.length(), [funcIndex](const LazyFuncExport& funcExport) { return funcIndex - funcExport.funcIndex; }, &match)) { return nullptr; } - const LazyFuncExport& fe = guard->lazyExports[match]; - const CodeBlock& block = *guard->blocks[fe.lazyStubBlockIndex]; - const CodeSegment& segment = *block.segment; - return segment.base() + block.codeRanges[fe.funcCodeRangeIndex].begin(); + const LazyFuncExport& fe = exports_[match]; + const LazyStubSegment& stub = *stubSegments_[fe.lazyStubSegmentIndex]; + return stub.base() + stub.codeRanges()[fe.funcCodeRangeIndex].begin(); } -CodeBlock::~CodeBlock() { - if (unregisterOnDestroy_) { - UnregisterCodeBlock(this); +void LazyStubTier::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, + size_t* data) const { + *data += sizeof(*this); + *data += exports_.sizeOfExcludingThis(mallocSizeOf); + for (const UniqueLazyStubSegment& stub : stubSegments_) { + stub->addSizeOfMisc(mallocSizeOf, code, data); } } -bool CodeBlock::initialize(const Code& code) { - MOZ_ASSERT(!initialized()); - this->code = &code; - segment->setCode(code); - - SendCodeRangesToProfiler(segment->base(), code.codeMeta(), - code.codeMetaForAsmJS(), codeRanges); - - // In the case of tiering, RegisterCodeBlock() immediately makes this code - // block live to access from other threads executing the containing - // module. So only call once the CodeBlock is fully initialized. - if (!RegisterCodeBlock(this)) { - return false; +struct ProjectFuncIndex { + const FuncExportVector& funcExports; + explicit ProjectFuncIndex(const FuncExportVector& funcExports) + : funcExports(funcExports) {} + uint32_t operator[](size_t index) const { + return funcExports[index].funcIndex(); } +}; - // This bool is only used by the destructor which cannot be called racily - // and so it is not a problem to mutate it after RegisterCodeBlock(). 
- MOZ_ASSERT(!unregisterOnDestroy_); - unregisterOnDestroy_ = true; - - MOZ_ASSERT(initialized()); - return true; +FuncExport& MetadataTier::lookupFuncExport( + uint32_t funcIndex, size_t* funcExportIndex /* = nullptr */) { + size_t match; + if (!BinarySearch(ProjectFuncIndex(funcExports), 0, funcExports.length(), + funcIndex, &match)) { + MOZ_CRASH("missing function export"); + } + if (funcExportIndex) { + *funcExportIndex = match; + } + return funcExports[match]; } -void CodeBlock::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, - size_t* data) const { - segment->addSizeOfMisc(mallocSizeOf, code, data); - *data += funcToCodeRange.sizeOfExcludingThis(mallocSizeOf) + - codeRanges.sizeOfExcludingThis(mallocSizeOf) + - callSites.sizeOfExcludingThis(mallocSizeOf) + - tryNotes.sizeOfExcludingThis(mallocSizeOf) + - codeRangeUnwindInfos.sizeOfExcludingThis(mallocSizeOf) + - trapSites.sizeOfExcludingThis(mallocSizeOf) + - stackMaps.sizeOfExcludingThis(mallocSizeOf) + - funcExports.sizeOfExcludingThis(mallocSizeOf); - ; +const FuncExport& MetadataTier::lookupFuncExport( + uint32_t funcIndex, size_t* funcExportIndex) const { + return const_cast(this)->lookupFuncExport(funcIndex, + funcExportIndex); } -const CodeRange* CodeBlock::lookupRange(const void* pc) const { - CodeRange::OffsetInCode target((uint8_t*)pc - segment->base()); - return LookupInSorted(codeRanges, target); -} +bool CodeTier::initialize(const Code& code, const LinkData& linkData, + const CodeMetadata& codeMeta, + const CodeMetadataForAsmJS* codeMetaForAsmJS) { + MOZ_ASSERT(!initialized()); + code_ = &code; -struct CallSiteRetAddrOffset { - const CallSiteVector& callSites; - explicit CallSiteRetAddrOffset(const CallSiteVector& callSites) - : callSites(callSites) {} - uint32_t operator[](size_t index) const { - return callSites[index].returnAddressOffset(); + MOZ_ASSERT(lazyStubs_.readLock()->entryStubsEmpty()); + + // See comments in CodeSegment::initialize() for why this must be last. + if (!segment_->initialize(*this, linkData, codeMeta, codeMetaForAsmJS, + *metadata_)) { + return false; } -}; -const CallSite* CodeBlock::lookupCallSite(void* pc) const { - uint32_t target = ((uint8_t*)pc) - segment->base(); - size_t lowerBound = 0; - size_t upperBound = callSites.length(); + MOZ_ASSERT(initialized()); + return true; +} - size_t match; - if (BinarySearch(CallSiteRetAddrOffset(callSites), lowerBound, upperBound, - target, &match)) { - return &callSites[match]; - } - return nullptr; +void CodeTier::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, + size_t* data) const { + segment_->addSizeOfMisc(mallocSizeOf, code, data); + lazyStubs_.readLock()->addSizeOfMisc(mallocSizeOf, code, data); + *data += metadata_->sizeOfExcludingThis(mallocSizeOf); } -const StackMap* CodeBlock::lookupStackMap(uint8_t* pc) const { - return stackMaps.findMap(pc); +const CodeRange* CodeTier::lookupRange(const void* pc) const { + CodeRange::OffsetInCode target((uint8_t*)pc - segment_->base()); + return LookupInSorted(metadata_->codeRanges, target); } -const wasm::TryNote* CodeBlock::lookupTryNote(const void* pc) const { - size_t target = (uint8_t*)pc - segment->base(); +const wasm::TryNote* CodeTier::lookupTryNote(const void* pc) const { + size_t target = (uint8_t*)pc - segment_->base(); + const TryNoteVector& tryNotes = metadata_->tryNotes; // We find the first hit (there may be multiple) to obtain the innermost // handler, which is why we cannot binary search here. 
@@ -820,103 +802,15 @@ const wasm::TryNote* CodeBlock::lookupTryNote(const void* pc) const { return nullptr; } -struct TrapSitePCOffset { - const TrapSiteVector& trapSites; - explicit TrapSitePCOffset(const TrapSiteVector& trapSites) - : trapSites(trapSites) {} - uint32_t operator[](size_t index) const { return trapSites[index].pcOffset; } -}; - -bool CodeBlock::lookupTrap(void* pc, Trap* trapOut, - BytecodeOffset* bytecode) const { - uint32_t target = ((uint8_t*)pc) - segment->base(); - for (Trap trap : MakeEnumeratedRange(Trap::Limit)) { - const TrapSiteVector& trapSitesForKind = trapSites[trap]; - - size_t upperBound = trapSitesForKind.length(); - size_t match; - if (BinarySearch(TrapSitePCOffset(trapSitesForKind), 0, upperBound, target, - &match)) { - MOZ_ASSERT(containsCodePC(pc)); - *trapOut = trap; - *bytecode = trapSitesForKind[match].bytecode; - return true; - } - } - return false; -} - -struct UnwindInfoPCOffset { - const CodeRangeUnwindInfoVector& info; - explicit UnwindInfoPCOffset(const CodeRangeUnwindInfoVector& info) - : info(info) {} - uint32_t operator[](size_t index) const { return info[index].offset(); } -}; - -const CodeRangeUnwindInfo* CodeBlock::lookupUnwindInfo(void* pc) const { - uint32_t target = ((uint8_t*)pc) - segment->base(); - size_t match; - const CodeRangeUnwindInfo* info = nullptr; - if (BinarySearch(UnwindInfoPCOffset(codeRangeUnwindInfos), 0, - codeRangeUnwindInfos.length(), target, &match)) { - info = &codeRangeUnwindInfos[match]; - } else { - // Exact match is not found, using insertion point to get the previous - // info entry; skip if info is outside of codeRangeUnwindInfos. - if (match == 0) return nullptr; - if (match == codeRangeUnwindInfos.length()) { - MOZ_ASSERT( - codeRangeUnwindInfos[codeRangeUnwindInfos.length() - 1].unwindHow() == - CodeRangeUnwindInfo::Normal); - return nullptr; - } - info = &codeRangeUnwindInfos[match - 1]; - } - return info->unwindHow() == CodeRangeUnwindInfo::Normal ? 
nullptr : info; -} - -struct ProjectFuncIndex { - const FuncExportVector& funcExports; - explicit ProjectFuncIndex(const FuncExportVector& funcExports) - : funcExports(funcExports) {} - uint32_t operator[](size_t index) const { - return funcExports[index].funcIndex(); - } -}; - -FuncExport& CodeBlock::lookupFuncExport( - uint32_t funcIndex, size_t* funcExportIndex /* = nullptr */) { - size_t match; - if (!BinarySearch(ProjectFuncIndex(funcExports), 0, funcExports.length(), - funcIndex, &match)) { - MOZ_CRASH("missing function export"); - } - if (funcExportIndex) { - *funcExportIndex = match; - } - return funcExports[match]; -} - -const FuncExport& CodeBlock::lookupFuncExport(uint32_t funcIndex, - size_t* funcExportIndex) const { - return const_cast(this)->lookupFuncExport(funcIndex, - funcExportIndex); -} - -bool JumpTables::initialize(CompileMode mode, const CodeBlock& sharedStubs, - const CodeBlock& tier1) { +bool JumpTables::init(CompileMode mode, const ModuleSegment& ms, + const CodeRangeVector& codeRanges) { static_assert(JSScript::offsetOfJitCodeRaw() == 0, "wasm fast jit entry is at (void*) jit[funcIndex]"); mode_ = mode; size_t numFuncs = 0; - for (const CodeRange& cr : sharedStubs.codeRanges) { - if (cr.isFunction()) { - numFuncs++; - } - } - for (const CodeRange& cr : tier1.codeRanges) { + for (const CodeRange& cr : codeRanges) { if (cr.isFunction()) { numFuncs++; } @@ -940,17 +834,8 @@ bool JumpTables::initialize(CompileMode mode, const CodeBlock& sharedStubs, return false; } - uint8_t* codeBase = sharedStubs.segment->base(); - for (const CodeRange& cr : sharedStubs.codeRanges) { - if (cr.isFunction()) { - setTieringEntry(cr.funcIndex(), codeBase + cr.funcTierEntry()); - } else if (cr.isJitEntry()) { - setJitEntry(cr.funcIndex(), codeBase + cr.begin()); - } - } - - codeBase = tier1.segment->base(); - for (const CodeRange& cr : tier1.codeRanges) { + uint8_t* codeBase = ms.base(); + for (const CodeRange& cr : codeRanges) { if (cr.isFunction()) { setTieringEntry(cr.funcIndex(), codeBase + cr.funcTierEntry()); } else if (cr.isJitEntry()) { @@ -960,60 +845,54 @@ bool JumpTables::initialize(CompileMode mode, const CodeBlock& sharedStubs, return true; } -Code::Code(CompileMode mode, const CodeMetadata& codeMeta, - const CodeMetadataForAsmJS* codeMetaForAsmJS) - : mode_(mode), - data_(mutexid::WasmCodeProtected), - codeMeta_(&codeMeta), +Code::Code(const CodeMetadata& codeMeta, + const CodeMetadataForAsmJS* codeMetaForAsmJS, UniqueCodeTier tier1, + JumpTables&& maybeJumpTables) + : codeMeta_(&codeMeta), codeMetaForAsmJS_(codeMetaForAsmJS), - tier1_(nullptr), - tier2_(nullptr), + tier1_(std::move(tier1)), profilingLabels_(mutexid::WasmCodeProfilingLabels, CacheableCharsVector()), - trapCode_(nullptr) {} + jumpTables_(std::move(maybeJumpTables)) {} -bool Code::initialize(FuncImportVector&& funcImports, - UniqueCodeBlock sharedStubs, - const LinkData& sharedStubsLinkData, - UniqueCodeBlock tierCodeBlock) { +bool Code::initialize(const LinkData& linkData) { MOZ_ASSERT(!initialized()); - funcImports_ = std::move(funcImports); - - auto guard = data_.writeLock(); - - // Grab a mutable pointer to initialize the code block after we have - // installed it. 
- CodeBlock* sharedStubsCodePointer = sharedStubs.get(); - CodeBlock* tier1CodePointer = tierCodeBlock.get(); - - sharedStubs_ = sharedStubs.get(); - tier1_ = tierCodeBlock.get(); - trapCode_ = sharedStubs_->segment->base() + sharedStubsLinkData.trapOffset; - if (!jumpTables_.initialize(mode_, *sharedStubs_, *tier1_) || - !guard->blocks.append(std::move(sharedStubs)) || - !guard->blocks.append(std::move(tierCodeBlock)) || - !blockMap_.insert(sharedStubs_) || !blockMap_.insert(tier1_)) { - // Reset the tier1 pointer to maintain the initialization invariant - tier1_ = nullptr; - MOZ_ASSERT(!initialized()); + if (!tier1_->initialize(*this, linkData, *codeMeta_, codeMetaForAsmJS_)) { return false; } - // Initialize the code block (which will publish it to the process) only - // after it has been completely installed. - if (!tier1CodePointer->initialize(*this) || - !sharedStubsCodePointer->initialize(*this)) { - // Reset the tier1 pointer to maintain the initialization invariant - tier1_ = nullptr; - MOZ_ASSERT(!initialized()); + MOZ_ASSERT(initialized()); + return true; +} + +bool Code::setAndBorrowTier2(UniqueCodeTier tier2, const LinkData& linkData, + const CodeTier** borrowedTier) const { + MOZ_RELEASE_ASSERT(!hasTier2()); + MOZ_RELEASE_ASSERT(tier2->tier() == Tier::Optimized && + tier1_->tier() == Tier::Baseline); + + if (!tier2->initialize(*this, linkData, *codeMeta_, codeMetaForAsmJS_)) { return false; } - MOZ_ASSERT(initialized()); + tier2_ = std::move(tier2); + *borrowedTier = &*tier2_; + return true; } +void Code::commitTier2() const { + MOZ_RELEASE_ASSERT(!hasTier2()); + hasTier2_ = true; + MOZ_ASSERT(hasTier2()); + + // To maintain the invariant that tier2_ is never read without the tier having + // been committed, this checks tier2_ here instead of before setting hasTier2_ + // (as would be natural). See comment in WasmCode.h. + MOZ_RELEASE_ASSERT(tier2_.get()); +} + uint32_t Code::getFuncIndex(JSFunction* fun) const { MOZ_ASSERT(fun->isWasm() || fun->isAsmJSNative()); if (!fun->isWasmWithJitEntry()) { @@ -1045,7 +924,7 @@ Tier Code::bestTier() const { return tier1_->tier(); } -const CodeBlock& Code::codeBlock(Tier tier) const { +const CodeTier& Code::codeTier(Tier tier) const { switch (tier) { case Tier::Baseline: if (tier1_->tier() == Tier::Baseline) { @@ -1060,7 +939,7 @@ const CodeBlock& Code::codeBlock(Tier tier) const { } // It is incorrect to ask for the optimized tier without there being such // a tier and the tier having been committed. The guard here could - // instead be `if (hasTier2()) ... ` but codeBlock(t) should not be called + // instead be `if (hasTier2()) ... ` but codeTier(t) should not be called // in contexts where that test is necessary. 
MOZ_RELEASE_ASSERT(hasTier2()); MOZ_ASSERT(tier2_->initialized()); @@ -1069,15 +948,111 @@ const CodeBlock& Code::codeBlock(Tier tier) const { MOZ_CRASH(); } +bool Code::containsCodePC(const void* pc) const { + for (Tier t : tiers()) { + const ModuleSegment& ms = segment(t); + if (ms.containsCodePC(pc)) { + return true; + } + } + return false; +} + +struct CallSiteRetAddrOffset { + const CallSiteVector& callSites; + explicit CallSiteRetAddrOffset(const CallSiteVector& callSites) + : callSites(callSites) {} + uint32_t operator[](size_t index) const { + return callSites[index].returnAddressOffset(); + } +}; + +const CallSite* Code::lookupCallSite(void* returnAddress) const { + for (Tier t : tiers()) { + uint32_t target = ((uint8_t*)returnAddress) - segment(t).base(); + size_t lowerBound = 0; + size_t upperBound = metadata(t).callSites.length(); + + size_t match; + if (BinarySearch(CallSiteRetAddrOffset(metadata(t).callSites), lowerBound, + upperBound, target, &match)) { + return &metadata(t).callSites[match]; + } + } + + return nullptr; +} + +const CodeRange* Code::lookupFuncRange(void* pc) const { + for (Tier t : tiers()) { + const CodeRange* result = codeTier(t).lookupRange(pc); + if (result && result->isFunction()) { + return result; + } + } + return nullptr; +} + +const StackMap* Code::lookupStackMap(uint8_t* nextPC) const { + for (Tier t : tiers()) { + const StackMap* result = metadata(t).stackMaps.findMap(nextPC); + if (result) { + return result; + } + } + return nullptr; +} + +const wasm::TryNote* Code::lookupTryNote(void* pc, Tier* tier) const { + for (Tier t : tiers()) { + const TryNote* result = codeTier(t).lookupTryNote(pc); + if (result) { + *tier = t; + return result; + } + } + return nullptr; +} + +struct TrapSitePCOffset { + const TrapSiteVector& trapSites; + explicit TrapSitePCOffset(const TrapSiteVector& trapSites) + : trapSites(trapSites) {} + uint32_t operator[](size_t index) const { return trapSites[index].pcOffset; } +}; + +bool Code::lookupTrap(void* pc, Trap* trapOut, BytecodeOffset* bytecode) const { + for (Tier t : tiers()) { + uint32_t target = ((uint8_t*)pc) - segment(t).base(); + const TrapSiteVectorArray& trapSitesArray = metadata(t).trapSites; + for (Trap trap : MakeEnumeratedRange(Trap::Limit)) { + const TrapSiteVector& trapSites = trapSitesArray[trap]; + + size_t upperBound = trapSites.length(); + size_t match; + if (BinarySearch(TrapSitePCOffset(trapSites), 0, upperBound, target, + &match)) { + MOZ_ASSERT(segment(t).containsCodePC(pc)); + *trapOut = trap; + *bytecode = trapSites[match].bytecode; + return true; + } + } + } + + return false; +} + bool Code::lookupFunctionTier(const CodeRange* codeRange, Tier* tier) const { // This logic only works if the codeRange is a function, and therefore only // exists in metadata and not a lazy stub tier. Generalizing to access lazy // stubs would require taking a lock, which is undesirable for the profiler. 
MOZ_ASSERT(codeRange->isFunction()); for (Tier t : tiers()) { - const CodeBlock& code = completeTierCodeBlock(t); - if (codeRange >= code.codeRanges.begin() && - codeRange < code.codeRanges.end()) { + const CodeTier& code = codeTier(t); + const MetadataTier& metadata = code.metadata(); + if (codeRange >= metadata.codeRanges.begin() && + codeRange < metadata.codeRanges.end()) { *tier = t; return true; } @@ -1085,6 +1060,39 @@ bool Code::lookupFunctionTier(const CodeRange* codeRange, Tier* tier) const { return false; } +struct UnwindInfoPCOffset { + const CodeRangeUnwindInfoVector& info; + explicit UnwindInfoPCOffset(const CodeRangeUnwindInfoVector& info) + : info(info) {} + uint32_t operator[](size_t index) const { return info[index].offset(); } +}; + +const CodeRangeUnwindInfo* Code::lookupUnwindInfo(void* pc) const { + for (Tier t : tiers()) { + uint32_t target = ((uint8_t*)pc) - segment(t).base(); + const CodeRangeUnwindInfoVector& unwindInfoArray = + metadata(t).codeRangeUnwindInfos; + size_t match; + const CodeRangeUnwindInfo* info = nullptr; + if (BinarySearch(UnwindInfoPCOffset(unwindInfoArray), 0, + unwindInfoArray.length(), target, &match)) { + info = &unwindInfoArray[match]; + } else { + // Exact match is not found, using insertion point to get the previous + // info entry; skip if info is outside of codeRangeUnwindInfos. + if (match == 0) continue; + if (match == unwindInfoArray.length()) { + MOZ_ASSERT(unwindInfoArray[unwindInfoArray.length() - 1].unwindHow() == + CodeRangeUnwindInfo::Normal); + continue; + } + info = &unwindInfoArray[match - 1]; + } + return info->unwindHow() == CodeRangeUnwindInfo::Normal ? nullptr : info; + } + return nullptr; +} + // When enabled, generate profiling labels for every name in funcNames_ that is // the name of some Function CodeRange. This involves malloc() so do it now // since, once we start sampling, we'll be in a signal-handing context where we @@ -1103,18 +1111,8 @@ void Code::ensureProfilingLabels(bool profilingEnabled) const { // Any tier will do, we only need tier-invariant data that are incidentally // stored with the code ranges. 
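lookupUnwindInfo accepts either an exact offset match or the entry just before the insertion point, and bails out when the PC lands before the first entry or past a trailing Normal entry. A self-contained sketch of that at-or-before search (UnwindEntry is a stand-in type, not the real CodeRangeUnwindInfo):

#include <algorithm>
#include <cstdint>
#include <vector>

enum class UnwindHow { Normal, RestoreFpRa };

struct UnwindEntry {
  uint32_t offset;  // code offset where this unwind rule starts to apply
  UnwindHow unwindHow;
};

// entries must be sorted by offset.
const UnwindEntry* LookupUnwindEntry(const std::vector<UnwindEntry>& entries,
                                     uint32_t target) {
  // First entry strictly past the target; the candidate is the one before it
  // (which is the exact match when there is one).
  auto it = std::upper_bound(
      entries.begin(), entries.end(), target,
      [](uint32_t t, const UnwindEntry& e) { return t < e.offset; });
  if (it == entries.begin()) {
    return nullptr;  // target precedes all unwind info
  }
  const UnwindEntry* candidate = &*(it - 1);
  // A Normal entry means "nothing special to do here", so report no info.
  return candidate->unwindHow == UnwindHow::Normal ? nullptr : candidate;
}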
- const CodeBlock& sharedStubsCodeBlock = sharedStubs(); - const CodeBlock& tier1CodeBlock = completeTierCodeBlock(stableTier()); - // Ignore any OOM failures, nothing we can do about it - (void)appendProfilingLabels(labels, sharedStubsCodeBlock); - (void)appendProfilingLabels(labels, tier1CodeBlock); -} - -bool Code::appendProfilingLabels( - const ExclusiveData::Guard& labels, - const CodeBlock& codeBlock) const { - for (const CodeRange& codeRange : codeBlock.codeRanges) { + for (const CodeRange& codeRange : metadata(stableTier()).codeRanges) { if (!codeRange.isFunction()) { continue; } @@ -1135,38 +1133,37 @@ bool Code::appendProfilingLabels( codeRange.funcIndex(), &name); } if (!ok || !name.append(" (", 2)) { - return false; + return; } if (const char* filename = codeMeta().filename.get()) { if (!name.append(filename, strlen(filename))) { - return false; + return; } } else { if (!name.append('?')) { - return false; + return; } } if (!name.append(':') || !name.append(bytecodeStr, bytecodeStrLen) || !name.append(")\0", 2)) { - return false; + return; } UniqueChars label(name.extractOrCopyRawBuffer()); if (!label) { - return false; + return; } if (codeRange.funcIndex() >= labels->length()) { if (!labels->resize(codeRange.funcIndex() + 1)) { - return false; + return; } } ((CacheableCharsVector&)labels)[codeRange.funcIndex()] = std::move(label); } - return true; } const char* Code::profilingLabel(uint32_t funcIndex) const { @@ -1190,32 +1187,30 @@ void Code::addSizeOfMiscIfNotSeen( bool ok = seenCode->add(p, this); (void)ok; // oh well - auto guard = data_.readLock(); *data += mallocSizeOf(this) + - guard->lazyExports.sizeOfExcludingThis(mallocSizeOf) + + codeMeta().sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenCodeMeta) + (codeMetaForAsmJS() ? codeMetaForAsmJS()->sizeOfIncludingThisIfNotSeen( mallocSizeOf, seenCodeMetaForAsmJS) : 0) + - funcImports_.sizeOfExcludingThis(mallocSizeOf) + profilingLabels_.lock()->sizeOfExcludingThis(mallocSizeOf) + jumpTables_.sizeOfMiscExcludingThis(); - for (const SharedCodeSegment& stub : guard->lazySegments) { - stub->addSizeOfMisc(mallocSizeOf, code, data); - } - sharedStubs().addSizeOfMisc(mallocSizeOf, code, data); for (auto t : tiers()) { - completeTierCodeBlock(t).addSizeOfMisc(mallocSizeOf, code, data); + codeTier(t).addSizeOfMisc(mallocSizeOf, code, data); } } -void CodeBlock::disassemble(JSContext* cx, int kindSelection, - PrintCallback printString) const { - for (const CodeRange& range : codeRanges) { +void Code::disassemble(JSContext* cx, Tier tier, int kindSelection, + PrintCallback printString) const { + const MetadataTier& metadataTier = metadata(tier); + const CodeTier& codeTier = this->codeTier(tier); + const ModuleSegment& segment = codeTier.segment(); + + for (const CodeRange& range : metadataTier.codeRanges) { if (kindSelection & (1 << range.kind())) { - MOZ_ASSERT(range.begin() < segment->lengthBytes()); - MOZ_ASSERT(range.end() < segment->lengthBytes()); + MOZ_ASSERT(range.begin() < segment.length()); + MOZ_ASSERT(range.end() < segment.length()); const char* kind; char kindbuf[128]; @@ -1249,12 +1244,12 @@ void CodeBlock::disassemble(JSContext* cx, int kindSelection, const char* funcName = "(unknown)"; UTF8Bytes namebuf; bool ok; - if (code->codeMetaForAsmJS()) { - ok = code->codeMetaForAsmJS()->getFuncNameForAsmJS(range.funcIndex(), - &namebuf); + if (codeMetaForAsmJS()) { + ok = codeMetaForAsmJS()->getFuncNameForAsmJS(range.funcIndex(), + &namebuf); } else { - ok = code->codeMeta().getFuncNameForWasm(NameContext::Standalone, - 
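The profiling-label code above assembles strings of the form "funcName (filename:bytecodeOffset)" ahead of time, so the profiler can attribute wasm frames without allocating from a signal-handler context. A simplified sketch of just the label format, using std::string instead of the fallible UTF8Bytes/CacheableChars machinery:

#include <cinttypes>
#include <cstdio>
#include <string>

// Builds "funcName (filename:bytecodeOffset)"; an unknown filename becomes
// "?", matching the fallback in ensureProfilingLabels.
std::string MakeProfilingLabel(const std::string& funcName,
                               const char* filenameOrNull,
                               uint32_t bytecodeOffset) {
  char offsetBuf[16];
  snprintf(offsetBuf, sizeof(offsetBuf), "%" PRIu32, bytecodeOffset);
  std::string label = funcName;
  label += " (";
  label += filenameOrNull ? filenameOrNull : "?";
  label += ':';
  label += offsetBuf;
  label += ')';
  return label;
}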
range.funcIndex(), &namebuf); + ok = codeMeta().getFuncNameForWasm(NameContext::Standalone, + range.funcIndex(), &namebuf); } if (ok && namebuf.append('\0')) { funcName = namebuf.begin(); @@ -1266,75 +1261,76 @@ void CodeBlock::disassemble(JSContext* cx, int kindSelection, } printString(buf); - uint8_t* theCode = segment->base() + range.begin(); + uint8_t* theCode = segment.base() + range.begin(); jit::Disassemble(theCode, range.end() - range.begin(), printString); } } } -void Code::disassemble(JSContext* cx, Tier tier, int kindSelection, - PrintCallback printString) const { - this->sharedStubs().disassemble(cx, kindSelection, printString); - this->completeTierCodeBlock(tier).disassemble(cx, kindSelection, printString); -} - // Return a map with names and associated statistics MetadataAnalysisHashMap Code::metadataAnalysis(JSContext* cx) const { MetadataAnalysisHashMap hashmap; - if (!hashmap.reserve(14)) { + if (!hashmap.reserve(15)) { return hashmap; } for (auto t : tiers()) { - const CodeBlock& codeBlock = completeTierCodeBlock(t); - size_t length = codeBlock.funcToCodeRange.numEntries(); - length += codeBlock.codeRanges.length(); - length += codeBlock.callSites.length(); - length += codeBlock.trapSites.sumOfLengths(); - length += codeBlock.funcExports.length(); - length += codeBlock.stackMaps.length(); - length += codeBlock.tryNotes.length(); + size_t length = metadata(t).funcToCodeRange.length(); + length += metadata(t).codeRanges.length(); + length += metadata(t).callSites.length(); + length += metadata(t).trapSites.sumOfLengths(); + length += metadata(t).funcImports.length(); + length += metadata(t).funcExports.length(); + length += metadata(t).stackMaps.length(); + length += metadata(t).tryNotes.length(); hashmap.putNewInfallible("metadata length", length); // Iterate over the Code Ranges and accumulate all pieces of code. 
size_t code_size = 0; - for (const CodeRange& codeRange : codeBlock.codeRanges) { + for (const CodeRange& codeRange : metadata(stableTier()).codeRanges) { if (!codeRange.isFunction()) { continue; } code_size += codeRange.end() - codeRange.begin(); } - hashmap.putNewInfallible("stackmaps number", codeBlock.stackMaps.length()); + hashmap.putNewInfallible("stackmaps number", + this->metadata(t).stackMaps.length()); hashmap.putNewInfallible("trapSites number", - codeBlock.trapSites.sumOfLengths()); + this->metadata(t).trapSites.sumOfLengths()); hashmap.putNewInfallible("codeRange size in bytes", code_size); - hashmap.putNewInfallible("code segment capacity", - codeBlock.segment->capacityBytes()); + hashmap.putNewInfallible("code segment length", + this->codeTier(t).segment().length()); auto mallocSizeOf = cx->runtime()->debuggerMallocSizeOf; + hashmap.putNewInfallible("metadata total size", + metadata(t).sizeOfExcludingThis(mallocSizeOf)); hashmap.putNewInfallible( "funcToCodeRange size", - codeBlock.funcToCodeRange.sizeOfExcludingThis(mallocSizeOf)); + metadata(t).funcToCodeRange.sizeOfExcludingThis(mallocSizeOf)); hashmap.putNewInfallible( "codeRanges size", - codeBlock.codeRanges.sizeOfExcludingThis(mallocSizeOf)); + metadata(t).codeRanges.sizeOfExcludingThis(mallocSizeOf)); hashmap.putNewInfallible( "callSites size", - codeBlock.callSites.sizeOfExcludingThis(mallocSizeOf)); + metadata(t).callSites.sizeOfExcludingThis(mallocSizeOf)); hashmap.putNewInfallible( - "tryNotes size", codeBlock.tryNotes.sizeOfExcludingThis(mallocSizeOf)); + "tryNotes size", + metadata(t).tryNotes.sizeOfExcludingThis(mallocSizeOf)); hashmap.putNewInfallible( "trapSites size", - codeBlock.trapSites.sizeOfExcludingThis(mallocSizeOf)); + metadata(t).trapSites.sizeOfExcludingThis(mallocSizeOf)); hashmap.putNewInfallible( "stackMaps size", - codeBlock.stackMaps.sizeOfExcludingThis(mallocSizeOf)); + metadata(t).stackMaps.sizeOfExcludingThis(mallocSizeOf)); + hashmap.putNewInfallible( + "funcImports size", + metadata(t).funcImports.sizeOfExcludingThis(mallocSizeOf)); hashmap.putNewInfallible( "funcExports size", - codeBlock.funcExports.sizeOfExcludingThis(mallocSizeOf)); + metadata(t).funcExports.sizeOfExcludingThis(mallocSizeOf)); } return hashmap; diff --git a/js/src/wasm/WasmCode.h b/js/src/wasm/WasmCode.h index 28b3d5b8b9bdf..295fc6b099e91 100644 --- a/js/src/wasm/WasmCode.h +++ b/js/src/wasm/WasmCode.h @@ -22,13 +22,11 @@ #include "mozilla/Assertions.h" #include "mozilla/Atomics.h" #include "mozilla/Attributes.h" -#include "mozilla/DebugOnly.h" #include "mozilla/EnumeratedArray.h" #include "mozilla/Maybe.h" #include "mozilla/MemoryReporting.h" #include "mozilla/PodOperations.h" #include "mozilla/RefPtr.h" -#include "mozilla/ScopeExit.h" #include "mozilla/UniquePtr.h" #include @@ -78,8 +76,10 @@ class MacroAssembler; namespace wasm { +struct MetadataTier; + // LinkData contains all the metadata necessary to patch all the locations -// that depend on the absolute address of a CodeSegment. This happens in a +// that depend on the absolute address of a ModuleSegment. This happens in a // "linking" step after compilation and after the module's code is serialized. 
// The LinkData is serialized along with the Module but does not (normally, see // Module::debugLinkData_ comment) persist after (de)serialization, which @@ -98,7 +98,7 @@ WASM_DECLARE_CACHEABLE_POD(LinkDataCacheablePod); WASM_CHECK_CACHEABLE_POD_PADDING(LinkDataCacheablePod) struct LinkData : LinkDataCacheablePod { - LinkData() = default; + explicit LinkData(Tier tier) : tier(tier) {} LinkDataCacheablePod& pod() { return *this; } const LinkDataCacheablePod& pod() const { return *this; } @@ -119,35 +119,13 @@ struct LinkData : LinkDataCacheablePod { struct SymbolicLinkArray : EnumeratedArray { - bool isEmpty() const { - for (const Uint32Vector& symbolicLinks : *this) { - if (symbolicLinks.length() != 0) { - return false; - } - } - return true; - } - void clear() { - for (SymbolicAddress symbolicAddress : - mozilla::MakeEnumeratedRange(SymbolicAddress::Limit)) { - (*this)[symbolicAddress].clear(); - } - } - size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; }; + const Tier tier; InternalLinkVector internalLinks; SymbolicLinkArray symbolicLinks; - bool isEmpty() const { - return internalLinks.length() == 0 && symbolicLinks.isEmpty(); - } - void clear() { - internalLinks.clear(); - symbolicLinks.clear(); - } - size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; }; @@ -167,19 +145,9 @@ struct FreeCode { using UniqueCodeBytes = UniquePtr; class Code; -class CodeBlock; - -using UniqueCodeBlock = UniquePtr; -using UniqueConstCodeBlock = UniquePtr; -using UniqueCodeBlockVector = Vector; -using RawCodeBlockVector = Vector; - -enum class CodeBlockKind { - SharedStubs, - BaselineTier, - OptimizedTier, - LazyStubs -}; +class CodeTier; +class ModuleSegment; +class LazyStubSegment; // CodeSegment contains common helpers for determining the base and length of a // code segment and if a pc belongs to this segment. It is inherited by: @@ -188,211 +156,117 @@ enum class CodeBlockKind { // - LazyStubSegment, i.e. the code segment of entry stubs that are lazily // generated. -// LazyStubSegment is a code segment lazily generated for function entry stubs -// (both interpreter and jit ones). -// -// Because a stub is usually small (a few KiB) and an executable code segment -// isn't (64KiB), a given stub segment can contain entry stubs of many -// functions. +class CodeSegment { + protected: + enum class Kind { LazyStubs, Module }; -// A wasm ModuleSegment owns the allocated executable code for a wasm module. 
+ CodeSegment(UniqueCodeBytes bytes, uint32_t length, Kind kind) + : bytes_(std::move(bytes)), + length_(length), + kind_(kind), + codeTier_(nullptr), + unregisterOnDestroy_(false) {} + + bool initialize(const CodeTier& codeTier); -class CodeSegment : public ShareableBase { private: const UniqueCodeBytes bytes_; - uint32_t lengthBytes_; - const uint32_t capacityBytes_; - const Code* code_; - - bool linkAndMakeExecutable(jit::AutoMarkJitCodeWritableForThread& writable, - const LinkData& linkData, - const CodeBlock* maybeSharedStubs); + const uint32_t length_; + const Kind kind_; + const CodeTier* codeTier_; + bool unregisterOnDestroy_; public: - CodeSegment(UniqueCodeBytes bytes, uint32_t lengthBytes, - uint32_t capacityBytes) - : bytes_(std::move(bytes)), - lengthBytes_(lengthBytes), - capacityBytes_(capacityBytes), - code_(nullptr) {} - - static RefPtr createEmpty(size_t capacityBytes); - static RefPtr createFromMasm(jit::MacroAssembler& masm, - const LinkData& linkData, - const CodeBlock* maybeSharedStubs); - static RefPtr createFromBytes(const uint8_t* unlinkedBytes, - size_t unlinkedBytesLength, - const LinkData& linkData, - const CodeBlock* maybeSharedStubs); - - void setCode(const Code& code) { code_ = &code; } + bool initialized() const { return !!codeTier_; } + ~CodeSegment(); - uint8_t* base() const { return bytes_.get(); } - uint32_t lengthBytes() const { - MOZ_ASSERT(lengthBytes_ != UINT32_MAX); - return lengthBytes_; + bool isLazyStubs() const { return kind_ == Kind::LazyStubs; } + bool isModule() const { return kind_ == Kind::Module; } + const ModuleSegment* asModule() const { + MOZ_ASSERT(isModule()); + return (ModuleSegment*)this; } - uint32_t capacityBytes() const { - MOZ_ASSERT(capacityBytes_ != UINT32_MAX); - return capacityBytes_; + const LazyStubSegment* asLazyStub() const { + MOZ_ASSERT(isLazyStubs()); + return (LazyStubSegment*)this; } - static size_t AlignBytesNeeded(size_t bytes) { - // All new code allocations must be rounded to the system page size - return AlignBytes(bytes, gc::SystemPageSize()); - } - bool hasSpace(size_t bytes) const { - MOZ_ASSERT(AlignBytesNeeded(bytes) == bytes); - return bytes <= capacityBytes() && lengthBytes_ <= capacityBytes() - bytes; - } - void claimSpace(size_t bytes, uint8_t** claimedBase) { - MOZ_RELEASE_ASSERT(hasSpace(bytes)); - *claimedBase = base() + lengthBytes_; - lengthBytes_ += bytes; + uint8_t* base() const { return bytes_.get(); } + uint32_t length() const { + MOZ_ASSERT(length_ != UINT32_MAX); + return length_; } - const Code& code() const { return *code_; } - - void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code, - size_t* data) const; - WASM_DECLARE_FRIEND_SERIALIZE(CodeSegment); -}; - -using SharedCodeSegment = RefPtr; -using SharedCodeSegmentVector = Vector; - -extern UniqueCodeBytes AllocateCodeBytes( - mozilla::Maybe& writable, - uint32_t codeLength); -extern bool StaticallyLink(jit::AutoMarkJitCodeWritableForThread& writable, - uint8_t* base, const LinkData& linkData, - const CodeBlock* maybeSharedStubs); -extern void StaticallyUnlink(uint8_t* base, const LinkData& linkData); - -// LazyFuncExport helps to efficiently lookup a CodeRange from a given function -// index. It is inserted in a vector sorted by function index, to perform -// binary search on it later. 
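The CodeSegment being removed in this hunk carves page-aligned chunks out of a fixed-capacity executable allocation with hasSpace/claimSpace. A standalone sketch of that bump allocation, with a hard-coded page size standing in for gc::SystemPageSize():

#include <cstddef>
#include <cstdint>

constexpr size_t kPageSize = 4096;  // placeholder for gc::SystemPageSize()

constexpr size_t AlignToPage(size_t bytes) {
  return (bytes + kPageSize - 1) & ~(kPageSize - 1);
}

class BumpSegment {
  uint8_t* base_;
  size_t length_ = 0;  // bytes already claimed
  size_t capacity_;

 public:
  BumpSegment(uint8_t* base, size_t capacity)
      : base_(base), capacity_(capacity) {}

  bool hasSpace(size_t alignedBytes) const {
    // Written against the remaining capacity to avoid overflow.
    return alignedBytes <= capacity_ && length_ <= capacity_ - alignedBytes;
  }

  // Returns the start of the claimed region, or nullptr if it does not fit.
  uint8_t* claimSpace(size_t bytes) {
    size_t aligned = AlignToPage(bytes);
    if (!hasSpace(aligned)) {
      return nullptr;
    }
    uint8_t* claimed = base_ + length_;
    length_ += aligned;
    return claimed;
  }
};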
+ bool containsCodePC(const void* pc) const { + return pc >= base() && pc < (base() + length_); + } -struct LazyFuncExport { - size_t funcIndex; - size_t lazyStubBlockIndex; - size_t funcCodeRangeIndex; - // Used to make sure we only upgrade a lazy stub from baseline to ion. - mozilla::DebugOnly funcKind; + const CodeTier& codeTier() const { + MOZ_ASSERT(initialized()); + return *codeTier_; + } + const Code& code() const; - LazyFuncExport(size_t funcIndex, size_t lazyStubBlockIndex, - size_t funcCodeRangeIndex, CodeBlockKind funcKind) - : funcIndex(funcIndex), - lazyStubBlockIndex(lazyStubBlockIndex), - funcCodeRangeIndex(funcCodeRangeIndex), - funcKind(funcKind) {} + void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code) const; }; -using LazyFuncExportVector = Vector; +// A wasm ModuleSegment owns the allocated executable code for a wasm module. -static const uint32_t BAD_CODE_RANGE = UINT32_MAX; +using UniqueModuleSegment = UniquePtr; -class FuncToCodeRangeMap { - uint32_t startFuncIndex_ = 0; - Uint32Vector funcToCodeRange_; +class ModuleSegment : public CodeSegment { + const Tier tier_; + uint8_t* const trapCode_; - bool denseHasFuncIndex(uint32_t funcIndex) const { - return funcIndex >= startFuncIndex_ && - funcIndex - startFuncIndex_ < funcToCodeRange_.length(); - } + public: + ModuleSegment(Tier tier, UniqueCodeBytes codeBytes, uint32_t codeLength, + const LinkData& linkData); - FuncToCodeRangeMap(uint32_t startFuncIndex, Uint32Vector&& funcToCodeRange) - : startFuncIndex_(startFuncIndex), - funcToCodeRange_(std::move(funcToCodeRange)) {} + static UniqueModuleSegment create(Tier tier, jit::MacroAssembler& masm, + const LinkData& linkData); + static UniqueModuleSegment create(Tier tier, const Bytes& unlinkedBytes, + const LinkData& linkData); - public: - [[nodiscard]] static bool createDense(uint32_t startFuncIndex, - uint32_t numFuncs, - FuncToCodeRangeMap* result) { - Uint32Vector funcToCodeRange; - if (!funcToCodeRange.appendN(BAD_CODE_RANGE, numFuncs)) { - return false; - } - *result = FuncToCodeRangeMap(startFuncIndex, std::move(funcToCodeRange)); - return true; - } + bool initialize(const CodeTier& codeTier, const LinkData& linkData, + const CodeMetadata& codeMeta, + const CodeMetadataForAsmJS* codeMetaForAsmJS, + const MetadataTier& metadataTier); - FuncToCodeRangeMap() = default; - FuncToCodeRangeMap(FuncToCodeRangeMap&& rhs) = default; - FuncToCodeRangeMap& operator=(FuncToCodeRangeMap&& rhs) = default; - FuncToCodeRangeMap(const FuncToCodeRangeMap& rhs) = delete; - FuncToCodeRangeMap& operator=(const FuncToCodeRangeMap& rhs) = delete; + Tier tier() const { return tier_; } - uint32_t lookup(uint32_t funcIndex) const { - if (!denseHasFuncIndex(funcIndex)) { - return BAD_CODE_RANGE; - } - return funcToCodeRange_[funcIndex - startFuncIndex_]; - } + // Pointers to stubs to which PC is redirected from the signal-handler. 
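FuncToCodeRangeMap, also removed by this backout, is a dense vector indexed by (funcIndex - startFuncIndex) with a BAD_CODE_RANGE sentinel for not-yet-compiled functions, which keeps lookups O(1) without hashing. A compact sketch of the same structure:

#include <cstdint>
#include <vector>

constexpr uint32_t kBadCodeRange = UINT32_MAX;

class FuncToCodeRange {
  uint32_t startFuncIndex_;
  std::vector<uint32_t> map_;  // map_[i] maps startFuncIndex_ + i

 public:
  FuncToCodeRange(uint32_t startFuncIndex, uint32_t numFuncs)
      : startFuncIndex_(startFuncIndex), map_(numFuncs, kBadCodeRange) {}

  bool inRange(uint32_t funcIndex) const {
    return funcIndex >= startFuncIndex_ &&
           funcIndex - startFuncIndex_ < map_.size();
  }

  uint32_t lookup(uint32_t funcIndex) const {
    return inRange(funcIndex) ? map_[funcIndex - startFuncIndex_]
                              : kBadCodeRange;
  }

  bool insert(uint32_t funcIndex, uint32_t codeRangeIndex) {
    if (!inRange(funcIndex)) {
      return false;  // outside the dense range covered by this map
    }
    map_[funcIndex - startFuncIndex_] = codeRangeIndex;
    return true;
  }
};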
- uint32_t operator[](uint32_t funcIndex) const { return lookup(funcIndex); } + uint8_t* trapCode() const { return trapCode_; } - [[nodiscard]] bool insert(uint32_t funcIndex, uint32_t codeRangeIndex) { - if (!denseHasFuncIndex(funcIndex)) { - return false; - } - funcToCodeRange_[funcIndex - startFuncIndex_] = codeRangeIndex; - return true; - } - void insertInfallible(uint32_t funcIndex, uint32_t codeRangeIndex) { - bool result = insert(funcIndex, codeRangeIndex); - MOZ_RELEASE_ASSERT(result); - } + const CodeRange* lookupRange(const void* pc) const; - void shrinkStorageToFit() { funcToCodeRange_.shrinkStorageToFit(); } + void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code, + size_t* data) const; - void assertAllInitialized() { -#ifdef DEBUG - for (uint32_t codeRangeIndex : funcToCodeRange_) { - MOZ_ASSERT(codeRangeIndex != BAD_CODE_RANGE); - } -#endif - } + WASM_DECLARE_FRIEND_SERIALIZE(ModuleSegment); +}; - size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const { - return funcToCodeRange_.sizeOfExcludingThis(mallocSizeOf); - } +extern UniqueCodeBytes AllocateCodeBytes( + mozilla::Maybe& writable, + uint32_t codeLength); +extern bool StaticallyLink(const ModuleSegment& ms, const LinkData& linkData); +extern void StaticallyUnlink(uint8_t* base, const LinkData& linkData); - size_t numEntries() const { return funcToCodeRange_.length(); } +// MetadataTier holds all the data that is needed to describe compiled wasm +// code at runtime (as opposed to data that is only used to statically link or +// instantiate a module), for one specific tier (baseline or Ion) of code. - WASM_DECLARE_FRIEND_SERIALIZE(FuncToCodeRangeMap); -}; +struct MetadataTier { + explicit MetadataTier(Tier tier = Tier::Serialized) + : tier(tier), debugTrapOffset(0) {} -// CodeBlock contains all the data related to a given compilation tier. It is -// built during module generation and then immutably stored in a Code. -// -// Code contains a map from PC to containing code block. The map is thread-safe -// to support lookups from multiple threads (see ThreadSafeCodeBlockMap). This -// is safe because code blocks are immutable after creation, so there won't -// be any concurrent modification during a metadata lookup. + const Tier tier; -class CodeBlock { - public: - // Weak reference to the code that owns us, not serialized. - const Code* code; - - // The following information is all serialized - // Which kind of code is being stored in this block. Most consumers don't - // care about this. - const CodeBlockKind kind; - - // The code segment our JIT code is within. - SharedCodeSegment segment; - // The sub-range of the code segment our JIT code is within. - const uint8_t* codeBase; - size_t codeLength; - - // Metadata about the code we've put in the segment. All offsets are - // temporarily relative to the segment base, not our block base. - FuncToCodeRangeMap funcToCodeRange; + Uint32Vector funcToCodeRange; CodeRangeVector codeRanges; CallSiteVector callSites; TrapSiteVectorArray trapSites; + FuncImportVector funcImports; FuncExportVector funcExports; StackMaps stackMaps; TryNoteVector tryNotes; @@ -401,242 +275,178 @@ class CodeBlock { // Debug information, not serialized. uint32_t debugTrapOffset; - // Track whether we are registered in the process map of code blocks. 
- bool unregisterOnDestroy_; + FuncExport& lookupFuncExport(uint32_t funcIndex, + size_t* funcExportIndex = nullptr); + const FuncExport& lookupFuncExport(uint32_t funcIndex, + size_t* funcExportIndex = nullptr) const; - static constexpr CodeBlockKind kindFromTier(Tier tier) { - if (tier == Tier::Optimized) { - return CodeBlockKind::OptimizedTier; - } - MOZ_ASSERT(tier == Tier::Baseline); - return CodeBlockKind::BaselineTier; + const CodeRange& codeRange(const FuncExport& funcExport) const { + return codeRanges[funcToCodeRange[funcExport.funcIndex()]]; } - explicit CodeBlock(CodeBlockKind kind) - : code(nullptr), - kind(kind), - debugTrapOffset(0), - unregisterOnDestroy_(false) {} - ~CodeBlock(); + size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; +}; + +using UniqueMetadataTier = UniquePtr; + +// LazyStubSegment is a code segment lazily generated for function entry stubs +// (both interpreter and jit ones). +// +// Because a stub is usually small (a few KiB) and an executable code segment +// isn't (64KiB), a given stub segment can contain entry stubs of many +// functions. - bool initialized() const { return !!code; } +using UniqueLazyStubSegment = UniquePtr; +using LazyStubSegmentVector = + Vector; - bool initialize(const Code& code); +class LazyStubSegment : public CodeSegment { + CodeRangeVector codeRanges_; + size_t usedBytes_; - // Gets the tier for this code block. Only valid for non-lazy stub code. - Tier tier() const { - switch (kind) { - case CodeBlockKind::BaselineTier: - return Tier::Baseline; - case CodeBlockKind::OptimizedTier: - return Tier::Optimized; - default: - MOZ_CRASH(); - } - } + public: + LazyStubSegment(UniqueCodeBytes bytes, size_t length) + : CodeSegment(std::move(bytes), length, CodeSegment::Kind::LazyStubs), + usedBytes_(0) {} - const uint8_t* base() const { return codeBase; } - uint32_t length() const { return codeLength; } - bool containsCodePC(const void* pc) const { - return pc >= base() && pc < (base() + length()); - } + static UniqueLazyStubSegment create(const CodeTier& codeTier, + size_t codeLength); - const CodeRange& codeRange(const FuncExport& funcExport) const { - return codeRanges[funcToCodeRange[funcExport.funcIndex()]]; + static size_t AlignBytesNeeded(size_t bytes) { + return AlignBytes(bytes, gc::SystemPageSize()); } - const CodeRange* lookupRange(const void* pc) const; - const CallSite* lookupCallSite(void* pc) const; - const StackMap* lookupStackMap(uint8_t* pc) const; - const TryNote* lookupTryNote(const void* pc) const; - bool lookupTrap(void* pc, Trap* trapOut, BytecodeOffset* bytecode) const; - const CodeRangeUnwindInfo* lookupUnwindInfo(void* pc) const; - FuncExport& lookupFuncExport(uint32_t funcIndex, - size_t* funcExportIndex = nullptr); - const FuncExport& lookupFuncExport(uint32_t funcIndex, - size_t* funcExportIndex = nullptr) const; + bool hasSpace(size_t bytes) const; + [[nodiscard]] bool addStubs(const CodeMetadata& codeMeta, size_t codeLength, + const Uint32Vector& funcExportIndices, + const FuncExportVector& funcExports, + const CodeRangeVector& codeRanges, + uint8_t** codePtr, + size_t* indexFirstInsertedCodeRange); - void disassemble(JSContext* cx, int kindSelection, - PrintCallback printString) const; + const CodeRangeVector& codeRanges() const { return codeRanges_; } + [[nodiscard]] const CodeRange* lookupRange(const void* pc) const; void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, size_t* data) const; +}; - WASM_DECLARE_FRIEND_SERIALIZE_ARGS(CodeBlock, const wasm::LinkData& data); +// 
LazyFuncExport helps to efficiently lookup a CodeRange from a given function +// index. It is inserted in a vector sorted by function index, to perform +// binary search on it later. + +struct LazyFuncExport { + size_t funcIndex; + size_t lazyStubSegmentIndex; + size_t funcCodeRangeIndex; + LazyFuncExport(size_t funcIndex, size_t lazyStubSegmentIndex, + size_t funcCodeRangeIndex) + : funcIndex(funcIndex), + lazyStubSegmentIndex(lazyStubSegmentIndex), + funcCodeRangeIndex(funcCodeRangeIndex) {} }; -// Because of profiling, the thread running wasm might need to know to which -// CodeBlock the current PC belongs, during a call to lookup(). A lookup -// is a read-only operation, and we don't want to take a lock then -// (otherwise, we could have a deadlock situation if an async lookup -// happened on a given thread that was holding mutatorsMutex_ while getting -// sampled). Since the writer could be modifying the data that is getting -// looked up, the writer functions use spin-locks to know if there are any -// observers (i.e. calls to lookup()) of the atomic data. - -class ThreadSafeCodeBlockMap { - // Since writes (insertions or removals) can happen on any background - // thread at the same time, we need a lock here. - - Mutex mutatorsMutex_ MOZ_UNANNOTATED; - - RawCodeBlockVector segments1_; - RawCodeBlockVector segments2_; - - // Except during swapAndWait(), there are no lookup() observers of the - // vector pointed to by mutableCodeBlocks_ - - RawCodeBlockVector* mutableCodeBlocks_; - Atomic readonlyCodeBlocks_; - Atomic numActiveLookups_; - - struct CodeBlockPC { - const void* pc; - explicit CodeBlockPC(const void* pc) : pc(pc) {} - int operator()(const CodeBlock* cb) const { - if (cb->containsCodePC(pc)) { - return 0; - } - if (pc < cb->base()) { - return -1; - } - return 1; - } - }; +using LazyFuncExportVector = Vector; - void swapAndWait() { - // Both vectors are consistent for lookup at this point although their - // contents are different: there is no way for the looked up PC to be - // in the code segment that is getting registered, because the code - // segment is not even fully created yet. - - // If a lookup happens before this instruction, then the - // soon-to-become-former read-only pointer is used during the lookup, - // which is valid. - - mutableCodeBlocks_ = const_cast( - readonlyCodeBlocks_.exchange(mutableCodeBlocks_)); - - // If a lookup happens after this instruction, then the updated vector - // is used, which is valid: - // - in case of insertion, it means the new vector contains more data, - // but it's fine since the code segment is getting registered and thus - // isn't even fully created yet, so the code can't be running. - // - in case of removal, it means the new vector contains one less - // entry, but it's fine since unregistering means the code segment - // isn't used by any live instance anymore, thus PC can't be in the - // to-be-removed code segment's range. - - // A lookup could have happened on any of the two vectors. Wait for - // observers to be done using any vector before mutating. - - while (numActiveLookups_ > 0) { - } - } +// LazyStubTier contains all the necessary information for lazy function entry +// stubs that are generated at runtime. None of its data are ever serialized. +// +// It must be protected by a lock, because the main thread can both read and +// write lazy stubs at any time while a background thread can regenerate lazy +// stubs for tier2 at any time. 
- public: - ThreadSafeCodeBlockMap() - : mutatorsMutex_(mutexid::WasmCodeBlockMap), - mutableCodeBlocks_(&segments1_), - readonlyCodeBlocks_(&segments2_), - numActiveLookups_(0) {} - - ~ThreadSafeCodeBlockMap() { - MOZ_RELEASE_ASSERT(numActiveLookups_ == 0); - segments1_.clearAndFree(); - segments2_.clearAndFree(); - } +class LazyStubTier { + LazyStubSegmentVector stubSegments_; + LazyFuncExportVector exports_; + size_t lastStubSegmentIndex_; - size_t numActiveLookups() const { return numActiveLookups_; } + [[nodiscard]] bool createManyEntryStubs(const Uint32Vector& funcExportIndices, + const CodeMetadata& codeMeta, + const CodeTier& codeTier, + size_t* stubSegmentIndex); - bool insert(const CodeBlock* cs) { - LockGuard lock(mutatorsMutex_); + public: + LazyStubTier() : lastStubSegmentIndex_(0) {} - size_t index; - MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeBlocks_, 0, - mutableCodeBlocks_->length(), - CodeBlockPC(cs->base()), &index)); + // Creates one lazy stub for the exported function, for which the jit entry + // will be set to the lazily-generated one. + [[nodiscard]] bool createOneEntryStub(uint32_t funcExportIndex, + const CodeMetadata& codeMeta, + const CodeTier& codeTier); - if (!mutableCodeBlocks_->insert(mutableCodeBlocks_->begin() + index, cs)) { - return false; - } + bool entryStubsEmpty() const { return stubSegments_.empty(); } + bool hasEntryStub(uint32_t funcIndex) const; - swapAndWait(); + // Returns a pointer to the raw interpreter entry of a given function for + // which stubs have been lazily generated. + [[nodiscard]] void* lookupInterpEntry(uint32_t funcIndex) const; -#ifdef DEBUG - size_t otherIndex; - MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeBlocks_, 0, - mutableCodeBlocks_->length(), - CodeBlockPC(cs->base()), &otherIndex)); - MOZ_ASSERT(index == otherIndex); -#endif + // Create one lazy stub for all the functions in funcExportIndices, putting + // them in a single stub. Jit entries won't be used until + // setJitEntries() is actually called, after the Code owner has committed + // tier2. + [[nodiscard]] bool createTier2(const Uint32Vector& funcExportIndices, + const CodeMetadata& codeMeta, + const CodeTier& codeTier, + Maybe* stubSegmentIndex); + void setJitEntries(const Maybe& stubSegmentIndex, const Code& code); - // Although we could simply revert the insertion in the read-only - // vector, it is simpler to just crash and given that each CodeBlock - // consumes multiple pages, it is unlikely this insert() would OOM in - // practice - AutoEnterOOMUnsafeRegion oom; - if (!mutableCodeBlocks_->insert(mutableCodeBlocks_->begin() + index, cs)) { - oom.crash("when inserting a CodeBlock in the process-wide map"); - } + void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, + size_t* data) const; +}; - return true; - } +// CodeTier contains all the data related to a given compilation tier. It is +// built during module generation and then immutably stored in a Code. - size_t remove(const CodeBlock* cs) { - LockGuard lock(mutatorsMutex_); +using UniqueCodeTier = UniquePtr; +using UniqueConstCodeTier = UniquePtr; - size_t index; - MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeBlocks_, 0, - mutableCodeBlocks_->length(), - CodeBlockPC(cs->base()), &index)); +class CodeTier { + const Code* code_; - mutableCodeBlocks_->erase(mutableCodeBlocks_->begin() + index); - size_t newCodeBlockCount = mutableCodeBlocks_->length(); + // Serialized information. 
+ const UniqueMetadataTier metadata_; + const UniqueModuleSegment segment_; - swapAndWait(); + // Lazy stubs, not serialized. + RWExclusiveData lazyStubs_; -#ifdef DEBUG - size_t otherIndex; - MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeBlocks_, 0, - mutableCodeBlocks_->length(), - CodeBlockPC(cs->base()), &otherIndex)); - MOZ_ASSERT(index == otherIndex); -#endif + static const MutexId& mutexForTier(Tier tier) { + if (tier == Tier::Baseline) { + return mutexid::WasmLazyStubsTier1; + } + MOZ_ASSERT(tier == Tier::Optimized); + return mutexid::WasmLazyStubsTier2; + } - mutableCodeBlocks_->erase(mutableCodeBlocks_->begin() + index); - return newCodeBlockCount; + public: + CodeTier(UniqueMetadataTier metadata, UniqueModuleSegment segment) + : code_(nullptr), + metadata_(std::move(metadata)), + segment_(std::move(segment)), + lazyStubs_(mutexForTier(segment_->tier())) {} + + bool initialized() const { return !!code_ && segment_->initialized(); } + bool initialize(const Code& code, const LinkData& linkData, + const CodeMetadata& codeMeta, + const CodeMetadataForAsmJS* codeMetaForAsmJS); + + Tier tier() const { return segment_->tier(); } + const RWExclusiveData& lazyStubs() const { return lazyStubs_; } + const MetadataTier& metadata() const { return *metadata_.get(); } + const ModuleSegment& segment() const { return *segment_.get(); } + const Code& code() const { + MOZ_ASSERT(initialized()); + return *code_; } - const CodeBlock* lookup(const void* pc, - const CodeRange** codeRange = nullptr) { - auto decObserver = mozilla::MakeScopeExit([&] { - MOZ_ASSERT(numActiveLookups_ > 0); - numActiveLookups_--; - }); - numActiveLookups_++; - - const RawCodeBlockVector* readonly = readonlyCodeBlocks_; - - size_t index; - if (!BinarySearchIf(*readonly, 0, readonly->length(), CodeBlockPC(pc), - &index)) { - if (codeRange) { - *codeRange = nullptr; - } - return nullptr; - } + const CodeRange* lookupRange(const void* pc) const; + const TryNote* lookupTryNote(const void* pc) const; - // It is fine returning a raw CodeBlock*, because we assume we are - // looking up a live PC in code which is on the stack, keeping the - // CodeBlock alive. + void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, + size_t* data) const; - const CodeBlock* result = (*readonly)[index]; - if (codeRange) { - *codeRange = result->lookupRange(pc); - } - return result; - } + WASM_DECLARE_FRIEND_SERIALIZE_ARGS(CodeTier, const wasm::LinkData& data); }; // Jump tables that implement function tiering and fast js-to-wasm calls. @@ -685,8 +495,8 @@ class JumpTables { "SelfHostedLazyScript"); public: - bool initialize(CompileMode mode, const CodeBlock& sharedStubs, - const CodeBlock& tier1); + bool init(CompileMode mode, const ModuleSegment& ms, + const CodeRangeVector& codeRanges); void setJitEntry(size_t i, void* target) const { // Make sure that write is atomic; see comment in wasm::Module::finishTier2 @@ -741,24 +551,6 @@ using MetadataAnalysisHashMap = HashMap; class Code : public ShareableBase { - struct ProtectedData { - UniqueCodeBlockVector blocks; - SharedCodeSegmentVector lazySegments; - LazyFuncExportVector lazyExports; - }; - using ReadGuard = RWExclusiveData::ReadGuard; - using WriteGuard = RWExclusiveData::WriteGuard; - - // The compile mode this code is used with. - const CompileMode mode_; - - // Core data that is not thread-safe and must acquire a lock in order to - // access. - RWExclusiveData data_; - - // Thread-safe mutable map from code pointer to code block that contains it. 
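ThreadSafeCodeBlockMap, removed above, keeps two sorted vectors so lookups never take a lock: readers bump an atomic counter and search the published vector, while writers mutate the unpublished one under a mutex, swap the published pointer, spin until in-flight lookups drain, and then mirror the change into the other copy. A simplified sketch of that double-buffering scheme (Block and the linear lookup are stand-ins for the real CodeBlock and binary search):

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <mutex>
#include <vector>

struct Block {
  const uint8_t* base;
  size_t length;
  bool contains(const void* pc) const {
    auto* p = static_cast<const uint8_t*>(pc);
    return p >= base && p < base + length;
  }
};

class PcToBlockMap {
  std::mutex mutatorsMutex_;
  std::vector<const Block*> a_, b_;
  std::vector<const Block*>* mutable_ = &a_;
  std::atomic<std::vector<const Block*>*> readonly_{&b_};
  std::atomic<size_t> activeLookups_{0};

  static bool lessByBase(const Block* x, const Block* y) {
    return x->base < y->base;
  }

  void swapAndWait() {
    mutable_ = readonly_.exchange(mutable_);
    while (activeLookups_.load() > 0) {
      // Spin: lookups are a short search, so the wait is brief.
    }
  }

 public:
  void insert(const Block* block) {
    std::lock_guard<std::mutex> lock(mutatorsMutex_);
    auto pos = std::lower_bound(mutable_->begin(), mutable_->end(), block,
                                lessByBase);
    mutable_->insert(pos, block);
    swapAndWait();
    // Mirror the insertion into the vector that just became mutable.
    pos = std::lower_bound(mutable_->begin(), mutable_->end(), block,
                           lessByBase);
    mutable_->insert(pos, block);
  }

  const Block* lookup(const void* pc) {
    activeLookups_.fetch_add(1);
    const std::vector<const Block*>* snapshot = readonly_.load();
    const Block* result = nullptr;
    for (const Block* b : *snapshot) {  // linear for brevity; real code bisects
      if (b->contains(pc)) {
        result = b;
        break;
      }
    }
    activeLookups_.fetch_sub(1);
    return result;
  }
};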
- mutable ThreadSafeCodeBlockMap blockMap_; - // These have the same lifetime end as Code itself -- they can be dropped // when Code itself is dropped. FIXME: should these be MutableCodeXX? // @@ -767,8 +559,6 @@ class Code : public ShareableBase { // This is null for a wasm module, non-null for asm.js SharedCodeMetadataForAsmJS codeMetaForAsmJS_; - const CodeBlock* sharedStubs_; - const CodeBlock* tier1_; // [SMDOC] Tier-2 data // // hasTier2_ and tier2_ implement a three-state protocol for broadcasting @@ -786,64 +576,30 @@ class Code : public ShareableBase { // Once hasTier2_ is true, *no* thread may write tier2_ and *no* thread may // read tier2_ without having observed hasTier2_ as true first. Once // hasTier2_ is true, it stays true. - mutable const CodeBlock* tier2_; + mutable UniqueConstCodeTier tier2_; mutable Atomic hasTier2_; - FuncImportVector funcImports_; + UniqueCodeTier tier1_; + ExclusiveData profilingLabels_; JumpTables jumpTables_; - // Where to redirect PC to for handling traps from the signal handler. - uint8_t* trapCode_; - - // Methods for getting complete tiers, private while we're moving to partial - // tiering. - bool hasTier2() const { return hasTier2_; } - Tiers tiers() const; - bool hasTier(Tier t) const; - const CodeBlock& codeBlock(Tier tier) const; - - // Returns a pointer to the raw interpreter entry of a given function for - // which stubs have been lazily generated. - [[nodiscard]] void* lookupLazyInterpEntry(const WriteGuard& guard, - uint32_t funcIndex) const; - - [[nodiscard]] bool createOneLazyEntryStub(const WriteGuard& guard, - uint32_t funcExportIndex, - const CodeBlock& tierCodeBlock, - void** interpEntry) const; - [[nodiscard]] bool createManyLazyEntryStubs( - const WriteGuard& guard, const Uint32Vector& funcExportIndices, - const CodeBlock& tierCodeBlock, size_t* stubBlockIndex) const; - // Create one lazy stub for all the functions in funcExportIndices, putting - // them in a single stub. Jit entries won't be used until - // setJitEntries() is actually called, after the Code owner has committed - // tier2. 
- [[nodiscard]] bool createTier2LazyEntryStubs( - const WriteGuard& guard, const CodeBlock& tier2Code, - Maybe* outStubBlockIndex) const; - [[nodiscard]] bool appendProfilingLabels( - const ExclusiveData::Guard& labels, - const CodeBlock& codeBlock) const; - public: - Code(CompileMode mode, const CodeMetadata& codeMeta, - const CodeMetadataForAsmJS* codeMetaForAsmJS); - bool initialized() const { return !!tier1_ && tier1_->initialized(); } - - [[nodiscard]] bool initialize(FuncImportVector&& funcImports, - UniqueCodeBlock sharedStubs, - const LinkData& sharedStubsLinkData, - UniqueCodeBlock tier1CodeBlock); - [[nodiscard]] bool finishCompleteTier2(const LinkData& linkData, - UniqueCodeBlock tier2Code) const; + Code(const CodeMetadata& codeMeta, + const CodeMetadataForAsmJS* codeMetaForAsmJS, UniqueCodeTier tier1, + JumpTables&& maybeJumpTables); + bool initialized() const { return tier1_->initialized(); } - [[nodiscard]] bool getOrCreateInterpEntry(uint32_t funcIndex, - const FuncExport** funcExport, - void** interpEntry) const; + bool initialize(const LinkData& linkData); + void setTieringEntry(size_t i, void* target) const { + jumpTables_.setTieringEntry(i, target); + } void** tieringJumpTable() const { return jumpTables_.tiering(); } + void setJitEntry(size_t i, void* target) const { + jumpTables_.setJitEntry(i, target); + } void setJitEntryIfNull(size_t i, void* target) const { jumpTables_.setJitEntryIfNull(i, target); } @@ -852,109 +608,44 @@ class Code : public ShareableBase { } uint32_t getFuncIndex(JSFunction* fun) const; - uint8_t* trapCode() const { return trapCode_; } + // Install the tier2 code without committing it. To maintain the invariant + // that tier2_ is never accessed without the tier having been committed, this + // returns a pointer to the installed tier that the caller can use for + // subsequent operations. 
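Jump-table entries are read concurrently by jitted code while tiering or lazy-stub generation rewrites them, which is why each write in setJitEntry/setTieringEntry must be a single atomic store, and why a set-if-null variant exists so lazily generated stubs never clobber an entry already published by tier-2. A sketch of that publication discipline with std::atomic (the class and names are illustrative, not the real JumpTables):

#include <atomic>
#include <cassert>
#include <cstddef>
#include <memory>

class JumpTableSketch {
  std::unique_ptr<std::atomic<void*>[]> entries_;
  size_t length_;

 public:
  explicit JumpTableSketch(size_t numFuncs)
      : entries_(new std::atomic<void*>[numFuncs]), length_(numFuncs) {
    for (size_t i = 0; i < numFuncs; i++) {
      entries_[i].store(nullptr, std::memory_order_relaxed);
    }
  }

  void set(size_t i, void* target) {
    assert(i < length_);
    entries_[i].store(target, std::memory_order_release);
  }

  void setIfNull(size_t i, void* target) {
    assert(i < length_);
    void* expected = nullptr;
    // If another thread already published an entry, keep it.
    entries_[i].compare_exchange_strong(expected, target,
                                        std::memory_order_release,
                                        std::memory_order_relaxed);
  }

  void* get(size_t i) const {
    assert(i < length_);
    return entries_[i].load(std::memory_order_acquire);
  }
};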
+ bool setAndBorrowTier2(UniqueCodeTier tier2, const LinkData& linkData, + const CodeTier** borrowedTier) const; + void commitTier2() const; - const FuncImport& funcImport(uint32_t funcIndex) const { - return funcImports_[funcIndex]; - } - const FuncImportVector& funcImports() const { return funcImports_; } + bool hasTier2() const { return hasTier2_; } + Tiers tiers() const; + bool hasTier(Tier t) const; - bool hasSerializableCode() const { return hasTier(Tier::Serialized); } Tier stableTier() const; // This is stable during a run Tier bestTier() const; // This may transition from Baseline -> Ion at any time + const CodeTier& codeTier(Tier tier) const; const CodeMetadata& codeMeta() const { return *codeMeta_; } const CodeMetadataForAsmJS* codeMetaForAsmJS() const { return codeMetaForAsmJS_; } - const CodeBlock& sharedStubs() const { return *sharedStubs_; } - const CodeBlock& debugCodeBlock() const { - MOZ_ASSERT(codeMeta_->debugEnabled); - MOZ_ASSERT(tier1_->tier() == Tier::Debug); - return *tier1_; + const ModuleSegment& segment(Tier iter) const { + return codeTier(iter).segment(); } - const CodeBlock& completeTierCodeBlock(Tier tier) const { - return codeBlock(tier); - } - bool hasCompleteTier(Tier tier) const { return hasTier(tier); } - const CodeBlock& funcCodeBlock(uint32_t funcIndex) const { - if (funcIndex < funcImports_.length()) { - return *sharedStubs_; - } - return codeBlock(bestTier()); - } - bool funcHasTier(uint32_t funcIndex, Tier tier) const { - return funcCodeBlock(funcIndex).tier() == tier; + const MetadataTier& metadata(Tier iter) const { + return codeTier(iter).metadata(); } - // Function type lookup: - const TypeDef& getFuncImportTypeDef(uint32_t funcIndex) const { - return codeMeta().types->type(funcImports_[funcIndex].typeIndex()); - } - const FuncType& getFuncImportType(uint32_t funcIndex) const { - return getFuncImportTypeDef(funcIndex).funcType(); - } - const FuncType& getFuncExportType(FuncExport funcExport) const { - return codeMeta().types->type(funcExport.typeIndex()).funcType(); - } - const TypeDef& getFuncExportTypeDef(uint32_t funcIndex) const { - const CodeBlock& code = funcCodeBlock(funcIndex); - const FuncExport& funcExport = code.lookupFuncExport(funcIndex); - return codeMeta().types->type(funcExport.typeIndex()); - } - const FuncType& getFuncExportType(uint32_t funcIndex) const { - return getFuncExportTypeDef(funcIndex).funcType(); - } + // Metadata lookup functions: - // Code metadata lookup: - const CallSite* lookupCallSite(void* pc) const { - const CodeBlock* block = blockMap_.lookup(pc); - if (!block) { - return nullptr; - } - return block->lookupCallSite(pc); - } - const CodeRange* lookupFuncRange(void* pc) const { - const CodeBlock* block = blockMap_.lookup(pc); - if (!block) { - return nullptr; - } - const CodeRange* result = block->lookupRange(pc); - if (result && result->isFunction()) { - return result; - } - return nullptr; - } - const StackMap* lookupStackMap(uint8_t* pc) const { - const CodeBlock* block = blockMap_.lookup(pc); - if (!block) { - return nullptr; - } - return block->lookupStackMap(pc); - } - const wasm::TryNote* lookupTryNote(void* pc, const CodeBlock** block) const { - *block = blockMap_.lookup(pc); - if (!*block) { - return nullptr; - } - return (*block)->lookupTryNote(pc); - } - bool lookupTrap(void* pc, Trap* trapOut, BytecodeOffset* bytecode) const { - const CodeBlock* block = blockMap_.lookup(pc); - if (!block) { - return false; - } - return block->lookupTrap(pc, trapOut, bytecode); - } - const CodeRangeUnwindInfo* 
lookupUnwindInfo(void* pc) const { - const CodeBlock* block = blockMap_.lookup(pc); - if (!block) { - return nullptr; - } - return block->lookupUnwindInfo(pc); - } + const CallSite* lookupCallSite(void* returnAddress) const; + const CodeRange* lookupFuncRange(void* pc) const; + const StackMap* lookupStackMap(uint8_t* nextPC) const; + const TryNote* lookupTryNote(void* pc, Tier* tier) const; + bool containsCodePC(const void* pc) const; + bool lookupTrap(void* pc, Trap* trap, BytecodeOffset* bytecode) const; + const CodeRangeUnwindInfo* lookupUnwindInfo(void* pc) const; bool lookupFunctionTier(const CodeRange* codeRange, Tier* tier) const; // To save memory, profilingLabels_ are generated lazily when profiling mode @@ -978,13 +669,7 @@ class Code : public ShareableBase { CodeMetadataForAsmJS::SeenSet* seenCodeMetaForAsmJS, Code::SeenSet* seenCode, size_t* code, size_t* data) const; - size_t tier1CodeMemoryUsed() const { - return tier1_->segment->capacityBytes(); - } - - WASM_DECLARE_FRIEND_SERIALIZE_ARGS(SharedCode, - const wasm::LinkData& sharedStubsLinkData, - const wasm::LinkData& optimizedLinkData); + WASM_DECLARE_FRIEND_SERIALIZE_ARGS(SharedCode, const wasm::LinkData& data); }; void PatchDebugSymbolicAccesses(uint8_t* codeBase, jit::MacroAssembler& masm); diff --git a/js/src/wasm/WasmCodegenTypes.h b/js/src/wasm/WasmCodegenTypes.h index 32e583d3a567d..ed7abf2c57edd 100644 --- a/js/src/wasm/WasmCodegenTypes.h +++ b/js/src/wasm/WasmCodegenTypes.h @@ -283,18 +283,6 @@ struct TrapSiteVectorArray size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; }; -struct CallFarJump { - uint32_t targetFuncIndex; - uint32_t jumpOffset; - WASM_CHECK_CACHEABLE_POD(targetFuncIndex, jumpOffset); - - CallFarJump(uint32_t targetFuncIndex, uint32_t jumpOffset) - : targetFuncIndex(targetFuncIndex), jumpOffset(jumpOffset) {} -}; -WASM_DECLARE_CACHEABLE_POD(CallFarJump); - -using CallFarJumpVector = Vector; - // On trap, the bytecode offset to be reported in callstacks is saved. 
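SlowCallSiteSearchByOffset in the WasmDebug.cpp hunk below is deliberately a linear scan: debug-only breakpoint sites are looked up by bytecode offset, which the call-site vector is not sorted by. A standalone version of the same search with placeholder types:

#include <cstdint>
#include <vector>

enum class CallSiteKind { Func, Breakpoint /* ... */ };

struct DebugCallSite {
  CallSiteKind kind;
  uint32_t lineOrBytecode;       // for debug code, the bytecode offset
  uint32_t returnAddressOffset;  // where the breakpoint trap returns to
};

const DebugCallSite* FindBreakpointSite(
    const std::vector<DebugCallSite>& sites, uint32_t bytecodeOffset) {
  for (const DebugCallSite& site : sites) {
    if (site.kind == CallSiteKind::Breakpoint &&
        site.lineOrBytecode == bytecodeOffset) {
      return &site;
    }
  }
  return nullptr;
}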
struct TrapData { diff --git a/js/src/wasm/WasmDebug.cpp b/js/src/wasm/WasmDebug.cpp index d5c8f78c26144..62405c630014b 100644 --- a/js/src/wasm/WasmDebug.cpp +++ b/js/src/wasm/WasmDebug.cpp @@ -43,6 +43,7 @@ DebugState::DebugState(const Code& code, const Module& module) enterFrameTrapsEnabled_(false), enterAndLeaveFrameTrapsCounter_(0) { MOZ_RELEASE_ASSERT(code.codeMeta().debugEnabled); + MOZ_RELEASE_ASSERT(code.hasTier(Tier::Debug)); } void DebugState::trace(JSTracer* trc) { @@ -59,9 +60,9 @@ void DebugState::finalize(JS::GCContext* gcx) { } } -static const CallSite* SlowCallSiteSearchByOffset(const CodeBlock& code, +static const CallSite* SlowCallSiteSearchByOffset(const MetadataTier& metadata, uint32_t offset) { - for (const CallSite& callSite : code.callSites) { + for (const CallSite& callSite : metadata.callSites) { if (callSite.lineOrBytecode() == offset && callSite.kind() == CallSiteDesc::Breakpoint) { return &callSite; @@ -71,12 +72,13 @@ static const CallSite* SlowCallSiteSearchByOffset(const CodeBlock& code, } bool DebugState::getLineOffsets(size_t lineno, Vector* offsets) { - const CallSite* callsite = SlowCallSiteSearchByOffset(debugCode(), lineno); + const CallSite* callsite = + SlowCallSiteSearchByOffset(metadata(Tier::Debug), lineno); return !(callsite && !offsets->append(lineno)); } bool DebugState::getAllColumnOffsets(Vector* offsets) { - for (const CallSite& callSite : debugCode().callSites) { + for (const CallSite& callSite : metadata(Tier::Debug).callSites) { if (callSite.kind() != CallSite::Breakpoint) { continue; } @@ -93,7 +95,7 @@ bool DebugState::getAllColumnOffsets(Vector* offsets) { bool DebugState::getOffsetLocation(uint32_t offset, uint32_t* lineno, JS::LimitedColumnNumberOneOrigin* column) { - if (!SlowCallSiteSearchByOffset(debugCode(), offset)) { + if (!SlowCallSiteSearchByOffset(metadata(Tier::Debug), offset)) { return false; } *lineno = offset; @@ -129,7 +131,7 @@ bool DebugState::incrementStepperCount(JSContext* cx, Instance* instance, void DebugState::decrementStepperCount(JS::GCContext* gcx, Instance* instance, uint32_t funcIndex) { const CodeRange& codeRange = - debugCode().codeRanges[funcToCodeRangeIndex(funcIndex)]; + codeRanges(Tier::Debug)[funcToCodeRangeIndex(funcIndex)]; MOZ_ASSERT(codeRange.isFunction()); MOZ_ASSERT(!stepperCounters_.empty()); @@ -146,7 +148,7 @@ void DebugState::decrementStepperCount(JS::GCContext* gcx, Instance* instance, bool anyEnterAndLeave = enterAndLeaveFrameTrapsCounter_ > 0; bool keepDebugging = false; - for (const CallSite& callSite : debugCode().callSites) { + for (const CallSite& callSite : callSites(Tier::Debug)) { if (callSite.kind() != CallSite::Breakpoint) { continue; } @@ -165,18 +167,19 @@ void DebugState::decrementStepperCount(JS::GCContext* gcx, Instance* instance, } bool DebugState::hasBreakpointTrapAtOffset(uint32_t offset) { - return SlowCallSiteSearchByOffset(debugCode(), offset); + return SlowCallSiteSearchByOffset(metadata(Tier::Debug), offset); } void DebugState::toggleBreakpointTrap(JSRuntime* rt, Instance* instance, uint32_t offset, bool enabled) { - const CallSite* callSite = SlowCallSiteSearchByOffset(debugCode(), offset); + const CallSite* callSite = + SlowCallSiteSearchByOffset(metadata(Tier::Debug), offset); if (!callSite) { return; } size_t debugTrapOffset = callSite->returnAddressOffset(); - const CodeSegment& codeSegment = debugSegment(); + const ModuleSegment& codeSegment = code_->segment(Tier::Debug); const CodeRange* codeRange = code_->lookupFuncRange(codeSegment.base() + 
debugTrapOffset); MOZ_ASSERT(codeRange); @@ -296,8 +299,8 @@ void DebugState::disableDebuggingForFunction(Instance* instance, } void DebugState::enableDebugTrap(Instance* instance) { - instance->setDebugTrapHandler(code_->sharedStubs().segment->base() + - code_->sharedStubs().debugTrapOffset); + instance->setDebugTrapHandler(code_->segment(Tier::Debug).base() + + metadata(Tier::Debug).debugTrapOffset); } void DebugState::disableDebugTrap(Instance* instance) { @@ -336,10 +339,10 @@ void DebugState::adjustEnterAndLeaveFrameTrapsState(JSContext* cx, !iter.done() && !mustLeaveEnabled; iter.next()) { WasmBreakpointSite* site = iter.get().value(); const CallSite* callSite = - SlowCallSiteSearchByOffset(debugCode(), site->offset); + SlowCallSiteSearchByOffset(metadata(Tier::Debug), site->offset); if (callSite) { size_t debugTrapOffset = callSite->returnAddressOffset(); - const CodeSegment& codeSegment = debugSegment(); + const ModuleSegment& codeSegment = code_->segment(Tier::Debug); const CodeRange* codeRange = code_->lookupFuncRange(codeSegment.base() + debugTrapOffset); MOZ_ASSERT(codeRange); @@ -387,7 +390,7 @@ bool DebugState::debugGetLocalTypes(uint32_t funcIndex, ValTypeVector* locals, // Decode local var types from wasm binary function body. const CodeRange& range = - debugCode().codeRanges[funcToCodeRangeIndex(funcIndex)]; + codeRanges(Tier::Debug)[funcToCodeRangeIndex(funcIndex)]; // In wasm, the Code points to the function start via funcLineOrBytecode. size_t offsetInModule = range.funcLineOrBytecode(); Decoder d(bytecode().begin() + offsetInModule, bytecode().end(), diff --git a/js/src/wasm/WasmDebug.h b/js/src/wasm/WasmDebug.h index efd3992d2d965..b6174529f6ff3 100644 --- a/js/src/wasm/WasmDebug.h +++ b/js/src/wasm/WasmDebug.h @@ -38,6 +38,8 @@ class WasmInstanceObject; namespace wasm { +struct MetadataTier; + // The generated source location for the AST node/expression. The offset field // refers an offset in an binary format file. @@ -161,17 +163,20 @@ class DebugState { // Accessors for commonly used elements of linked structures. - const CodeBlock& debugCode() const { return code_->debugCodeBlock(); } - const CodeSegment& debugSegment() const { - return *code_->debugCodeBlock().segment; - } + const MetadataTier& metadata(Tier t) const { return code_->metadata(t); } const CodeMetadata& codeMeta() const { return code_->codeMeta(); } const CodeMetadataForAsmJS* codeMetaForAsmJS() const { return code_->codeMetaForAsmJS(); } + const CodeRangeVector& codeRanges(Tier t) const { + return metadata(t).codeRanges; + } + const CallSiteVector& callSites(Tier t) const { + return metadata(t).callSites; + } uint32_t funcToCodeRangeIndex(uint32_t funcIndex) const { - return debugCode().funcToCodeRange[funcIndex]; + return metadata(Tier::Debug).funcToCodeRange[funcIndex]; } // about:memory reporting: diff --git a/js/src/wasm/WasmFrameIter.cpp b/js/src/wasm/WasmFrameIter.cpp index fb5d1e3f53cc1..ea524cba62011 100644 --- a/js/src/wasm/WasmFrameIter.cpp +++ b/js/src/wasm/WasmFrameIter.cpp @@ -389,7 +389,8 @@ bool WasmFrameIter::debugEnabled() const { } // Only non-imported functions can have debug frames. 
- if (codeRange_->funcIndex() < code_->funcImports().length()) { + if (codeRange_->funcIndex() < + code_->metadata(Tier::Debug).funcImports.length()) { return false; } @@ -1183,14 +1184,18 @@ static bool CanUnwindSignatureCheck(uint8_t* fp) { return code && !codeRange->isEntry(); } -static bool GetUnwindInfo(const CodeBlock* codeBlock, +static bool GetUnwindInfo(const CodeSegment* codeSegment, const CodeRange* codeRange, uint8_t* pc, const CodeRangeUnwindInfo** info) { + if (!codeSegment->isModule()) { + return false; + } if (!codeRange->isFunction() || !codeRange->funcHasUnwindInfo()) { return false; } - *info = codeBlock->code->lookupUnwindInfo(pc); + const ModuleSegment* segment = codeSegment->asModule(); + *info = segment->code().lookupUnwindInfo(pc); return *info; } @@ -1247,13 +1252,13 @@ bool js::wasm::StartUnwinding(const RegisterState& registers, // thunk, then execution must be entering from or leaving to the C++ caller // that pushed the JitActivation. const CodeRange* codeRange; - const uint8_t* codeBase; + uint8_t* codeBase; const Code* code = nullptr; - const CodeBlock* codeBlock = LookupCodeBlock(pc, &codeRange); - if (codeBlock) { - code = codeBlock->code; - codeBase = codeBlock->segment->base(); + const CodeSegment* codeSegment = LookupCodeSegment(pc, &codeRange); + if (codeSegment) { + code = &codeSegment->code(); + codeBase = codeSegment->base(); MOZ_ASSERT(codeRange); } else if (!LookupBuiltinThunk(pc, &codeRange, &codeBase)) { return false; @@ -1454,7 +1459,8 @@ bool js::wasm::StartUnwinding(const RegisterState& registers, } const CodeRangeUnwindInfo* unwindInfo; - if (codeBlock && GetUnwindInfo(codeBlock, codeRange, pc, &unwindInfo)) { + if (codeSegment && + GetUnwindInfo(codeSegment, codeRange, pc, &unwindInfo)) { switch (unwindInfo->unwindHow()) { case CodeRangeUnwindInfo::RestoreFpRa: fixedPC = (uint8_t*)registers.tempRA; diff --git a/js/src/wasm/WasmGenerator.cpp b/js/src/wasm/WasmGenerator.cpp index fe9a682c519cd..efa7c44af0c00 100644 --- a/js/src/wasm/WasmGenerator.cpp +++ b/js/src/wasm/WasmGenerator.cpp @@ -67,9 +67,7 @@ bool CompiledCode::swap(MacroAssembler& masm) { static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024; static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024; - -ModuleGenerator::MacroAssemblerScope::MacroAssemblerScope(LifoAlloc& lifo) - : masmAlloc(&lifo), masm(masmAlloc, /* limitedSize= */ false) {} +static const uint32_t BAD_CODE_RANGE = UINT32_MAX; ModuleGenerator::ModuleGenerator(const CompileArgs& args, CodeMetadata* codeMeta, @@ -83,10 +81,11 @@ ModuleGenerator::ModuleGenerator(const CompileArgs& args, cancelled_(cancelled), codeMeta_(codeMeta), compilerEnv_(compilerEnv), - codeBlock_(nullptr), linkData_(nullptr), + metadataTier_(nullptr), lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE), - masm_(nullptr), + masmAlloc_(&lifo_), + masm_(masmAlloc_, /* limitedSize= */ false), debugTrapCodeOffset_(), lastPatchedCallSite_(0), startOfUnpatchedCallsites_(0), @@ -138,10 +137,7 @@ ModuleGenerator::~ModuleGenerator() { } bool ModuleGenerator::init(CodeMetadataForAsmJS* codeMetaForAsmJS) { - // Initialize our task system - if (!initTasks()) { - return false; - } + // Perform fallible metadata, linkdata, assumption allocations. 
// If codeMetaForAsmJS is null, we're compiling wasm; else we're compiling // asm.js, in whih case it contains wasm::Code-lifetime asm.js-specific @@ -168,6 +164,47 @@ bool ModuleGenerator::init(CodeMetadataForAsmJS* codeMetaForAsmJS) { } } + linkData_ = js::MakeUnique(tier()); + if (!linkData_) { + return false; + } + + metadataTier_ = js::MakeUnique(tier()); + if (!metadataTier_) { + return false; + } + + // funcToCodeRange maps function indices to code-range indices and all + // elements will be initialized by the time module generation is finished. + + if (!metadataTier_->funcToCodeRange.appendN(BAD_CODE_RANGE, + codeMeta_->funcs.length())) { + return false; + } + + // Pre-reserve space for large Vectors to avoid the significant cost of the + // final reallocs. In particular, the MacroAssembler can be enormous, so be + // extra conservative. Since large over-reservations may fail when the + // actual allocations will succeed, ignore OOM failures. Note, + // shrinkStorageToFit calls at the end will trim off unneeded capacity. + + size_t codeSectionSize = + codeMeta_->codeSection ? codeMeta_->codeSection->size : 0; + + size_t estimatedCodeSize = + size_t(1.2 * EstimateCompiledCodeSize(tier(), codeSectionSize)); + (void)masm_.reserve(std::min(estimatedCodeSize, MaxCodeBytesPerProcess)); + + (void)metadataTier_->codeRanges.reserve(2 * codeMeta_->numFuncDefs()); + + const size_t ByteCodesPerCallSite = 50; + (void)metadataTier_->callSites.reserve(codeSectionSize / + ByteCodesPerCallSite); + + const size_t ByteCodesPerOOBTrap = 10; + (void)metadataTier_->trapSites[Trap::OutOfBounds].reserve( + codeSectionSize / ByteCodesPerOOBTrap); + // Allocate space in instance for declarations that need it. This sets // various fields in `codeMeta_` and leaves the total length in // `codeMeta_->instanceDataLength`. @@ -177,37 +214,101 @@ bool ModuleGenerator::init(CodeMetadataForAsmJS* codeMetaForAsmJS) { } // Initialize function import metadata - if (!funcImports_.resize(codeMeta_->numFuncImports)) { + if (!metadataTier_->funcImports.resize(codeMeta_->numFuncImports)) { return false; } for (size_t i = 0; i < codeMeta_->numFuncImports; i++) { - funcImports_[i] = FuncImport(codeMeta_->funcs[i].typeIndex, - codeMeta_->offsetOfFuncImportInstanceData(i)); + metadataTier_->funcImports[i] = + FuncImport(codeMeta_->funcs[i].typeIndex, + codeMeta_->offsetOfFuncImportInstanceData(i)); + } + + // Accumulate all exported functions: + // - explicitly marked as such; + // - implicitly exported by being an element of function tables; + // - implicitly exported by being the start function; + // - implicitly exported by being used in global ref.func initializer + // ModuleMetadata accumulates this information for us during decoding. + + uint32_t exportedFuncCount = 0; + for (const FuncDesc& func : codeMeta_->funcs) { + if (func.isExported()) { + exportedFuncCount++; + } + } + if (!metadataTier_->funcExports.reserve(exportedFuncCount)) { + return false; + } + + for (uint32_t funcIndex = 0; funcIndex < codeMeta_->funcs.length(); + funcIndex++) { + const FuncDesc& func = codeMeta_->funcs[funcIndex]; + + if (!func.isExported()) { + continue; + } + + metadataTier_->funcExports.infallibleEmplaceBack( + FuncExport(func.typeIndex, funcIndex, func.isEager())); + } + + // Determine whether parallel or sequential compilation is to be used and + // initialize the CompileTasks that will be used in either mode. 
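The restored ModuleGenerator::init over-reserves its large vectors from simple heuristics (about 1.2x the estimated compiled size, roughly one call site per 50 bytecode bytes and one out-of-bounds trap site per 10) and ignores reservation failures, since later appends can still reallocate. A sketch of those heuristics; EstimateCompiledCodeSize here is a placeholder, and std::vector::reserve is infallible rather than the fallible reserve() used in the tree:

#include <algorithm>
#include <cstddef>
#include <vector>

constexpr size_t kBytecodesPerCallSite = 50;
constexpr size_t kBytecodesPerOOBTrap = 10;

size_t EstimateCompiledCodeSize(size_t codeSectionSize) {
  return codeSectionSize * 8;  // placeholder scale factor, not the real model
}

struct GeneratorVectors {
  std::vector<char> codeBuffer;  // stands in for the MacroAssembler buffer
  std::vector<int> codeRanges;   // element types are placeholders
  std::vector<int> callSites;
  std::vector<int> oobTrapSites;
};

void PreReserve(GeneratorVectors& v, size_t codeSectionSize, size_t numFuncDefs,
                size_t maxCodeBytes) {
  size_t estimated = size_t(1.2 * EstimateCompiledCodeSize(codeSectionSize));
  v.codeBuffer.reserve(std::min(estimated, maxCodeBytes));
  v.codeRanges.reserve(2 * numFuncDefs);
  v.callSites.reserve(codeSectionSize / kBytecodesPerCallSite);
  v.oobTrapSites.reserve(codeSectionSize / kBytecodesPerOOBTrap);
  // The real code uses fallible reserve() and discards its result; a failed
  // reservation only means later appends may have to grow the vector.
}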
+ + MOZ_ASSERT(GetHelperThreadCount() > 1); + + uint32_t numTasks; + if (CanUseExtraThreads() && GetHelperThreadCPUCount() > 1) { + parallel_ = true; + numTasks = 2 * GetMaxWasmCompilationThreads(); + } else { + numTasks = 1; + } + + if (!tasks_.initCapacity(numTasks)) { + return false; + } + for (size_t i = 0; i < numTasks; i++) { + tasks_.infallibleEmplaceBack(*codeMeta_, *compilerEnv_, taskState_, + COMPILATION_LIFO_DEFAULT_CHUNK_SIZE); } - // Generate the shared stubs block - if (!generateSharedStubs()) { + if (!freeTasks_.reserve(numTasks)) { return false; } + for (size_t i = 0; i < numTasks; i++) { + freeTasks_.infallibleAppend(&tasks_[i]); + } + + // Fill in function stubs for each import so that imported functions can be + // used in all the places that normal function definitions can (table + // elements, export calls, etc). + + CompiledCode& importCode = tasks_[0].output; + MOZ_ASSERT(importCode.empty()); - // Start creating a code block for a complete tier of code - if (!startCompleteTier()) { + if (!GenerateImportFunctions(*codeMeta_, metadataTier_->funcImports, + &importCode)) { return false; } + if (!linkCompiledCode(importCode)) { + return false; + } + + importCode.clear(); return true; } -bool ModuleGenerator::funcIsCompiledInBlock(uint32_t funcIndex) const { - return codeBlock_->funcToCodeRange[funcIndex] != BAD_CODE_RANGE; +bool ModuleGenerator::funcIsCompiled(uint32_t funcIndex) const { + return metadataTier_->funcToCodeRange[funcIndex] != BAD_CODE_RANGE; } -const CodeRange& ModuleGenerator::funcCodeRangeInBlock( - uint32_t funcIndex) const { - MOZ_ASSERT(funcIsCompiledInBlock(funcIndex)); +const CodeRange& ModuleGenerator::funcCodeRange(uint32_t funcIndex) const { + MOZ_ASSERT(funcIsCompiled(funcIndex)); const CodeRange& cr = - codeBlock_->codeRanges[codeBlock_->funcToCodeRange[funcIndex]]; + metadataTier_->codeRanges[metadataTier_->funcToCodeRange[funcIndex]]; MOZ_ASSERT(cr.isFunction()); return cr; } @@ -230,9 +331,9 @@ using TrapMaybeOffsetArray = EnumeratedArray, size_t(Trap::Limit)>; bool ModuleGenerator::linkCallSites() { - AutoCreatedBy acb(*masm_, "linkCallSites"); + AutoCreatedBy acb(masm_, "linkCallSites"); - masm_->haltingAlign(CodeAlignment); + masm_.haltingAlign(CodeAlignment); // Create far jumps for calls that have relative offsets that may otherwise // go out of range. This method is called both between function bodies (at a @@ -240,9 +341,9 @@ bool ModuleGenerator::linkCallSites() { // a module's codegen after all possible calls/traps have been emitted. 
OffsetMap existingCallFarJumps; - for (; lastPatchedCallSite_ < codeBlock_->callSites.length(); + for (; lastPatchedCallSite_ < metadataTier_->callSites.length(); lastPatchedCallSite_++) { - const CallSite& callSite = codeBlock_->callSites[lastPatchedCallSite_]; + const CallSite& callSite = metadataTier_->callSites[lastPatchedCallSite_]; const CallSiteTarget& target = callSiteTargets_[lastPatchedCallSite_]; uint32_t callerOffset = callSite.returnAddressOffset(); switch (callSite.kind()) { @@ -264,15 +365,15 @@ bool ModuleGenerator::linkCallSites() { auto patch = [this, callSite](uint32_t callerOffset, uint32_t calleeOffset) { if (callSite.kind() == CallSiteDesc::ReturnFunc) { - masm_->patchFarJump(CodeOffset(callerOffset), calleeOffset); + masm_.patchFarJump(CodeOffset(callerOffset), calleeOffset); } else { MOZ_ASSERT(callSite.kind() == CallSiteDesc::Func); - masm_->patchCall(callerOffset, calleeOffset); + masm_.patchCall(callerOffset, calleeOffset); } }; - if (funcIsCompiledInBlock(target.funcIndex())) { + if (funcIsCompiled(target.funcIndex())) { uint32_t calleeOffset = - funcCodeRangeInBlock(target.funcIndex()).funcUncheckedCallEntry(); + funcCodeRange(target.funcIndex()).funcUncheckedCallEntry(); if (InRange(callerOffset, calleeOffset)) { patch(callerOffset, calleeOffset); break; @@ -283,17 +384,17 @@ bool ModuleGenerator::linkCallSites() { existingCallFarJumps.lookupForAdd(target.funcIndex()); if (!p) { Offsets offsets; - offsets.begin = masm_->currentOffset(); + offsets.begin = masm_.currentOffset(); if (!callFarJumps_.emplaceBack(target.funcIndex(), - masm_->farJumpWithPatch().offset())) { + masm_.farJumpWithPatch())) { return false; } - offsets.end = masm_->currentOffset(); - if (masm_->oom()) { + offsets.end = masm_.currentOffset(); + if (masm_.oom()) { return false; } - if (!codeBlock_->codeRanges.emplaceBack(CodeRange::FarJumpIsland, - offsets)) { + if (!metadataTier_->codeRanges.emplaceBack(CodeRange::FarJumpIsland, + offsets)) { return false; } if (!existingCallFarJumps.add(p, target.funcIndex(), offsets.begin)) { @@ -307,31 +408,31 @@ bool ModuleGenerator::linkCallSites() { } } - masm_->flushBuffer(); - return !masm_->oom(); + masm_.flushBuffer(); + return !masm_.oom(); } void ModuleGenerator::noteCodeRange(uint32_t codeRangeIndex, const CodeRange& codeRange) { switch (codeRange.kind()) { case CodeRange::Function: - MOZ_ASSERT(codeBlock_->funcToCodeRange[codeRange.funcIndex()] == + MOZ_ASSERT(metadataTier_->funcToCodeRange[codeRange.funcIndex()] == BAD_CODE_RANGE); - codeBlock_->funcToCodeRange.insertInfallible(codeRange.funcIndex(), - codeRangeIndex); + metadataTier_->funcToCodeRange[codeRange.funcIndex()] = codeRangeIndex; break; case CodeRange::InterpEntry: - codeBlock_->lookupFuncExport(codeRange.funcIndex()) + metadataTier_->lookupFuncExport(codeRange.funcIndex()) .initEagerInterpEntryOffset(codeRange.begin()); break; case CodeRange::JitEntry: // Nothing to do: jit entries are linked in the jump tables. 
break; case CodeRange::ImportJitExit: - funcImports_[codeRange.funcIndex()].initJitExitOffset(codeRange.begin()); + metadataTier_->funcImports[codeRange.funcIndex()].initJitExitOffset( + codeRange.begin()); break; case CodeRange::ImportInterpExit: - funcImports_[codeRange.funcIndex()].initInterpExitOffset( + metadataTier_->funcImports[codeRange.funcIndex()].initInterpExitOffset( codeRange.begin()); break; case CodeRange::DebugTrap: @@ -403,7 +504,7 @@ static bool AppendForEach(Vec* dstVec, const Vec& srcVec, MutateOp mutateOp) { } bool ModuleGenerator::linkCompiledCode(CompiledCode& code) { - AutoCreatedBy acb(*masm_, "ModuleGenerator::linkCompiledCode"); + AutoCreatedBy acb(masm_, "ModuleGenerator::linkCompiledCode"); JitContext jcx; // Combine observed features from the compiled code into the metadata @@ -413,8 +514,8 @@ bool ModuleGenerator::linkCompiledCode(CompiledCode& code) { // range, insert far jumps to extend the range. if (!InRange(startOfUnpatchedCallsites_, - masm_->size() + code.bytes.length())) { - startOfUnpatchedCallsites_ = masm_->size(); + masm_.size() + code.bytes.length())) { + startOfUnpatchedCallsites_ = masm_.size(); if (!linkCallSites()) { return false; } @@ -423,10 +524,9 @@ bool ModuleGenerator::linkCompiledCode(CompiledCode& code) { // All code offsets in 'code' must be incremented by their position in the // overall module when the code was appended. - masm_->haltingAlign(CodeAlignment); - const size_t offsetInModule = masm_->size(); - if (code.bytes.length() != 0 && - !masm_->appendRawCode(code.bytes.begin(), code.bytes.length())) { + masm_.haltingAlign(CodeAlignment); + const size_t offsetInModule = masm_.size(); + if (!masm_.appendRawCode(code.bytes.begin(), code.bytes.length())) { return false; } @@ -435,14 +535,15 @@ bool ModuleGenerator::linkCompiledCode(CompiledCode& code) { codeRange->offsetBy(offsetInModule); noteCodeRange(codeRangeIndex, *codeRange); }; - if (!AppendForEach(&codeBlock_->codeRanges, code.codeRanges, codeRangeOp)) { + if (!AppendForEach(&metadataTier_->codeRanges, code.codeRanges, + codeRangeOp)) { return false; } auto callSiteOp = [=](uint32_t, CallSite* cs) { cs->offsetBy(offsetInModule); }; - if (!AppendForEach(&codeBlock_->callSites, code.callSites, callSiteOp)) { + if (!AppendForEach(&metadataTier_->callSites, code.callSites, callSiteOp)) { return false; } @@ -454,7 +555,7 @@ bool ModuleGenerator::linkCompiledCode(CompiledCode& code) { auto trapSiteOp = [=](uint32_t, TrapSite* ts) { ts->offsetBy(offsetInModule); }; - if (!AppendForEach(&codeBlock_->trapSites[trap], code.trapSites[trap], + if (!AppendForEach(&metadataTier_->trapSites[trap], code.trapSites[trap], trapSiteOp)) { return false; } @@ -482,7 +583,7 @@ bool ModuleGenerator::linkCompiledCode(CompiledCode& code) { for (size_t i = 0; i < code.stackMaps.length(); i++) { StackMaps::Maplet maplet = code.stackMaps.move(i); maplet.offsetBy(offsetInModule); - if (!codeBlock_->stackMaps.add(maplet)) { + if (!metadataTier_->stackMaps.add(maplet)) { // This function is now the only owner of maplet.map, so we'd better // free it right now. 
maplet.map->destroy(); @@ -493,7 +594,7 @@ bool ModuleGenerator::linkCompiledCode(CompiledCode& code) { auto unwindInfoOp = [=](uint32_t, CodeRangeUnwindInfo* i) { i->offsetBy(offsetInModule); }; - if (!AppendForEach(&codeBlock_->codeRangeUnwindInfos, + if (!AppendForEach(&metadataTier_->codeRangeUnwindInfos, code.codeRangeUnwindInfos, unwindInfoOp)) { return false; } @@ -504,7 +605,7 @@ bool ModuleGenerator::linkCompiledCode(CompiledCode& code) { return tn->hasTryBody(); }; auto tryNoteOp = [=](uint32_t, TryNote* tn) { tn->offsetBy(offsetInModule); }; - return AppendForEach(&codeBlock_->tryNotes, code.tryNotes, tryNoteFilter, + return AppendForEach(&metadataTier_->tryNotes, code.tryNotes, tryNoteFilter, tryNoteOp); } @@ -568,37 +669,6 @@ ThreadType CompileTask::threadType() { } } -bool ModuleGenerator::initTasks() { - // Determine whether parallel or sequential compilation is to be used and - // initialize the CompileTasks that will be used in either mode. - - MOZ_ASSERT(GetHelperThreadCount() > 1); - - uint32_t numTasks; - if (CanUseExtraThreads() && GetHelperThreadCPUCount() > 1) { - parallel_ = true; - numTasks = 2 * GetMaxWasmCompilationThreads(); - } else { - numTasks = 1; - } - - if (!tasks_.initCapacity(numTasks)) { - return false; - } - for (size_t i = 0; i < numTasks; i++) { - tasks_.infallibleEmplaceBack(*codeMeta_, *compilerEnv_, taskState_, - COMPILATION_LIFO_DEFAULT_CHUNK_SIZE); - } - - if (!freeTasks_.reserve(numTasks)) { - return false; - } - for (size_t i = 0; i < numTasks; i++) { - freeTasks_.infallibleAppend(&tasks_[i]); - } - return true; -} - bool ModuleGenerator::locallyCompileCurrentTask() { if (!ExecuteCompileTask(currentTask_, error_)) { return false; @@ -612,9 +682,9 @@ bool ModuleGenerator::locallyCompileCurrentTask() { } bool ModuleGenerator::finishTask(CompileTask* task) { - AutoCreatedBy acb(*masm_, "ModuleGenerator::finishTask"); + AutoCreatedBy acb(masm_, "ModuleGenerator::finishTask"); - masm_->haltingAlign(CodeAlignment); + masm_.haltingAlign(CodeAlignment); if (!linkCompiledCode(task->output)) { return false; @@ -739,31 +809,77 @@ bool ModuleGenerator::finishFuncDefs() { return true; } -static void CheckCodeBlock(const CodeBlock& codeBlock) { -#if defined(DEBUG) +bool ModuleGenerator::finishCodegen() { + // Now that all functions and stubs are generated and their CodeRanges + // known, patch all calls (which can emit far jumps) and far jumps. Linking + // can emit tiny far-jump stubs, so there is an ordering dependency here. + + if (!linkCallSites()) { + return false; + } + + for (CallFarJump far : callFarJumps_) { + masm_.patchFarJump(far.jump, + funcCodeRange(far.funcIndex).funcUncheckedCallEntry()); + } + + metadataTier_->debugTrapOffset = debugTrapCodeOffset_; + + // None of the linking or far-jump operations should emit masm metadata. + + MOZ_ASSERT(masm_.callSites().empty()); + MOZ_ASSERT(masm_.callSiteTargets().empty()); + MOZ_ASSERT(masm_.trapSites().empty()); + MOZ_ASSERT(masm_.symbolicAccesses().empty()); + MOZ_ASSERT(masm_.tryNotes().empty()); + MOZ_ASSERT(masm_.codeLabels().empty()); + + masm_.finish(); + return !masm_.oom(); +} + +bool ModuleGenerator::finishMetadataTier() { + // The stackmaps aren't yet sorted. Do so now, since we'll need to + // binary-search them at GC time. + metadataTier_->stackMaps.finishAndSort(); + + // The try notes also need to be sorted to simplify lookup. 
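// Illustrative sketch (not from the patch): every AppendForEach() call above
// follows the same pattern: note the module offset at which a compiled chunk
// lands, then rebase each piece of that chunk's metadata (code ranges, call
// sites, trap sites, unwind info, try notes) by that offset so it becomes
// relative to the whole module. A minimal generic restatement, with
// hypothetical names (AppendAndRebase, Item) and std::vector standing in for
// the fallible vectors used by the real code:
#include <cstdint>
#include <vector>

template <typename Item, typename MutateOp>
void AppendAndRebase(std::vector<Item>* dst, const std::vector<Item>& src,
                     uint32_t offsetInModule, MutateOp mutate) {
  dst->reserve(dst->size() + src.size());  // grow once, as AppendForEach does
  for (const Item& item : src) {
    Item copy = item;
    mutate(&copy, offsetInModule);  // e.g. copy.offsetBy(offsetInModule)
    dst->push_back(copy);
  }
}
// Usage, assuming a hypothetical ChunkCallSite type with an offsetBy() method:
//   AppendAndRebase(&moduleCallSites, chunkCallSites, offsetInModule,
//                   [](ChunkCallSite* cs, uint32_t off) { cs->offsetBy(off); });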
+ std::sort(metadataTier_->tryNotes.begin(), metadataTier_->tryNotes.end()); + +#ifdef DEBUG + // Check that the stackmap contains no duplicates, since that could lead to + // ambiguities about stack slot pointerness. + const uint8_t* previousNextInsnAddr = nullptr; + for (size_t i = 0; i < metadataTier_->stackMaps.length(); i++) { + const StackMaps::Maplet& maplet = metadataTier_->stackMaps.get(i); + MOZ_ASSERT_IF(i > 0, uintptr_t(maplet.nextInsnAddr) > + uintptr_t(previousNextInsnAddr)); + previousNextInsnAddr = maplet.nextInsnAddr; + } + // Assert all sorted metadata is sorted. uint32_t last = 0; - for (const CodeRange& codeRange : codeBlock.codeRanges) { + for (const CodeRange& codeRange : metadataTier_->codeRanges) { MOZ_ASSERT(codeRange.begin() >= last); last = codeRange.end(); } last = 0; - for (const CallSite& callSite : codeBlock.callSites) { + for (const CallSite& callSite : metadataTier_->callSites) { MOZ_ASSERT(callSite.returnAddressOffset() >= last); last = callSite.returnAddressOffset(); } for (Trap trap : MakeEnumeratedRange(Trap::Limit)) { last = 0; - for (const TrapSite& trapSite : codeBlock.trapSites[trap]) { + for (const TrapSite& trapSite : metadataTier_->trapSites[trap]) { MOZ_ASSERT(trapSite.pcOffset >= last); last = trapSite.pcOffset; } } last = 0; - for (const CodeRangeUnwindInfo& info : codeBlock.codeRangeUnwindInfos) { + for (const CodeRangeUnwindInfo& info : metadataTier_->codeRangeUnwindInfos) { MOZ_ASSERT(info.offset() >= last); last = info.offset(); } @@ -771,303 +887,125 @@ static void CheckCodeBlock(const CodeBlock& codeBlock) { // Try notes should be sorted so that the end of ranges are in rising order // so that the innermost catch handler is chosen. last = 0; - for (const wasm::TryNote& tryNote : codeBlock.tryNotes) { + for (const TryNote& tryNote : metadataTier_->tryNotes) { MOZ_ASSERT(tryNote.tryBodyEnd() >= last); MOZ_ASSERT(tryNote.tryBodyEnd() > tryNote.tryBodyBegin()); last = tryNote.tryBodyBegin(); } - - // Check that the stackmap vector is sorted with no duplicates, and each - // entry points to a plausible instruction. - const uint8_t* previousNextInsnAddr = nullptr; - for (size_t i = 0; i < codeBlock.stackMaps.length(); i++) { - const StackMaps::Maplet& maplet = codeBlock.stackMaps.get(i); - MOZ_ASSERT_IF(i > 0, uintptr_t(maplet.nextInsnAddr) > - uintptr_t(previousNextInsnAddr)); - previousNextInsnAddr = maplet.nextInsnAddr; - - MOZ_ASSERT(IsPlausibleStackMapKey(maplet.nextInsnAddr), - "wasm stackmap does not reference a valid insn"); - } - -# if (defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \ - defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_ARM) || \ - defined(JS_CODEGEN_LOONG64)) - // Check that each trapsite is associated with a plausible instruction. The - // required instruction kind depends on the trapsite kind. - // - // NOTE: currently only enabled on x86_{32,64} and arm{32,64}. Ideally it - // should be extended to riscv, loongson, mips. - // - for (Trap trap : MakeEnumeratedRange(Trap::Limit)) { - const TrapSiteVector& trapSites = codeBlock.trapSites[trap]; - for (const TrapSite& trapSite : trapSites) { - const uint8_t* insnAddr = ((const uint8_t*)(codeBlock.segment->base())) + - uintptr_t(trapSite.pcOffset); - // `expected` describes the kind of instruction we expect to see at - // `insnAddr`. Find out what is actually there and check it matches. 
- const TrapMachineInsn expected = trapSite.insn; - mozilla::Maybe actual = - SummarizeTrapInstruction(insnAddr); - bool valid = actual.isSome() && actual.value() == expected; - // This is useful for diagnosing validation failures. - // if (!valid) { - // fprintf(stderr, - // "FAIL: reason=%-22s expected=%-12s " - // "pcOffset=%-5u addr= %p\n", - // NameOfTrap(trap), NameOfTrapMachineInsn(expected), - // trapSite.pcOffset, insnAddr); - // if (actual.isSome()) { - // fprintf(stderr, "FAIL: identified as %s\n", - // actual.isSome() ? NameOfTrapMachineInsn(actual.value()) - // : "(insn not identified)"); - // } - // } - MOZ_ASSERT(valid, "wasm trapsite does not reference a valid insn"); - } - } -# endif #endif -} - -bool ModuleGenerator::startCodeBlock(CodeBlockKind kind) { - MOZ_ASSERT(!masmScope_ && !linkData_ && !codeBlock_); - masmScope_.emplace(lifo_); - masm_ = &masmScope_->masm; - linkData_ = js::MakeUnique(); - codeBlock_ = js::MakeUnique(kind); - return !!linkData_ && !!codeBlock_; -} - -UniqueCodeBlock ModuleGenerator::finishCodeBlock(UniqueLinkData* linkData) { - // Now that all functions and stubs are generated and their CodeRanges - // known, patch all calls (which can emit far jumps) and far jumps. Linking - // can emit tiny far-jump stubs, so there is an ordering dependency here. - - if (!linkCallSites()) { - return nullptr; - } - - for (CallFarJump far : callFarJumps_) { - masm_->patchFarJump( - jit::CodeOffset(far.jumpOffset), - funcCodeRangeInBlock(far.targetFuncIndex).funcUncheckedCallEntry()); - } - - codeBlock_->debugTrapOffset = debugTrapCodeOffset_; - debugTrapCodeOffset_ = UINT32_MAX; - - lastPatchedCallSite_ = 0; - startOfUnpatchedCallsites_ = 0; - callSiteTargets_.clear(); - callFarJumps_.clear(); - - // None of the linking or far-jump operations should emit masm metadata. - - MOZ_ASSERT(masm_->callSites().empty()); - MOZ_ASSERT(masm_->callSiteTargets().empty()); - MOZ_ASSERT(masm_->trapSites().empty()); - MOZ_ASSERT(masm_->symbolicAccesses().empty()); - MOZ_ASSERT(masm_->tryNotes().empty()); - MOZ_ASSERT(masm_->codeLabels().empty()); - - masm_->finish(); - if (masm_->oom()) { - return nullptr; - } - - // The stackmaps aren't yet sorted. Do so now, since we'll need to - // binary-search them at GC time. - codeBlock_->stackMaps.finishAndSort(); - - // The try notes also need to be sorted to simplify lookup. - std::sort(codeBlock_->tryNotes.begin(), codeBlock_->tryNotes.end()); // These Vectors can get large and the excess capacity can be significant, // so realloc them down to size. 
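// Illustrative sketch (not from the patch): the finishAndSort()/std::sort
// calls and the DEBUG-only monotonicity assertions above exist so that this
// metadata can be binary-searched by code address or offset later (for
// example, stack maps are looked up by return address at GC time). A minimal
// illustration of such a lookup over a sorted vector; MapletSketch and
// LookupStackMap are hypothetical stand-ins for the real StackMaps machinery:
#include <algorithm>
#include <cstdint>
#include <vector>

struct MapletSketch {
  const uint8_t* nextInsnAddr;  // address just past the instruction
  const void* map;              // opaque stack map payload
};

static const MapletSketch* LookupStackMap(
    const std::vector<MapletSketch>& sorted, const uint8_t* returnAddress) {
  // Precondition: `sorted` is strictly increasing by nextInsnAddr, which is
  // exactly what the assertions above check.
  auto it = std::lower_bound(sorted.begin(), sorted.end(), returnAddress,
                             [](const MapletSketch& m, const uint8_t* addr) {
                               return m.nextInsnAddr < addr;
                             });
  if (it == sorted.end() || it->nextInsnAddr != returnAddress) {
    return nullptr;  // no stack map recorded for this return address
  }
  return &*it;
}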
- codeBlock_->funcToCodeRange.shrinkStorageToFit(); - codeBlock_->codeRanges.shrinkStorageToFit(); - codeBlock_->callSites.shrinkStorageToFit(); - codeBlock_->trapSites.shrinkStorageToFit(); - codeBlock_->tryNotes.shrinkStorageToFit(); + metadataTier_->funcToCodeRange.shrinkStorageToFit(); + metadataTier_->codeRanges.shrinkStorageToFit(); + metadataTier_->callSites.shrinkStorageToFit(); + metadataTier_->trapSites.shrinkStorageToFit(); + metadataTier_->tryNotes.shrinkStorageToFit(); for (Trap trap : MakeEnumeratedRange(Trap::Limit)) { - codeBlock_->trapSites[trap].shrinkStorageToFit(); + metadataTier_->trapSites[trap].shrinkStorageToFit(); } - SharedCodeSegment segment = CodeSegment::createFromMasm( - *masm_, *linkData_, sharedStubsCodeBlock_.get()); - if (!segment) { - warnf("failed to allocate executable memory for module"); - return nullptr; - } - codeBlock_->segment = std::move(segment); - codeBlock_->codeBase = codeBlock_->segment->base(); - codeBlock_->codeLength = codeBlock_->segment->lengthBytes(); - codeBlock_->stackMaps.offsetBy(uintptr_t(codeBlock_->segment->base())); - - // Check that metadata is consistent with the actual code we generated, - // linked, and loaded. - CheckCodeBlock(*codeBlock_); - - // Free the macro assembler scope, and reset our masm pointer - masm_ = nullptr; - masmScope_ = Nothing(); - - *linkData = std::move(linkData_); - return std::move(codeBlock_); + return true; } -bool ModuleGenerator::generateSharedStubs() { - if (!startCodeBlock(CodeBlockKind::SharedStubs)) { - return false; - } - - // The shared stubs code will contains function definitions for each imported - // function. - if (!FuncToCodeRangeMap::createDense(0, codeMeta_->numFuncImports, - &codeBlock_->funcToCodeRange)) { - return false; - } +UniqueCodeTier ModuleGenerator::finishCodeTier() { + MOZ_ASSERT(finishedFuncDefs_); - uint32_t exportedFuncCount = 0; - for (uint32_t funcIndex = 0; funcIndex < codeMeta_->numFuncImports; - funcIndex++) { - const FuncDesc& func = codeMeta_->funcs[funcIndex]; - if (func.isExported()) { - exportedFuncCount++; + while (outstanding_ > 0) { + if (!finishOutstandingTask()) { + return nullptr; } } - if (!codeBlock_->funcExports.reserve(exportedFuncCount)) { - return false; - } - - for (uint32_t funcIndex = 0; funcIndex < codeMeta_->numFuncImports; - funcIndex++) { - const FuncDesc& func = codeMeta_->funcs[funcIndex]; - if (!func.isExported()) { - continue; - } - codeBlock_->funcExports.infallibleEmplaceBack( - FuncExport(func.typeIndex, funcIndex, func.isEager())); +#ifdef DEBUG + for (uint32_t codeRangeIndex : metadataTier_->funcToCodeRange) { + MOZ_ASSERT(codeRangeIndex != BAD_CODE_RANGE); } +#endif + + // Now that all imports/exports are known, we can generate a special + // CompiledCode containing stubs. 
- // Generate the stubs for the module first CompiledCode& stubCode = tasks_[0].output; MOZ_ASSERT(stubCode.empty()); - if (!GenerateStubs(*codeMeta_, funcImports_, codeBlock_->funcExports, - &stubCode) || - !linkCompiledCode(stubCode)) { - return false; + if (!GenerateStubs(*codeMeta_, metadataTier_->funcImports, + metadataTier_->funcExports, &stubCode)) { + return nullptr; } - stubCode.clear(); - sharedStubsCodeBlock_ = finishCodeBlock(&sharedStubsLinkData_); - return !!sharedStubsCodeBlock_; -} - -bool ModuleGenerator::startCompleteTier() { - if (!startCodeBlock(CodeBlock::kindFromTier(tier()))) { - return false; + if (!linkCompiledCode(stubCode)) { + return nullptr; } - // funcToCodeRange maps function indices to code-range indices and all - // elements will be initialized by the time module generation is finished. + // Finish linking and metadata. - if (!FuncToCodeRangeMap::createDense( - codeMeta_->numFuncImports, - codeMeta_->funcs.length() - codeMeta_->numFuncImports, - &codeBlock_->funcToCodeRange)) { - return false; + if (!finishCodegen()) { + return nullptr; } - // Pre-reserve space for large Vectors to avoid the significant cost of the - // final reallocs. In particular, the MacroAssembler can be enormous, so be - // extra conservative. Since large over-reservations may fail when the - // actual allocations will succeed, ignore OOM failures. Note, - // shrinkStorageToFit calls at the end will trim off unneeded capacity. - - size_t codeSectionSize = - codeMeta_->codeSection ? codeMeta_->codeSection->size : 0; - - size_t estimatedCodeSize = - size_t(1.2 * EstimateCompiledCodeSize(tier(), codeSectionSize)); - (void)masm_->reserve(std::min(estimatedCodeSize, MaxCodeBytesPerProcess)); - - (void)codeBlock_->codeRanges.reserve(2 * codeMeta_->numFuncDefs()); - - const size_t ByteCodesPerCallSite = 50; - (void)codeBlock_->callSites.reserve(codeSectionSize / ByteCodesPerCallSite); - - const size_t ByteCodesPerOOBTrap = 10; - (void)codeBlock_->trapSites[Trap::OutOfBounds].reserve(codeSectionSize / - ByteCodesPerOOBTrap); - - // Accumulate all exported functions: - // - explicitly marked as such; - // - implicitly exported by being an element of function tables; - // - implicitly exported by being the start function; - // - implicitly exported by being used in global ref.func initializer - // ModuleEnvironment accumulates this information for us during decoding, - // transfer it to the FuncExportVector stored in Metadata. 
- - uint32_t exportedFuncCount = 0; - for (uint32_t funcIndex = codeMeta_->numFuncImports; - funcIndex < codeMeta_->funcs.length(); funcIndex++) { - const FuncDesc& func = codeMeta_->funcs[funcIndex]; - if (func.isExported()) { - exportedFuncCount++; - } - } - if (!codeBlock_->funcExports.reserve(exportedFuncCount)) { - return false; + if (!finishMetadataTier()) { + return nullptr; } - for (uint32_t funcIndex = codeMeta_->numFuncImports; - funcIndex < codeMeta_->funcs.length(); funcIndex++) { - const FuncDesc& func = codeMeta_->funcs[funcIndex]; - - if (!func.isExported()) { - continue; - } - - codeBlock_->funcExports.infallibleEmplaceBack( - FuncExport(func.typeIndex, funcIndex, func.isEager())); + UniqueModuleSegment segment = + ModuleSegment::create(tier(), masm_, *linkData_); + if (!segment) { + warnf("failed to allocate executable memory for module"); + return nullptr; } - return true; -} - -UniqueCodeBlock ModuleGenerator::finishCompleteTier(UniqueLinkData* linkData) { - MOZ_ASSERT(finishedFuncDefs_); + metadataTier_->stackMaps.offsetBy(uintptr_t(segment->base())); - while (outstanding_ > 0) { - if (!finishOutstandingTask()) { - return nullptr; - } +#if defined(DEBUG) + // Check that each stackmap is associated with a plausible instruction. + for (size_t i = 0; i < metadataTier_->stackMaps.length(); i++) { + MOZ_ASSERT( + IsPlausibleStackMapKey(metadataTier_->stackMaps.get(i).nextInsnAddr), + "wasm stackmap does not reference a valid insn"); } - -#ifdef DEBUG - codeBlock_->funcToCodeRange.assertAllInitialized(); #endif - // Now that all funcs have been compiled, we can generate entry stubs for - // the ones that have been exported. - - CompiledCode& stubCode = tasks_[0].output; - MOZ_ASSERT(stubCode.empty()); - - if (!GenerateEntryStubs(*codeMeta_, codeBlock_->funcExports, &stubCode)) { - return nullptr; - } - - if (!linkCompiledCode(stubCode)) { - return nullptr; +#if defined(DEBUG) && (defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \ + defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_ARM) || \ + defined(JS_CODEGEN_LOONG64)) + // Check that each trapsite is associated with a plausible instruction. The + // required instruction kind depends on the trapsite kind. + // + // NOTE: currently only enabled on x86_{32,64} and arm{32,64}. Ideally it + // should be extended to riscv, loongson, mips. + // + for (Trap trap : MakeEnumeratedRange(Trap::Limit)) { + const TrapSiteVector& trapSites = metadataTier_->trapSites[trap]; + for (const TrapSite& trapSite : trapSites) { + const uint8_t* insnAddr = + ((const uint8_t*)(segment->base())) + uintptr_t(trapSite.pcOffset); + // `expected` describes the kind of instruction we expect to see at + // `insnAddr`. Find out what is actually there and check it matches. + const TrapMachineInsn expected = trapSite.insn; + mozilla::Maybe actual = + SummarizeTrapInstruction(insnAddr); + bool valid = actual.isSome() && actual.value() == expected; + // This is useful for diagnosing validation failures. + // if (!valid) { + // fprintf(stderr, + // "FAIL: reason=%-22s expected=%-12s " + // "pcOffset=%-5u addr= %p\n", + // NameOfTrap(trap), NameOfTrapMachineInsn(expected), + // trapSite.pcOffset, insnAddr); + // if (actual.isSome()) { + // fprintf(stderr, "FAIL: identified as %s\n", + // actual.isSome() ? 
NameOfTrapMachineInsn(actual.value()) + // : "(insn not identified)"); + // } + // } + MOZ_ASSERT(valid, "wasm trapsite does not reference a valid insn"); + } } +#endif - return finishCodeBlock(linkData); + return js::MakeUnique(std::move(metadataTier_), std::move(segment)); } bool ModuleGenerator::finishCodeMetadata(const Bytes& bytecode) { @@ -1113,9 +1051,14 @@ SharedModule ModuleGenerator::finishModule( JS::OptimizedEncodingListener* maybeTier2Listener) { MOZ_ASSERT(mode() == CompileMode::Once || mode() == CompileMode::Tier1); - UniqueLinkData tier1LinkData; - UniqueCodeBlock tier1Code = finishCompleteTier(&tier1LinkData); - if (!tier1Code) { + UniqueCodeTier codeTier = finishCodeTier(); + if (!codeTier) { + return nullptr; + } + + JumpTables jumpTables; + if (!jumpTables.init(mode(), codeTier->segment(), + codeTier->metadata().codeRanges)) { return nullptr; } @@ -1172,10 +1115,9 @@ SharedModule ModuleGenerator::finishModule( return nullptr; } - MutableCode code = js_new(mode(), *codeMeta_, codeMetaForAsmJS_); - if (!code || !code->initialize(std::move(funcImports_), - std::move(sharedStubsCodeBlock_), - *sharedStubsLinkData_, std::move(tier1Code))) { + MutableCode code = js_new(*codeMeta_, codeMetaForAsmJS_, + std::move(codeTier), std::move(jumpTables)); + if (!code || !code->initialize(*linkData_)) { return nullptr; } @@ -1202,8 +1144,7 @@ SharedModule ModuleGenerator::finishModule( tier() == Tier::Serialized); Bytes serializedBytes; - if (!module->serialize(*sharedStubsLinkData_, *tier1LinkData, - &serializedBytes)) { + if (!module->serialize(*linkData_, &serializedBytes)) { return nullptr; } @@ -1227,7 +1168,7 @@ SharedModule ModuleGenerator::finishModule( module->startTier2(*compileArgs_, bytecode, maybeTier2Listener); } else if (tier() == Tier::Serialized && maybeTier2Listener) { Bytes bytes; - if (module->serialize(*sharedStubsLinkData_, *tier1LinkData, &bytes)) { + if (module->serialize(*linkData_, &bytes)) { maybeTier2Listener->storeOptimizedEncoding(bytes.begin(), bytes.length()); } } @@ -1246,9 +1187,8 @@ bool ModuleGenerator::finishTier2(const Module& module) { return false; } - UniqueLinkData tier2LinkData; - UniqueCodeBlock tier2Code = finishCompleteTier(&tier2LinkData); - if (!tier2Code) { + UniqueCodeTier codeTier = finishCodeTier(); + if (!codeTier) { return false; } @@ -1258,8 +1198,7 @@ bool ModuleGenerator::finishTier2(const Module& module) { ThisThread::SleepMilliseconds(500); } - return module.finishTier2(*sharedStubsLinkData_, *tier2LinkData, - std::move(tier2Code)); + return module.finishTier2(*linkData_, std::move(codeTier)); } void ModuleGenerator::warnf(const char* msg, ...) { diff --git a/js/src/wasm/WasmGenerator.h b/js/src/wasm/WasmGenerator.h index 9b3ed476d0e1c..e0d55930b6eac 100644 --- a/js/src/wasm/WasmGenerator.h +++ b/js/src/wasm/WasmGenerator.h @@ -20,7 +20,6 @@ #define wasm_generator_h #include "mozilla/Attributes.h" -#include "mozilla/Maybe.h" #include "mozilla/MemoryReporting.h" #include "jit/MacroAssembler.h" @@ -169,16 +168,12 @@ struct CompileTask : public HelperThreadTask { class MOZ_STACK_CLASS ModuleGenerator { using CompileTaskVector = Vector; using CodeOffsetVector = Vector; - // Encapsulates the macro assembler state so that we can create a new one for - // each code block. Not heap allocated because the macro assembler is a - // 'stack class'. 
- struct MacroAssemblerScope { - jit::TempAllocator masmAlloc; - jit::WasmMacroAssembler masm; - - explicit MacroAssemblerScope(LifoAlloc& lifo); - ~MacroAssemblerScope() = default; + struct CallFarJump { + uint32_t funcIndex; + jit::CodeOffset jump; + CallFarJump(uint32_t fi, jit::CodeOffset j) : funcIndex(fi), jump(j) {} }; + using CallFarJumpVector = Vector; // Constant parameters SharedCompileArgs const compileArgs_; @@ -188,18 +183,17 @@ class MOZ_STACK_CLASS ModuleGenerator { CodeMetadata* const codeMeta_; CompilerEnvironment* const compilerEnv_; - // Data that is moved into the Module/Code as the result of finish() - FuncImportVector funcImports_; - UniqueLinkData sharedStubsLinkData_; - UniqueCodeBlock sharedStubsCodeBlock_; + // Data that is moved into the result of finish() + UniqueLinkData linkData_; + UniqueMetadataTier metadataTier_; MutableCodeMetadataForAsmJS codeMetaForAsmJS_; - // Data that is used to construct a CodeBlock - UniqueCodeBlock codeBlock_; - UniqueLinkData linkData_; + // Data scoped to the ModuleGenerator's lifetime + CompileTaskState taskState_; LifoAlloc lifo_; - Maybe masmScope_; - jit::WasmMacroAssembler* masm_; + jit::TempAllocator masmAlloc_; + jit::WasmMacroAssembler masm_; + Uint32Vector funcToCodeRange_; uint32_t debugTrapCodeOffset_; CallFarJumpVector callFarJumps_; CallSiteTargetVector callSiteTargets_; @@ -209,7 +203,6 @@ class MOZ_STACK_CLASS ModuleGenerator { // Parallel compilation bool parallel_; uint32_t outstanding_; - CompileTaskState taskState_; CompileTaskVector tasks_; CompileTaskPtrVector freeTasks_; CompileTask* currentTask_; @@ -218,37 +211,18 @@ class MOZ_STACK_CLASS ModuleGenerator { // Assertions DebugOnly finishedFuncDefs_; - bool funcIsCompiledInBlock(uint32_t funcIndex) const; - const CodeRange& funcCodeRangeInBlock(uint32_t funcIndex) const; + bool funcIsCompiled(uint32_t funcIndex) const; + const CodeRange& funcCodeRange(uint32_t funcIndex) const; bool linkCallSites(); void noteCodeRange(uint32_t codeRangeIndex, const CodeRange& codeRange); bool linkCompiledCode(CompiledCode& code); - [[nodiscard]] bool initTasks(); bool locallyCompileCurrentTask(); bool finishTask(CompileTask* task); bool launchBatchCompile(); bool finishOutstandingTask(); - - // Begins the creation of a code block. All code compiled during this time - // will go into this code block. All previous code blocks must be finished. - [[nodiscard]] bool startCodeBlock(CodeBlockKind kind); - // Finish the creation of a code block. This will move all the compiled code - // and metadata into the code block and initialize it. Returns a `linkData` - // through an out-param that can be serialized with the code block. - UniqueCodeBlock finishCodeBlock(UniqueLinkData* linkData); - - // Generate a code block containing all stubs that are shared between the - // different tiers. - [[nodiscard]] bool generateSharedStubs(); - - // Starts the creation of a complete tier of wasm code. Every function - // defined in this module must be compiled, then finishCompleteTier must be - // called. - [[nodiscard]] bool startCompleteTier(); - // Finishes a complete tier of wasm code. Returns a `linkData` through an - // out-param that can be serialized with the code block. 
- UniqueCodeBlock finishCompleteTier(UniqueLinkData* linkData); - + bool finishCodegen(); + bool finishMetadataTier(); + UniqueCodeTier finishCodeTier(); bool finishCodeMetadata(const Bytes& bytecode); bool isAsmJS() const { return codeMeta_->isAsmJS(); } diff --git a/js/src/wasm/WasmInstance-inl.h b/js/src/wasm/WasmInstance-inl.h index 7c50d70ef73ed..49457bf635cc8 100644 --- a/js/src/wasm/WasmInstance-inl.h +++ b/js/src/wasm/WasmInstance-inl.h @@ -14,6 +14,14 @@ namespace js { namespace wasm { +const CodeTier& Instance::code(Tier t) const { return code_->codeTier(t); } + +uint8_t* Instance::codeBase(Tier t) const { return code_->segment(t).base(); } + +const MetadataTier& Instance::metadata(Tier t) const { + return code_->metadata(t); +} + const CodeMetadata& Instance::codeMeta() const { return code_->codeMeta(); } const CodeMetadataForAsmJS* Instance::codeMetaForAsmJS() const { return code_->codeMetaForAsmJS(); diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp index 52c68535abb0f..cfaf65b5b96f9 100644 --- a/js/src/wasm/WasmInstance.cpp +++ b/js/src/wasm/WasmInstance.cpp @@ -227,8 +227,10 @@ bool Instance::callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, uint64_t* argv) { AssertRealmUnchanged aru(cx); - const FuncImport& fi = code().funcImport(funcImportIndex); - const FuncType& funcType = code().getFuncImportType(funcImportIndex); + Tier tier = code().bestTier(); + + const FuncImport& fi = metadata(tier).funcImports[funcImportIndex]; + const FuncType& funcType = codeMeta().getFuncImportType(fi); ArgTypeVector argTypes(funcType); InvokeArgs args(cx); @@ -315,12 +317,15 @@ bool Instance::callImport(JSContext* cx, uint32_t funcImportIndex, #endif // The import may already have become optimized. - void* jitExitCode = - code().sharedStubs().segment->base() + fi.jitExitCodeOffset(); - if (import.code == jitExitCode) { - return true; + for (auto t : code().tiers()) { + void* jitExitCode = codeBase(t) + fi.jitExitCodeOffset(); + if (import.code == jitExitCode) { + return true; + } } + void* jitExitCode = codeBase(tier) + fi.jitExitCodeOffset(); + if (!importCallable->is()) { return true; } @@ -1012,16 +1017,20 @@ bool Instance::iterElemsFunctions(const ModuleElemSegment& seg, return true; } - const FuncImportVector& funcImports = code().funcImports(); + Tier tier = code().bestTier(); + const MetadataTier& metadataTier = metadata(tier); + const FuncImportVector& funcImports = metadataTier.funcImports; + const CodeRangeVector& codeRanges = metadataTier.codeRanges; + const Uint32Vector& funcToCodeRange = metadataTier.funcToCodeRange; + const Uint32Vector& elemIndices = seg.elemIndices; + uint8_t* codeBaseTier = codeBase(tier); for (uint32_t i = 0; i < seg.numElements(); i++) { - uint32_t elemFuncIndex = seg.elemIndices[i]; - - if (elemFuncIndex < funcImports.length()) { + uint32_t elemIndex = elemIndices[i]; + if (elemIndex < metadataTier.funcImports.length()) { FuncImportInstanceData& import = - funcImportInstanceData(funcImports[elemFuncIndex]); + funcImportInstanceData(funcImports[elemIndex]); MOZ_ASSERT(import.callable->isCallable()); - if (import.callable->is()) { JSFunction* fun = &import.callable->as(); if (IsWasmExportedFunction(fun)) { @@ -1034,11 +1043,11 @@ bool Instance::iterElemsFunctions(const ModuleElemSegment& seg, WasmInstanceObject* calleeInstanceObj = ExportedFunctionToInstanceObject(fun); Instance& calleeInstance = calleeInstanceObj->instance(); - uint8_t* codeRangeBase; - const CodeRange* codeRange; - 
calleeInstanceObj->getExportedFunctionCodeRange(fun, &codeRange, - &codeRangeBase); - void* code = codeRangeBase + codeRange->funcCheckedCallEntry(); + Tier calleeTier = calleeInstance.code().bestTier(); + const CodeRange& calleeCodeRange = + calleeInstanceObj->getExportedFunctionCodeRange(fun, calleeTier); + void* code = calleeInstance.codeBase(calleeTier) + + calleeCodeRange.funcCheckedCallEntry(); if (!onFunc(i, code, &calleeInstance)) { return false; } @@ -1047,12 +1056,8 @@ bool Instance::iterElemsFunctions(const ModuleElemSegment& seg, } } - const CodeBlock& codeBlock = code().funcCodeBlock(elemFuncIndex); - const CodeRangeVector& codeRanges = codeBlock.codeRanges; - const FuncToCodeRangeMap& funcToCodeRange = codeBlock.funcToCodeRange; - void* code = - codeBlock.segment->base() + - codeRanges[funcToCodeRange[elemFuncIndex]].funcCheckedCallEntry(); + void* code = codeBaseTier + + codeRanges[funcToCodeRange[elemIndex]].funcCheckedCallEntry(); if (!onFunc(i, code, this)) { return false; } @@ -1341,7 +1346,9 @@ static int32_t MemDiscardShared(Instance* instance, I byteOffset, I byteLen, MOZ_ASSERT(SASigRefFunc.failureMode == FailureMode::FailOnInvalidRef); JSContext* cx = instance->cx(); - const FuncImportVector& funcImports = instance->code().funcImports(); + Tier tier = instance->code().bestTier(); + const MetadataTier& metadataTier = instance->metadata(tier); + const FuncImportVector& funcImports = metadataTier.funcImports; // If this is an import, we need to recover the original function to maintain // reference equality between a re-exported function and 'ref.func'. The @@ -2290,7 +2297,11 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports, const ModuleElemSegmentVector& elemSegments) { MOZ_ASSERT(!!maybeDebug_ == codeMeta().debugEnabled); - MOZ_ASSERT(funcImports.length() == code().funcImports().length()); +#ifdef DEBUG + for (auto t : code_->tiers()) { + MOZ_ASSERT(funcImports.length() == metadata(t).funcImports.length()); + } +#endif MOZ_ASSERT(tables_.length() == codeMeta().tables.length()); cx_ = cx; @@ -2379,13 +2390,15 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports, } // Initialize function imports in the instance data - for (size_t i = 0; i < code().funcImports().length(); i++) { + Tier callerTier = code_->bestTier(); + for (size_t i = 0; i < metadata(callerTier).funcImports.length(); i++) { JSObject* f = funcImports[i]; #ifdef ENABLE_WASM_JSPI if (JSObject* suspendingObject = MaybeUnwrapSuspendingObject(f)) { // Compile suspending function Wasm wrapper. 
- const FuncType& funcType = code().getFuncImportType(i); + const FuncImport& fi = metadata(callerTier).funcImports[i]; + const FuncType& funcType = codeMeta().getFuncImportType(fi); RootedObject wrapped(cx, suspendingObject); RootedFunction wrapper( cx, WasmSuspendingFunctionCreate(cx, wrapped, funcType)); @@ -2398,8 +2411,8 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports, #endif MOZ_ASSERT(f->isCallable()); - const FuncImport& fi = code().funcImport(i); - const FuncType& funcType = code().getFuncImportType(i); + const FuncImport& fi = metadata(callerTier).funcImports[i]; + const FuncType& funcType = codeMeta().getFuncImportType(fi); FuncImportInstanceData& import = funcImportInstanceData(fi); import.callable = f; if (f->is()) { @@ -2408,13 +2421,14 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports, WasmInstanceObject* calleeInstanceObj = ExportedFunctionToInstanceObject(fun); Instance& calleeInstance = calleeInstanceObj->instance(); - uint8_t* codeRangeBase; - const CodeRange* codeRange; - calleeInstanceObj->getExportedFunctionCodeRange( - &f->as(), &codeRange, &codeRangeBase); + Tier calleeTier = calleeInstance.code().bestTier(); + const CodeRange& codeRange = + calleeInstanceObj->getExportedFunctionCodeRange( + &f->as(), calleeTier); import.instance = &calleeInstance; import.realm = fun->realm(); - import.code = codeRangeBase + codeRange->funcUncheckedCallEntry(); + import.code = calleeInstance.codeBase(calleeTier) + + codeRange.funcUncheckedCallEntry(); } else if (void* thunk = MaybeGetBuiltinThunk(fun, funcType)) { import.instance = this; import.realm = fun->realm(); @@ -2422,14 +2436,12 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports, } else { import.instance = this; import.realm = fun->realm(); - import.code = - code().sharedStubs().segment->base() + fi.interpExitCodeOffset(); + import.code = codeBase(callerTier) + fi.interpExitCodeOffset(); } } else { import.instance = this; import.realm = f->nonCCWRealm(); - import.code = - code().sharedStubs().segment->base() + fi.interpExitCodeOffset(); + import.code = codeBase(callerTier) + fi.interpExitCodeOffset(); } } @@ -2705,7 +2717,7 @@ void Instance::tracePrivate(JSTracer* trc) { // OK to just do one tier here; though the tiers have different funcImports // tables, they share the instance object. - for (const FuncImport& fi : code().funcImports()) { + for (const FuncImport& fi : metadata(code().stableTier()).funcImports) { TraceNullableEdge(trc, &funcImportInstanceData(fi).callable, "wasm import"); } @@ -2906,18 +2918,90 @@ WasmInstanceObject* Instance::objectUnbarriered() const { WasmInstanceObject* Instance::object() const { return object_; } +static bool EnsureEntryStubs(const Instance& instance, uint32_t funcIndex, + const FuncExport** funcExport, + void** interpEntry) { + Tier tier = instance.code().bestTier(); + + size_t funcExportIndex; + *funcExport = + &instance.metadata(tier).lookupFuncExport(funcIndex, &funcExportIndex); + + const FuncExport& fe = **funcExport; + if (fe.hasEagerStubs()) { + *interpEntry = instance.codeBase(tier) + fe.eagerInterpEntryOffset(); + return true; + } + + MOZ_ASSERT(!instance.isAsmJS(), "only wasm can lazily export functions"); + + // If the best tier is Ion, life is simple: background compilation has + // already completed and has been committed, so there's no risk of race + // conditions here. + // + // If the best tier is Baseline, there could be a background compilation + // happening at the same time. 
The background compilation will lock the + // first tier lazy stubs first to stop new baseline stubs from being + // generated, then the second tier stubs to generate them. + // + // - either we take the tier1 lazy stub lock before the background + // compilation gets it, then we generate the lazy stub for tier1. When the + // background thread gets the tier1 lazy stub lock, it will see it has a + // lazy stub and will recompile it for tier2. + // - or we don't take the lock here first. Background compilation won't + // find a lazy stub for this function, thus won't generate it. So we'll do + // it ourselves after taking the tier2 lock. + // + // Also see doc block for stubs in WasmJS.cpp. + + auto stubs = instance.code(tier).lazyStubs().writeLock(); + *interpEntry = stubs->lookupInterpEntry(fe.funcIndex()); + if (*interpEntry) { + return true; + } + + // The best tier might have changed after we've taken the lock. + Tier prevTier = tier; + tier = instance.code().bestTier(); + const CodeMetadata& codeMeta = instance.codeMeta(); + const CodeTier& codeTier = instance.code(tier); + if (tier == prevTier) { + if (!stubs->createOneEntryStub(funcExportIndex, codeMeta, codeTier)) { + return false; + } + + *interpEntry = stubs->lookupInterpEntry(fe.funcIndex()); + MOZ_ASSERT(*interpEntry); + return true; + } + + MOZ_RELEASE_ASSERT(prevTier == Tier::Baseline && tier == Tier::Optimized); + auto stubs2 = instance.code(tier).lazyStubs().writeLock(); + + // If it didn't have a stub in the first tier, background compilation + // shouldn't have made one in the second tier. + MOZ_ASSERT(!stubs2->hasEntryStub(fe.funcIndex())); + + if (!stubs2->createOneEntryStub(funcExportIndex, codeMeta, codeTier)) { + return false; + } + + *interpEntry = stubs2->lookupInterpEntry(fe.funcIndex()); + MOZ_ASSERT(*interpEntry); + return true; +} + static bool GetInterpEntryAndEnsureStubs(JSContext* cx, Instance& instance, uint32_t funcIndex, const CallArgs& args, void** interpEntry, const FuncType** funcType) { const FuncExport* funcExport; - if (!instance.code().getOrCreateInterpEntry(funcIndex, &funcExport, - interpEntry)) { + if (!EnsureEntryStubs(instance, funcIndex, &funcExport, interpEntry)) { return false; } - *funcType = &instance.code().getFuncExportType(funcIndex); + *funcType = &instance.codeMeta().getFuncExportType(*funcExport); #ifdef DEBUG // EnsureEntryStubs() has ensured proper jit-entry stubs have been created and @@ -3415,13 +3499,14 @@ void Instance::destroyBreakpointSite(JS::GCContext* gcx, uint32_t offset) { void Instance::disassembleExport(JSContext* cx, uint32_t funcIndex, Tier tier, PrintCallback printString) const { - const CodeBlock& codeBlock = code().funcCodeBlock(funcIndex); - const FuncExport& funcExport = codeBlock.lookupFuncExport(funcIndex); - const CodeRange& range = codeBlock.codeRange(funcExport); - const CodeSegment& segment = *codeBlock.segment; - - MOZ_ASSERT(range.begin() < segment.lengthBytes()); - MOZ_ASSERT(range.end() < segment.lengthBytes()); + const MetadataTier& metadataTier = metadata(tier); + const FuncExport& funcExport = metadataTier.lookupFuncExport(funcIndex); + const CodeRange& range = metadataTier.codeRange(funcExport); + const CodeTier& codeTier = code(tier); + const ModuleSegment& segment = codeTier.segment(); + + MOZ_ASSERT(range.begin() < segment.length()); + MOZ_ASSERT(range.end() < segment.length()); uint8_t* functionCode = segment.base() + range.begin(); jit::Disassemble(functionCode, range.end() - range.begin(), printString); diff --git 
a/js/src/wasm/WasmInstance.h b/js/src/wasm/WasmInstance.h index 074db85df7a07..e37edeaae3eda 100644 --- a/js/src/wasm/WasmInstance.h +++ b/js/src/wasm/WasmInstance.h @@ -364,6 +364,9 @@ class alignas(16) Instance { void setDebugFilter(uint32_t funcIndex, bool value); const Code& code() const { return *code_; } + inline const CodeTier& code(Tier t) const; + inline uint8_t* codeBase(Tier t) const; + inline const MetadataTier& metadata(Tier t) const; inline const CodeMetadata& codeMeta() const; inline const CodeMetadataForAsmJS* codeMetaForAsmJS() const; inline bool isAsmJS() const; diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp index 9ce1e99e8e854..0d52c6f20cb67 100644 --- a/js/src/wasm/WasmJS.cpp +++ b/js/src/wasm/WasmJS.cpp @@ -491,7 +491,7 @@ bool wasm::CompileAndSerialize(JSContext* cx, const ShareableBytes& bytecode, return false; } - MOZ_ASSERT(module->code().hasCompleteTier(Tier::Serialized)); + MOZ_ASSERT(module->code().hasTier(Tier::Serialized)); MOZ_ASSERT(listener.called); return !listener.serialized->empty(); } @@ -1009,10 +1009,7 @@ const JSFunctionSpec WasmModuleObject::static_methods[] = { /* static */ void WasmModuleObject::finalize(JS::GCContext* gcx, JSObject* obj) { const Module& module = obj->as().module(); - size_t codeMemory = module.tier1CodeMemoryUsed(); - if (codeMemory) { - obj->zone()->decJitMemory(codeMemory); - } + obj->zone()->decJitMemory(module.codeLength(module.code().stableTier())); gcx->release(obj, &module, module.gcMallocBytesExcludingCode(), MemoryUse::WasmModule); } @@ -1129,7 +1126,8 @@ bool WasmModuleObject::imports(JSContext* cx, unsigned argc, Value* vp) { #ifdef ENABLE_WASM_TYPE_REFLECTIONS const CodeMetadata& codeMeta = module->codeMeta(); - const Code& code = module->code(); + const MetadataTier& metadataTier = + module->metadata(module->code().stableTier()); size_t numFuncImport = 0; size_t numMemoryImport = 0; @@ -1170,7 +1168,8 @@ bool WasmModuleObject::imports(JSContext* cx, unsigned argc, Value* vp) { switch (import.kind) { case DefinitionKind::Function: { size_t funcIndex = numFuncImport++; - const FuncType& funcType = code.getFuncImportType(funcIndex); + const FuncType& funcType = + codeMeta.getFuncImportType(metadataTier.funcImports[funcIndex]); typeObj = FuncTypeToObject(cx, funcType); break; } @@ -1250,6 +1249,8 @@ bool WasmModuleObject::exports(JSContext* cx, unsigned argc, Value* vp) { #ifdef ENABLE_WASM_TYPE_REFLECTIONS const CodeMetadata& codeMeta = module->codeMeta(); + const MetadataTier& metadataTier = + module->metadata(module->code().stableTier()); #endif // ENABLE_WASM_TYPE_REFLECTIONS for (const Export& exp : moduleMeta.exports) { @@ -1276,8 +1277,8 @@ bool WasmModuleObject::exports(JSContext* cx, unsigned argc, Value* vp) { RootedObject typeObj(cx); switch (exp.kind()) { case DefinitionKind::Function: { - const FuncType& funcType = - module->code().getFuncExportType(exp.funcIndex()); + const FuncExport& fe = metadataTier.lookupFuncExport(exp.funcIndex()); + const FuncType& funcType = codeMeta.getFuncExportType(fe); typeObj = FuncTypeToObject(cx, funcType); break; } @@ -1420,10 +1421,7 @@ WasmModuleObject* WasmModuleObject::create(JSContext* cx, const Module& module, // Bug 1569888: We account for the first tier here; the second tier, if // different, also needs to be accounted for. 
- size_t codeMemory = module.tier1CodeMemoryUsed(); - if (codeMemory) { - cx->zone()->incJitMemory(codeMemory); - } + cx->zone()->incJitMemory(module.codeLength(module.code().stableTier())); return obj; } @@ -1953,7 +1951,7 @@ static bool WasmCall(JSContext* cx, unsigned argc, Value* vp) { * * The explicitly exported functions have stubs created for them eagerly. Eager * stubs are created with their tier when the module is compiled, see - * ModuleGenerator::finishCodeBlock(), which calls wasm::GenerateStubs(), which + * ModuleGenerator::finishCodeTier(), which calls wasm::GenerateStubs(), which * generates stubs for functions with eager stubs. * * An eager stub for tier-1 is upgraded to tier-2 if the module tiers up, see @@ -2080,7 +2078,30 @@ static bool WasmCall(JSContext* cx, unsigned argc, Value* vp) { * * The locking protocol ensuring that all stubs are upgraded properly and * that the system switches to creating tier-2 stubs is implemented in - * Module::finishTier2() and EnsureEntryStubs(). + * Module::finishTier2() and EnsureEntryStubs(): + * + * There are two locks, one per code tier. + * + * EnsureEntryStubs() is attempting to create a tier-appropriate lazy stub, + * so it takes the lock for the current best tier, checks to see if there is + * a stub, and exits if there is. If the tier changed racily it takes the + * other lock too, since that is now the lock for the best tier. Then it + * creates the stub, installs it, and releases the locks. Thus at most one + * stub per tier can be created at a time. + * + * Module::finishTier2() takes both locks (tier-1 before tier-2), thus + * preventing EnsureEntryStubs() from creating stubs while stub upgrading is + * going on, and itself waiting until EnsureEntryStubs() is not active. Once + * it has both locks, it upgrades all lazy stubs and makes tier-2 the new + * best tier. Should EnsureEntryStubs subsequently enter, it will find that + * a stub already exists at tier-2 and will exit early. + * + * (It would seem that the locking protocol could be simplified a little by + * having only one lock, hanging off the Code object, or by unconditionally + * taking both locks in EnsureEntryStubs(). However, in some cases where we + * acquire a lock the Code object is not readily available, so plumbing would + * have to be added, and in EnsureEntryStubs(), there are sometimes not two code + * tiers.) 
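// Illustrative sketch (not from the patch): the locking protocol described in
// the comment above, condensed, with std::mutex standing in for the per-tier
// lazy-stub locks. All names here (LazyStubProtocolSketch, compileStub,
// recompileStub) are hypothetical; the real logic lives in EnsureEntryStubs()
// and Module::finishTier2().
#include <atomic>
#include <cstdint>
#include <mutex>
#include <unordered_map>

struct LazyStubProtocolSketch {
  std::mutex tier1Lock;                       // guards baseline entry stubs
  std::mutex tier2Lock;                       // guards optimized entry stubs
  std::unordered_map<uint32_t, void*> tier1;  // funcIndex -> interp entry stub
  std::unordered_map<uint32_t, void*> tier2;
  std::atomic<bool> tier2IsBest{false};       // flipped by the upgrade path

  // EnsureEntryStubs()-style path: lock the current best tier, re-check under
  // the lock, and fall through to the tier-2 lock if tier-up raced with us.
  void* getOrCreate(uint32_t funcIndex, void* (*compileStub)(uint32_t)) {
    bool sawTier2 = tier2IsBest.load();
    std::unique_lock<std::mutex> lock(sawTier2 ? tier2Lock : tier1Lock);
    auto& cache = sawTier2 ? tier2 : tier1;
    if (auto it = cache.find(funcIndex); it != cache.end()) {
      return it->second;
    }
    if (!sawTier2 && tier2IsBest.load()) {
      // The best tier changed while we waited: also take the tier-2 lock
      // (same tier-1-before-tier-2 order as the upgrade path) and create the
      // stub there unless the upgrade already did.
      std::lock_guard<std::mutex> lock2(tier2Lock);
      if (auto it = tier2.find(funcIndex); it != tier2.end()) {
        return it->second;
      }
      return tier2[funcIndex] = compileStub(funcIndex);
    }
    return cache[funcIndex] = compileStub(funcIndex);
  }

  // Module::finishTier2()-style path: hold both locks, tier-1 before tier-2,
  // while every existing tier-1 stub is recompiled at tier 2, then publish.
  void upgradeAll(void* (*recompileStub)(uint32_t)) {
    std::lock_guard<std::mutex> l1(tier1Lock);
    std::lock_guard<std::mutex> l2(tier2Lock);
    for (const auto& entry : tier1) {
      tier2[entry.first] = recompileStub(entry.first);
    }
    tier2IsBest.store(true);
  }
};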
* * ## Stub lifetimes and serialization * @@ -2101,9 +2122,10 @@ bool WasmInstanceObject::getExportedFunction( } const Instance& instance = instanceObj->instance(); - const CodeBlock& codeBlock = instance.code().funcCodeBlock(funcIndex); - const FuncExport& funcExport = codeBlock.lookupFuncExport(funcIndex); - const TypeDef& funcTypeDef = instance.code().getFuncExportTypeDef(funcIndex); + const FuncExport& funcExport = + instance.metadata(instance.code().bestTier()).lookupFuncExport(funcIndex); + const TypeDef& funcTypeDef = + instance.codeMeta().getFuncExportTypeDef(funcExport); unsigned numArgs = funcTypeDef.funcType().args().length(); if (instance.isAsmJS()) { @@ -2167,9 +2189,12 @@ bool WasmInstanceObject::getExportedFunction( fun->setExtendedSlot(FunctionExtended::WASM_STV_SLOT, PrivateValue((void*)funcTypeDef.superTypeVector())); - const CodeRange& codeRange = codeBlock.codeRange(funcExport); + const CodeTier& codeTier = + instance.code().codeTier(instance.code().bestTier()); + const CodeRange& codeRange = codeTier.metadata().codeRange(funcExport); + fun->setExtendedSlot(FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT, - PrivateValue(codeBlock.segment->base() + + PrivateValue(codeTier.segment().base() + codeRange.funcUncheckedCallEntry())); if (!instanceObj->exports().putNew(funcIndex, fun)) { @@ -2180,13 +2205,12 @@ bool WasmInstanceObject::getExportedFunction( return true; } -void WasmInstanceObject::getExportedFunctionCodeRange( - JSFunction* fun, const wasm::CodeRange** range, uint8_t** codeBase) { - uint32_t funcIndex = wasm::ExportedFunctionToFuncIndex(fun); +const CodeRange& WasmInstanceObject::getExportedFunctionCodeRange( + JSFunction* fun, Tier tier) { + uint32_t funcIndex = ExportedFunctionToFuncIndex(fun); MOZ_ASSERT(exports().lookup(funcIndex)->value() == fun); - const CodeBlock& code = instance().code().funcCodeBlock(funcIndex); - *range = &code.codeRanges[code.funcToCodeRange[funcIndex]]; - *codeBase = code.segment->base(); + const MetadataTier& metadata = instance().metadata(tier); + return metadata.codeRange(metadata.lookupFuncExport(funcIndex)); } /* static */ @@ -2255,10 +2279,6 @@ uint32_t wasm::ExportedFunctionToFuncIndex(JSFunction* fun) { return fun->wasmInstance().code().getFuncIndex(fun); } -const wasm::TypeDef& wasm::ExportedFunctionToTypeDef(JSFunction* fun) { - return *fun->wasmTypeDef(); -} - // ============================================================================ // WebAssembly.Memory class and methods @@ -4069,8 +4089,11 @@ bool WasmFunctionTypeImpl(JSContext* cx, const CallArgs& args) { RootedFunction function(cx, &args.thisv().toObject().as()); Rooted instanceObj( cx, ExportedFunctionToInstanceObject(function)); - const TypeDef& funcTypeDef = ExportedFunctionToTypeDef(function); - const FuncType& funcType = funcTypeDef.funcType(); + uint32_t funcIndex = ExportedFunctionToFuncIndex(function); + Instance& instance = instanceObj->instance(); + const FuncExport& fe = + instance.metadata(instance.code().bestTier()).lookupFuncExport(funcIndex); + const FuncType& funcType = instance.codeMeta().getFuncExportType(fe); RootedObject typeObj(cx, FuncTypeToObject(cx, funcType)); if (!typeObj) { return false; diff --git a/js/src/wasm/WasmJS.h b/js/src/wasm/WasmJS.h index 0a2ea1caa9421..3ebc2d9705edb 100644 --- a/js/src/wasm/WasmJS.h +++ b/js/src/wasm/WasmJS.h @@ -111,7 +111,6 @@ bool IsWasmExportedFunction(JSFunction* fun); Instance& ExportedFunctionToInstance(JSFunction* fun); WasmInstanceObject* ExportedFunctionToInstanceObject(JSFunction* fun); uint32_t 
ExportedFunctionToFuncIndex(JSFunction* fun); -const wasm::TypeDef& ExportedFunctionToTypeDef(JSFunction* fun); bool IsSharedWasmMemoryObject(JSObject* obj); @@ -251,9 +250,8 @@ class WasmInstanceObject : public NativeObject { JSContext* cx, Handle instanceObj, uint32_t funcIndex, MutableHandleFunction fun); - void getExportedFunctionCodeRange(JSFunction* fun, - const wasm::CodeRange** range, - uint8_t** codeBase); + const wasm::CodeRange& getExportedFunctionCodeRange(JSFunction* fun, + wasm::Tier tier); static WasmInstanceScope* getScope(JSContext* cx, Handle instanceObj); diff --git a/js/src/wasm/WasmModule.cpp b/js/src/wasm/WasmModule.cpp index 6c8baa56b6c2e..07a7e079cf851 100644 --- a/js/src/wasm/WasmModule.cpp +++ b/js/src/wasm/WasmModule.cpp @@ -20,7 +20,8 @@ #include -#include "js/BuildId.h" // JS::BuildIdCharVector +#include "jit/FlushICache.h" // for FlushExecutionContextForAllThreads +#include "js/BuildId.h" // JS::BuildIdCharVector #include "js/experimental/TypedData.h" // JS_NewUint8Array #include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_* #include "js/Printf.h" // JS_smprintf @@ -165,20 +166,97 @@ void Module::startTier2(const CompileArgs& args, const ShareableBytes& bytecode, StartOffThreadWasmTier2Generator(std::move(task)); } -bool Module::finishTier2(const LinkData& sharedStubsLinkData, - const LinkData& linkData2, - UniqueCodeBlock code2) const { - if (!code_->finishCompleteTier2(linkData2, std::move(code2))) { +bool Module::finishTier2(const LinkData& linkData2, + UniqueCodeTier code2) const { + MOZ_ASSERT(code().bestTier() == Tier::Baseline && + code2->tier() == Tier::Optimized); + + // Install the data in the data structures. They will not be visible + // until commitTier2(). + + const CodeTier* borrowedTier2; + if (!code().setAndBorrowTier2(std::move(code2), linkData2, &borrowedTier2)) { return false; } + // Before we can make tier-2 live, we need to compile tier2 versions of any + // extant tier1 lazy stubs (otherwise, tiering would break the assumption + // that any extant exported wasm function has had a lazy entry stub already + // compiled for it). + // + // Also see doc block for stubs in WasmJS.cpp. + { + // We need to prevent new tier1 stubs generation until we've committed + // the newer tier2 stubs, otherwise we might not generate one tier2 + // stub that has been generated for tier1 before we committed. + + const MetadataTier& metadataTier1 = metadata(Tier::Baseline); + + auto stubs1 = code().codeTier(Tier::Baseline).lazyStubs().readLock(); + auto stubs2 = borrowedTier2->lazyStubs().writeLock(); + + MOZ_ASSERT(stubs2->entryStubsEmpty()); + + Uint32Vector funcExportIndices; + for (size_t i = 0; i < metadataTier1.funcExports.length(); i++) { + const FuncExport& fe = metadataTier1.funcExports[i]; + if (fe.hasEagerStubs()) { + continue; + } + if (!stubs1->hasEntryStub(fe.funcIndex())) { + continue; + } + if (!funcExportIndices.emplaceBack(i)) { + return false; + } + } + + Maybe stub2Index; + if (!stubs2->createTier2(funcExportIndices, codeMeta(), *borrowedTier2, + &stub2Index)) { + return false; + } + + // Initializing the code above will have flushed the icache for all cores. + // However, there could still be stale data in the execution pipeline of + // other cores on some platforms. Force an execution context flush on all + // threads to fix this before we commit the code. 
+ // + // This is safe due to the check in `PlatformCanTier` in WasmCompile.cpp + jit::FlushExecutionContextForAllThreads(); + + // Now that we can't fail or otherwise abort tier2, make it live. + + MOZ_ASSERT(!code().hasTier2()); + code().commitTier2(); + + stubs2->setJitEntries(stub2Index, code()); + } + + // And we update the jump vectors with pointers to tier-2 functions and eager + // stubs. Callers will continue to invoke tier-1 code until, suddenly, they + // will invoke tier-2 code. This is benign. + + uint8_t* base = code().segment(Tier::Optimized).base(); + for (const CodeRange& cr : metadata(Tier::Optimized).codeRanges) { + // These are racy writes that we just want to be visible, atomically, + // eventually. All hardware we care about will do this right. But + // we depend on the compiler not splitting the stores hidden inside the + // set*Entry functions. + if (cr.isFunction()) { + code().setTieringEntry(cr.funcIndex(), base + cr.funcTierEntry()); + } else if (cr.isJitEntry()) { + code().setJitEntry(cr.funcIndex(), base + cr.begin()); + } + } + // Tier-2 is done; let everyone know. Mark tier-2 active for testing // purposes so that wasmHasTier2CompilationCompleted() only returns true // after tier-2 has been fully cached. if (tier2Listener_) { Bytes bytes; - if (serialize(sharedStubsLinkData, linkData2, &bytes)) { + if (serialize(linkData2, &bytes)) { tier2Listener_->storeOptimizedEncoding(bytes.begin(), bytes.length()); } tier2Listener_ = nullptr; @@ -281,22 +359,21 @@ bool Module::extractCode(JSContext* cx, Tier tier, // block on tiered compilation to complete. testingBlockOnTier2Complete(); - if (!code_->hasCompleteTier(tier)) { + if (!code_->hasTier(tier)) { vp.setNull(); return true; } - const CodeBlock& codeBlock = code_->completeTierCodeBlock(tier); - const CodeSegment& codeSegment = *codeBlock.segment; - RootedObject codeObj(cx, JS_NewUint8Array(cx, codeSegment.lengthBytes())); - if (!codeObj) { + const ModuleSegment& moduleSegment = code_->segment(tier); + RootedObject code(cx, JS_NewUint8Array(cx, moduleSegment.length())); + if (!code) { return false; } - memcpy(codeObj->as().dataPointerUnshared(), - codeSegment.base(), codeSegment.lengthBytes()); + memcpy(code->as().dataPointerUnshared(), + moduleSegment.base(), moduleSegment.length()); - RootedValue value(cx, ObjectValue(*codeObj)); + RootedValue value(cx, ObjectValue(*code)); if (!JS_DefineProperty(cx, result, "code", value, JSPROP_ENUMERATE)) { return false; } @@ -306,7 +383,7 @@ bool Module::extractCode(JSContext* cx, Tier tier, return false; } - for (const CodeRange& p : codeBlock.codeRanges) { + for (const CodeRange& p : metadata(tier).codeRanges) { RootedObject segment(cx, NewPlainObjectWithProto(cx, nullptr)); if (!segment) { return false; @@ -378,14 +455,18 @@ static const Import& FindImportFunction(const ImportVector& imports, bool Module::instantiateFunctions(JSContext* cx, const JSObjectVector& funcImports) const { #ifdef DEBUG - MOZ_ASSERT(funcImports.length() == code().funcImports().length()); + for (auto t : code().tiers()) { + MOZ_ASSERT(funcImports.length() == metadata(t).funcImports.length()); + } #endif if (codeMeta().isAsmJS()) { return true; } - for (size_t i = 0; i < code().funcImports().length(); i++) { + Tier tier = code().stableTier(); + + for (size_t i = 0; i < metadata(tier).funcImports.length(); i++) { if (!funcImports[i]->is()) { continue; } @@ -397,10 +478,12 @@ bool Module::instantiateFunctions(JSContext* cx, uint32_t funcIndex = ExportedFunctionToFuncIndex(f); Instance& instance = 
ExportedFunctionToInstance(f); + Tier otherTier = instance.code().stableTier(); - const TypeDef& exportFuncType = - instance.code().getFuncExportTypeDef(funcIndex); - const TypeDef& importFuncType = code().getFuncImportTypeDef(i); + const TypeDef& exportFuncType = instance.codeMeta().getFuncExportTypeDef( + instance.metadata(otherTier).lookupFuncExport(funcIndex)); + const TypeDef& importFuncType = + codeMeta().getFuncImportTypeDef(metadata(tier).funcImports[i]); if (!TypeDef::isSubTypeOf(&exportFuncType, &importFuncType)) { const Import& import = FindImportFunction(moduleMeta().imports, i); diff --git a/js/src/wasm/WasmModule.h b/js/src/wasm/WasmModule.h index d5ea067e7cfbe..dbfbd05f2c7ff 100644 --- a/js/src/wasm/WasmModule.h +++ b/js/src/wasm/WasmModule.h @@ -75,8 +75,8 @@ struct ImportValues { // where the LinkData is also available, which is primarily (1) at the end of // module generation, (2) at the end of tier-2 compilation. // -// Fully linked-and-instantiated code (represented by SharedCode) can be shared -// between instances. +// Fully linked-and-instantiated code (represented by SharedCode and its owned +// ModuleSegment) can be shared between instances. class Module : public JS::WasmModule { // This has the same lifetime end as Module itself -- it can be dropped when @@ -154,14 +154,16 @@ class Module : public JS::WasmModule { ~Module() override; const Code& code() const { return *code_; } + const ModuleSegment& moduleSegment(Tier t) const { return code_->segment(t); } const ModuleMetadata& moduleMeta() const { return *moduleMeta_; } const CodeMetadata& codeMeta() const { return code_->codeMeta(); } const CodeMetadataForAsmJS* codeMetaForAsmJS() const { return code_->codeMetaForAsmJS(); } + const MetadataTier& metadata(Tier t) const { return code_->metadata(t); } const CustomSectionVector& customSections() const { return customSections_; } const Bytes& debugBytecode() const { return debugBytecode_->bytes; } - uint32_t tier1CodeMemoryUsed() const { return code_->tier1CodeMemoryUsed(); } + uint32_t codeLength(Tier t) const { return code_->segment(t).length(); } // Instantiate this module with the given imports: @@ -176,17 +178,14 @@ class Module : public JS::WasmModule { void startTier2(const CompileArgs& args, const ShareableBytes& bytecode, JS::OptimizedEncodingListener* listener); - bool finishTier2(const LinkData& sharedStubsLinkData, - const LinkData& linkData2, UniqueCodeBlock code2) const; + bool finishTier2(const LinkData& linkData2, UniqueCodeTier code2) const; void testingBlockOnTier2Complete() const; bool testingTier2Active() const { return testingTier2Active_; } // Code caching support. 
- [[nodiscard]] bool serialize(const LinkData& sharedStubsLinkData, - const LinkData& optimizedLinkData, - Bytes* bytes) const; + [[nodiscard]] bool serialize(const LinkData& linkData, Bytes* bytes) const; static RefPtr deserialize(const uint8_t* begin, size_t size); bool loggingDeserialized() const { return loggingDeserialized_; } @@ -213,9 +212,7 @@ class Module : public JS::WasmModule { bool extractCode(JSContext* cx, Tier tier, MutableHandleValue vp) const; - WASM_DECLARE_FRIEND_SERIALIZE_ARGS(Module, - const wasm::LinkData& sharedStubsLinkData, - const wasm::LinkData& optimizedLinkData); + WASM_DECLARE_FRIEND_SERIALIZE_ARGS(Module, const wasm::LinkData& linkData); }; using MutableModule = RefPtr; diff --git a/js/src/wasm/WasmPI.cpp b/js/src/wasm/WasmPI.cpp index 97dd17480f0cc..98050e4cc1791 100644 --- a/js/src/wasm/WasmPI.cpp +++ b/js/src/wasm/WasmPI.cpp @@ -1752,8 +1752,12 @@ JSObject* GetSuspendingPromiseResult(Instance* instance, // The struct object is constructed based on returns of exported function. // It is the only way we can get ValType for Val::fromJSValue call. - const wasm::FuncType& sig = instance->code().getFuncExportType( - SuspendingFunctionModuleFactory::ExportedFnIndex); + auto bestTier = instance->code().bestTier(); + const wasm::FuncExport& funcExport = + instance->metadata(bestTier).lookupFuncExport( + SuspendingFunctionModuleFactory::ExportedFnIndex); + const wasm::FuncType& sig = + instance->codeMeta().getFuncExportType(funcExport); if (fields.length() == 1) { RootedVal val(cx); diff --git a/js/src/wasm/WasmProcess.cpp b/js/src/wasm/WasmProcess.cpp index 1f865d44185a4..0436e0a23f073 100644 --- a/js/src/wasm/WasmProcess.cpp +++ b/js/src/wasm/WasmProcess.cpp @@ -20,6 +20,7 @@ #include "mozilla/Attributes.h" #include "mozilla/BinarySearch.h" +#include "mozilla/ScopeExit.h" #include "gc/Memory.h" #include "threading/ExclusiveData.h" @@ -37,77 +38,253 @@ using namespace wasm; using mozilla::BinarySearchIf; -Atomic wasm::CodeExists(false); - -// Per-process map from values of program-counter (pc) to CodeBlocks. +// Per-process map from values of program-counter (pc) to CodeSegments. // -// Whenever a new CodeBlock is ready to use, it has to be registered so that -// we can have fast lookups from pc to CodeBlocks in numerous places. Since +// Whenever a new CodeSegment is ready to use, it has to be registered so that +// we can have fast lookups from pc to CodeSegments in numerous places. Since // wasm compilation may be tiered, and the second tier doesn't have access to // any JSContext/JS::Compartment/etc lying around, we have to use a process-wide // map instead. -// This field is only atomic to handle buggy scenarios where we crash during -// startup or shutdown and thus racily perform wasm::LookupCodeBlock() from -// the crashing thread. +using CodeSegmentVector = Vector; -static Atomic sThreadSafeCodeBlockMap(nullptr); +Atomic wasm::CodeExists(false); -bool wasm::RegisterCodeBlock(const CodeBlock* cs) { - MOZ_ASSERT(cs->code->initialized()); +// Because of profiling, the thread running wasm might need to know to which +// CodeSegment the current PC belongs, during a call to lookup(). A lookup +// is a read-only operation, and we don't want to take a lock then +// (otherwise, we could have a deadlock situation if an async lookup +// happened on a given thread that was holding mutatorsMutex_ while getting +// sampled). 
Since the writer could be modifying the data that is getting +// looked up, the writer functions use spin-locks to know if there are any +// observers (i.e. calls to lookup()) of the atomic data. + +static Atomic sNumActiveLookups(0); + +class ProcessCodeSegmentMap { + // Since writes (insertions or removals) can happen on any background + // thread at the same time, we need a lock here. + + Mutex mutatorsMutex_ MOZ_UNANNOTATED; + + CodeSegmentVector segments1_; + CodeSegmentVector segments2_; + + // Except during swapAndWait(), there are no lookup() observers of the + // vector pointed to by mutableCodeSegments_ + + CodeSegmentVector* mutableCodeSegments_; + Atomic readonlyCodeSegments_; + + struct CodeSegmentPC { + const void* pc; + explicit CodeSegmentPC(const void* pc) : pc(pc) {} + int operator()(const CodeSegment* cs) const { + if (cs->containsCodePC(pc)) { + return 0; + } + if (pc < cs->base()) { + return -1; + } + return 1; + } + }; + + void swapAndWait() { + // Both vectors are consistent for lookup at this point although their + // contents are different: there is no way for the looked up PC to be + // in the code segment that is getting registered, because the code + // segment is not even fully created yet. + + // If a lookup happens before this instruction, then the + // soon-to-become-former read-only pointer is used during the lookup, + // which is valid. + + mutableCodeSegments_ = const_cast( + readonlyCodeSegments_.exchange(mutableCodeSegments_)); + + // If a lookup happens after this instruction, then the updated vector + // is used, which is valid: + // - in case of insertion, it means the new vector contains more data, + // but it's fine since the code segment is getting registered and thus + // isn't even fully created yet, so the code can't be running. + // - in case of removal, it means the new vector contains one less + // entry, but it's fine since unregistering means the code segment + // isn't used by any live instance anymore, thus PC can't be in the + // to-be-removed code segment's range. + + // A lookup could have happened on any of the two vectors. Wait for + // observers to be done using any vector before mutating. + + while (sNumActiveLookups > 0) { + } + } - if (cs->length() == 0) { - return true; + public: + ProcessCodeSegmentMap() + : mutatorsMutex_(mutexid::WasmCodeSegmentMap), + mutableCodeSegments_(&segments1_), + readonlyCodeSegments_(&segments2_) {} + + ~ProcessCodeSegmentMap() { + MOZ_RELEASE_ASSERT(sNumActiveLookups == 0); + MOZ_ASSERT(segments1_.empty()); + MOZ_ASSERT(segments2_.empty()); + segments1_.clearAndFree(); + segments2_.clearAndFree(); } - // This function cannot race with startup/shutdown. 
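Illustrative sketch (not part of the patch): the reader/writer scheme that ProcessCodeSegmentMap implements with swapAndWait() above and the insert()/remove()/lookup() methods that follow. Readers never take a lock; a writer mutates the unpublished buffer, swaps the published pointer, spins until all in-flight lookups drain, then replays the mutation on the other buffer. The names below (PcMap, Entry) are invented, the search is linear rather than a binary search, and OOM handling is omitted.

    #include <atomic>
    #include <cstddef>
    #include <mutex>
    #include <vector>

    class PcMap {
      struct Entry {
        const char* base;
        size_t length;
      };

      std::mutex mutatorsMutex_;
      std::vector<Entry> buf1_, buf2_;
      std::vector<Entry>* mutable_ = &buf1_;
      std::atomic<std::vector<Entry>*> readonly_{&buf2_};
      std::atomic<size_t> activeLookups_{0};

      void swapAndWait() {
        // Publish the freshly mutated buffer, then wait until no lookup can
        // still be reading the buffer we are about to mutate next.
        mutable_ = readonly_.exchange(mutable_);
        while (activeLookups_.load() > 0) {
          // spin
        }
      }

     public:
      void insert(Entry e) {
        std::lock_guard<std::mutex> lock(mutatorsMutex_);
        mutable_->push_back(e);   // first copy, not yet visible to readers
        swapAndWait();            // make it visible
        mutable_->push_back(e);   // replay on the other buffer
      }

      // Returns the code base containing pc, or nullptr. The result is copied
      // out before the active-lookup count drops, so writers may mutate the
      // buffers immediately afterwards.
      const char* lookupBase(const void* pc) {
        activeLookups_.fetch_add(1);
        const std::vector<Entry>* ro = readonly_.load();
        const char* base = nullptr;
        for (const Entry& e : *ro) {
          const char* p = static_cast<const char*>(pc);
          if (p >= e.base && p < e.base + e.length) {
            base = e.base;
            break;
          }
        }
        activeLookups_.fetch_sub(1);
        return base;
      }
    };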
- ThreadSafeCodeBlockMap* map = sThreadSafeCodeBlockMap; - MOZ_RELEASE_ASSERT(map); - bool result = map->insert(cs); - if (result) { + bool insert(const CodeSegment* cs) { + LockGuard lock(mutatorsMutex_); + + size_t index; + MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeSegments_, 0, + mutableCodeSegments_->length(), + CodeSegmentPC(cs->base()), &index)); + + if (!mutableCodeSegments_->insert(mutableCodeSegments_->begin() + index, + cs)) { + return false; + } + CodeExists = true; + + swapAndWait(); + +#ifdef DEBUG + size_t otherIndex; + MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeSegments_, 0, + mutableCodeSegments_->length(), + CodeSegmentPC(cs->base()), &otherIndex)); + MOZ_ASSERT(index == otherIndex); +#endif + + // Although we could simply revert the insertion in the read-only + // vector, it is simpler to just crash and given that each CodeSegment + // consumes multiple pages, it is unlikely this insert() would OOM in + // practice + AutoEnterOOMUnsafeRegion oom; + if (!mutableCodeSegments_->insert(mutableCodeSegments_->begin() + index, + cs)) { + oom.crash("when inserting a CodeSegment in the process-wide map"); + } + + return true; } - return result; -} -void wasm::UnregisterCodeBlock(const CodeBlock* cs) { - if (cs->length() == 0) { - return; + void remove(const CodeSegment* cs) { + LockGuard lock(mutatorsMutex_); + + size_t index; + MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeSegments_, 0, + mutableCodeSegments_->length(), + CodeSegmentPC(cs->base()), &index)); + + mutableCodeSegments_->erase(mutableCodeSegments_->begin() + index); + + if (!mutableCodeSegments_->length()) { + CodeExists = false; + } + + swapAndWait(); + +#ifdef DEBUG + size_t otherIndex; + MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeSegments_, 0, + mutableCodeSegments_->length(), + CodeSegmentPC(cs->base()), &otherIndex)); + MOZ_ASSERT(index == otherIndex); +#endif + + mutableCodeSegments_->erase(mutableCodeSegments_->begin() + index); + } + + const CodeSegment* lookup(const void* pc) { + const CodeSegmentVector* readonly = readonlyCodeSegments_; + + size_t index; + if (!BinarySearchIf(*readonly, 0, readonly->length(), CodeSegmentPC(pc), + &index)) { + return nullptr; + } + + // It is fine returning a raw CodeSegment*, because we assume we are + // looking up a live PC in code which is on the stack, keeping the + // CodeSegment alive. + + return (*readonly)[index]; } +}; + +// This field is only atomic to handle buggy scenarios where we crash during +// startup or shutdown and thus racily perform wasm::LookupCodeSegment() from +// the crashing thread. + +static Atomic sProcessCodeSegmentMap(nullptr); + +bool wasm::RegisterCodeSegment(const CodeSegment* cs) { + MOZ_ASSERT(cs->codeTier().code().initialized()); // This function cannot race with startup/shutdown. - ThreadSafeCodeBlockMap* map = sThreadSafeCodeBlockMap; + ProcessCodeSegmentMap* map = sProcessCodeSegmentMap; MOZ_RELEASE_ASSERT(map); - size_t newCount = map->remove(cs); - if (newCount == 0) { - CodeExists = false; - } + return map->insert(cs); +} + +void wasm::UnregisterCodeSegment(const CodeSegment* cs) { + // This function cannot race with startup/shutdown. 
+ ProcessCodeSegmentMap* map = sProcessCodeSegmentMap; + MOZ_RELEASE_ASSERT(map); + map->remove(cs); } -const CodeBlock* wasm::LookupCodeBlock( +const CodeSegment* wasm::LookupCodeSegment( const void* pc, const CodeRange** codeRange /*= nullptr */) { - ThreadSafeCodeBlockMap* map = sThreadSafeCodeBlockMap; + // Since wasm::LookupCodeSegment() can race with wasm::ShutDown(), we must + // additionally keep sNumActiveLookups above zero for the duration we're + // using the ProcessCodeSegmentMap. wasm::ShutDown() spin-waits on + // sNumActiveLookups getting to zero. + + auto decObserver = mozilla::MakeScopeExit([&] { + MOZ_ASSERT(sNumActiveLookups > 0); + sNumActiveLookups--; + }); + sNumActiveLookups++; + + ProcessCodeSegmentMap* map = sProcessCodeSegmentMap; if (!map) { return nullptr; } - return map->lookup(pc, codeRange); + if (const CodeSegment* found = map->lookup(pc)) { + if (codeRange) { + *codeRange = found->isModule() ? found->asModule()->lookupRange(pc) + : found->asLazyStub()->lookupRange(pc); + } + return found; + } + + if (codeRange) { + *codeRange = nullptr; + } + + return nullptr; } const Code* wasm::LookupCode(const void* pc, const CodeRange** codeRange /* = nullptr */) { - const CodeBlock* found = LookupCodeBlock(pc, codeRange); + const CodeSegment* found = LookupCodeSegment(pc, codeRange); MOZ_ASSERT_IF(!found && codeRange, !*codeRange); - return found ? found->code : nullptr; + return found ? &found->code() : nullptr; } bool wasm::InCompiledCode(void* pc) { - if (LookupCodeBlock(pc)) { + if (LookupCodeSegment(pc)) { return true; } const CodeRange* codeRange; - const uint8_t* codeBase; + uint8_t* codeBase; return LookupBuiltinThunk(pc, &codeRange, &codeBase); } @@ -247,7 +424,7 @@ static bool InitTagForJSValue() { } bool wasm::Init() { - MOZ_RELEASE_ASSERT(!sThreadSafeCodeBlockMap); + MOZ_RELEASE_ASSERT(!sProcessCodeSegmentMap); // Assert invariants that should universally hold true, but cannot be checked // at compile time. @@ -258,7 +435,7 @@ bool wasm::Init() { ConfigureHugeMemory(); AutoEnterOOMUnsafeRegion oomUnsafe; - ThreadSafeCodeBlockMap* map = js_new(); + ProcessCodeSegmentMap* map = js_new(); if (!map) { oomUnsafe.crash("js::wasm::Init"); } @@ -272,7 +449,7 @@ bool wasm::Init() { oomUnsafe.crash("js::wasm::Init"); } - sThreadSafeCodeBlockMap = map; + sProcessCodeSegmentMap = map; if (!InitTagForJSValue()) { oomUnsafe.crash("js::wasm::Init"); @@ -298,12 +475,12 @@ void wasm::ShutDown() { sWrappedJSValueTagType = nullptr; } - // After signalling shutdown by clearing sThreadSafeCodeBlockMap, wait for - // concurrent wasm::LookupCodeBlock()s to finish. - ThreadSafeCodeBlockMap* map = sThreadSafeCodeBlockMap; + // After signalling shutdown by clearing sProcessCodeSegmentMap, wait for + // concurrent wasm::LookupCodeSegment()s to finish. + ProcessCodeSegmentMap* map = sProcessCodeSegmentMap; MOZ_RELEASE_ASSERT(map); - sThreadSafeCodeBlockMap = nullptr; - while (map->numActiveLookups() > 0) { + sProcessCodeSegmentMap = nullptr; + while (sNumActiveLookups > 0) { } ReleaseBuiltinThunks(); diff --git a/js/src/wasm/WasmProcess.h b/js/src/wasm/WasmProcess.h index 144d542b8d52f..e107e8917a2c5 100644 --- a/js/src/wasm/WasmProcess.h +++ b/js/src/wasm/WasmProcess.h @@ -28,18 +28,18 @@ namespace wasm { class Code; class CodeRange; -class CodeBlock; +class CodeSegment; class TagType; extern const TagType* sWrappedJSValueTagType; static constexpr uint32_t WrappedJSValueTagType_ValueOffset = 0; -// These methods return the wasm::CodeBlock (resp. 
wasm::Code) containing +// These methods return the wasm::CodeSegment (resp. wasm::Code) containing // the given pc, if any exist in the process. These methods do not take a lock, // and thus are safe to use in a profiling context. -const CodeBlock* LookupCodeBlock(const void* pc, - const CodeRange** codeRange = nullptr); +const CodeSegment* LookupCodeSegment(const void* pc, + const CodeRange** codeRange = nullptr); const Code* LookupCode(const void* pc, const CodeRange** codeRange = nullptr); @@ -52,12 +52,12 @@ bool InCompiledCode(void* pc); extern mozilla::Atomic CodeExists; -// These methods allow to (un)register CodeBlocks so they can be looked up +// These methods allow to (un)register CodeSegments so they can be looked up // via pc in the methods described above. -bool RegisterCodeBlock(const CodeBlock* cs); +bool RegisterCodeSegment(const CodeSegment* cs); -void UnregisterCodeBlock(const CodeBlock* cs); +void UnregisterCodeSegment(const CodeSegment* cs); // Whether this process is configured to use huge memory or not. Note that this // is not precise enough to tell whether a particular memory uses huge memory, diff --git a/js/src/wasm/WasmRealm.cpp b/js/src/wasm/WasmRealm.cpp index 6c9a5a9f0d36e..61d8b96a44415 100644 --- a/js/src/wasm/WasmRealm.cpp +++ b/js/src/wasm/WasmRealm.cpp @@ -42,7 +42,22 @@ struct InstanceComparator { if (instance == &target) { return 0; } - return instance < &target ? -1 : 1; + + // Instances can share code, so the segments can be equal (though they + // can't partially overlap). If the codeBases are equal, we sort by + // Instance address. Thus a Code may map to many instances. + + // Compare by the first tier, always. + + Tier instanceTier = instance->code().stableTier(); + Tier targetTier = target.code().stableTier(); + + if (instance->codeBase(instanceTier) == target.codeBase(targetTier)) { + return instance < &target ? -1 : 1; + } + + return target.codeBase(targetTier) < instance->codeBase(instanceTier) ? -1 + : 1; } }; diff --git a/js/src/wasm/WasmSerialize.cpp b/js/src/wasm/WasmSerialize.cpp index 4acf6b69519e6..b312f436926fa 100644 --- a/js/src/wasm/WasmSerialize.cpp +++ b/js/src/wasm/WasmSerialize.cpp @@ -121,14 +121,6 @@ CoderResult Coder::readBytes(void* dest, size_t length) { return Ok(); } -CoderResult Coder::readBytesRef(size_t length, - const uint8_t** bytesBegin) { - MOZ_RELEASE_ASSERT(buffer_ + length <= end_); - *bytesBegin = buffer_; - buffer_ += length; - return Ok(); -} - // Cacheable POD coding functions template @@ -970,31 +963,39 @@ CoderResult CodeLinkData(Coder& coder, // often. Exclude symbolicLinks field from trip wire value calculation. 
WASM_VERIFY_SERIALIZATION_FOR_SIZE( wasm::LinkData, 48 + sizeof(wasm::LinkData::SymbolicLinkArray)); + if constexpr (mode == MODE_ENCODE) { + MOZ_ASSERT(item->tier == Tier::Serialized); + } MOZ_TRY(CodePod(coder, &item->pod())); MOZ_TRY(CodePodVector(coder, &item->internalLinks)); MOZ_TRY(CodeSymbolicLinkArray(coder, &item->symbolicLinks)); return Ok(); } -CoderResult CodeCodeSegment(Coder& coder, - wasm::SharedCodeSegment* item, - const wasm::LinkData& linkData, - wasm::CodeBlock* maybeSharedStubs) { - WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CodeSegment, 40); - // Assert we're decoding a CodeSegment - MOZ_TRY(Magic(coder, Marker::CodeSegment)); +CoderResult CodeModuleSegment(Coder& coder, + wasm::UniqueModuleSegment* item, + const wasm::LinkData& linkData) { + WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::ModuleSegment, 48); + // Assert we're decoding a ModuleSegment + MOZ_TRY(Magic(coder, Marker::ModuleSegment)); // Decode the code bytes length size_t length; MOZ_TRY(CodePod(coder, &length)); + // Allocate the code bytes + Maybe writable; + UniqueCodeBytes bytes = AllocateCodeBytes(writable, length); + if (!bytes) { + return Err(OutOfMemory()); + } + // Decode the code bytes - const uint8_t* codeBytes; - MOZ_TRY(coder.readBytesRef(length, &codeBytes)); + MOZ_TRY(coder.readBytes(bytes.get(), length)); - // Initialize the CodeSegment - *item = CodeSegment::createFromBytes(codeBytes, length, linkData, - maybeSharedStubs); + // Initialize the ModuleSegment + *item = js::MakeUnique(Tier::Serialized, std::move(bytes), + length, linkData); if (!*item) { return Err(OutOfMemory()); } @@ -1002,17 +1003,18 @@ CoderResult CodeCodeSegment(Coder& coder, } template -CoderResult CodeCodeSegment(Coder& coder, - CoderArg item, - const wasm::LinkData& linkData) { - WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CodeSegment, 40); +CoderResult CodeModuleSegment(Coder& coder, + CoderArg item, + const wasm::LinkData& linkData) { + WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::ModuleSegment, 48); STATIC_ASSERT_ENCODING_OR_SIZING; + MOZ_ASSERT((*item)->tier() == Tier::Serialized); - // Mark that we're encoding a CodeSegment - MOZ_TRY(Magic(coder, Marker::CodeSegment)); + // Mark that we're encoding a ModuleSegment + MOZ_TRY(Magic(coder, Marker::ModuleSegment)); // Encode the length - size_t length = (*item)->lengthBytes(); + size_t length = (*item)->length(); MOZ_TRY(CodePod(coder, &length)); if constexpr (mode == MODE_SIZE) { @@ -1031,6 +1033,24 @@ CoderResult CodeCodeSegment(Coder& coder, return Ok(); } +template +CoderResult CodeMetadataTier(Coder& coder, + CoderArg item, + const uint8_t* codeStart) { + WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::MetadataTier, 896); + MOZ_TRY(Magic(coder, Marker::MetadataTier)); + MOZ_TRY(CodePodVector(coder, &item->funcToCodeRange)); + MOZ_TRY(CodePodVector(coder, &item->codeRanges)); + MOZ_TRY(CodePodVector(coder, &item->callSites)); + MOZ_TRY(CodeTrapSiteVectorArray(coder, &item->trapSites)); + MOZ_TRY(CodePodVector(coder, &item->funcImports)); + MOZ_TRY(CodePodVector(coder, &item->funcExports)); + MOZ_TRY(CodeStackMaps(coder, &item->stackMaps, codeStart)); + MOZ_TRY(CodePodVector(coder, &item->tryNotes)); + MOZ_TRY(CodePodVector(coder, &item->codeRangeUnwindInfos)); + return Ok(); +} + // WasmMetadata.h template @@ -1116,79 +1136,45 @@ CoderResult CodeCodeMetadata(Coder& coder, // WasmCode.h -template -CoderResult CodeFuncToCodeRangeMap( - Coder& coder, CoderArg item) { - WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::FuncToCodeRangeMap, 80); - MOZ_TRY(CodePod(coder, &item->startFuncIndex_)); - 
MOZ_TRY(CodePodVector(coder, &item->funcToCodeRange_)); - return Ok(); -} - -CoderResult CodeCodeBlock(Coder& coder, - wasm::UniqueCodeBlock* item, - const wasm::LinkData& linkData, - wasm::CodeBlock* maybeSharedStubs) { - WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CodeBlock, 896); - *item = js::MakeUnique(CodeBlock::kindFromTier(Tier::Serialized)); +CoderResult CodeCodeTier(Coder& coder, wasm::UniqueCodeTier* item, + const wasm::LinkData& linkData) { + WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CodeTier, 248); + UniqueMetadataTier metadata; + UniqueModuleSegment segment; + MOZ_TRY(Magic(coder, Marker::CodeTier)); + MOZ_TRY(CodeModuleSegment(coder, &segment, linkData)); + MOZ_TRY((CodeUniquePtr( + coder, &metadata, &CodeMetadataTier, segment->base()))); + *item = js::MakeUnique(std::move(metadata), std::move(segment)); if (!*item) { return Err(OutOfMemory()); } - MOZ_TRY(Magic(coder, Marker::CodeBlock)); - SharedCodeSegment codeSegment; - MOZ_TRY(CodeCodeSegment(coder, &codeSegment, linkData, maybeSharedStubs)); - (*item)->segment = codeSegment; - (*item)->codeBase = codeSegment->base(); - (*item)->codeLength = codeSegment->lengthBytes(); - MOZ_TRY(CodeFuncToCodeRangeMap(coder, &(*item)->funcToCodeRange)); - MOZ_TRY(CodePodVector(coder, &(*item)->codeRanges)); - MOZ_TRY(CodePodVector(coder, &(*item)->callSites)); - MOZ_TRY(CodeTrapSiteVectorArray(coder, &(*item)->trapSites)); - MOZ_TRY(CodePodVector(coder, &(*item)->funcExports)); - MOZ_TRY(CodeStackMaps(coder, &(*item)->stackMaps, (*item)->segment->base())); - MOZ_TRY(CodePodVector(coder, &(*item)->tryNotes)); - MOZ_TRY(CodePodVector(coder, &(*item)->codeRangeUnwindInfos)); return Ok(); } template -CoderResult CodeCodeBlock(Coder& coder, - CoderArg item, - const wasm::LinkData& linkData) { - WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CodeBlock, 896); +CoderResult CodeCodeTier(Coder& coder, + CoderArg item, + const wasm::LinkData& linkData) { + WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CodeTier, 248); STATIC_ASSERT_ENCODING_OR_SIZING; - MOZ_TRY(Magic(coder, Marker::CodeBlock)); - // We don't support serializing sub-ranges yet. These only happen with - // lazy stubs, which we don't serialize. 
- MOZ_ASSERT(item->codeBase == item->segment->base() && - item->codeLength == item->segment->lengthBytes()); - MOZ_TRY(CodeCodeSegment(coder, &item->segment, linkData)); - MOZ_TRY(CodeFuncToCodeRangeMap(coder, &item->funcToCodeRange)); - MOZ_TRY(CodePodVector(coder, &item->codeRanges)); - MOZ_TRY(CodePodVector(coder, &item->callSites)); - MOZ_TRY(CodeTrapSiteVectorArray(coder, &item->trapSites)); - MOZ_TRY(CodePodVector(coder, &item->funcExports)); - MOZ_TRY(CodeStackMaps(coder, &item->stackMaps, item->segment->base())); - MOZ_TRY(CodePodVector(coder, &item->tryNotes)); - MOZ_TRY(CodePodVector(coder, &item->codeRangeUnwindInfos)); + MOZ_TRY(Magic(coder, Marker::CodeTier)); + MOZ_TRY(CodeModuleSegment(coder, &item->segment_, linkData)); + MOZ_TRY((CodeUniquePtr(coder, &item->metadata_, + &CodeMetadataTier, + item->segment_->base()))); return Ok(); } CoderResult CodeSharedCode(Coder& coder, wasm::SharedCode* item, - const wasm::LinkData& sharedStubsLinkData, - const wasm::LinkData& optimizedLinkData, + const wasm::LinkData& linkData, const wasm::CustomSectionVector& customSections) { - WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::Code, 704); + WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::Code, 200); MutableCodeMetadata codeMeta; - UniqueCodeBlock sharedStubs; - UniqueCodeBlock optimizedCode; - FuncImportVector funcImports; + UniqueCodeTier codeTier; MOZ_TRY((CodeRefPtr( coder, &codeMeta))); - MOZ_TRY(CodePodVector(coder, &funcImports)); - MOZ_TRY(CodeCodeBlock(coder, &sharedStubs, sharedStubsLinkData, nullptr)); - MOZ_TRY(CodeCodeBlock(coder, &optimizedCode, optimizedLinkData, - sharedStubs.get())); + MOZ_TRY(CodeCodeTier(coder, &codeTier, linkData)); // Initialize metadata's name payload from the custom section if (codeMeta->nameCustomSectionIndex) { @@ -1199,12 +1185,17 @@ CoderResult CodeSharedCode(Coder& coder, wasm::SharedCode* item, MOZ_RELEASE_ASSERT(codeMeta->funcNames.empty()); } + // Initialize the jump tables + JumpTables jumpTables; + if (!jumpTables.init(CompileMode::Once, codeTier->segment(), + codeTier->metadata().codeRanges)) { + return Err(OutOfMemory()); + } + // Create and initialize the code - MutableCode code = - js_new(CompileMode::Once, *codeMeta, /*codeMetaForAsmJS=*/nullptr); - if (!code || - !code->initialize(std::move(funcImports), std::move(sharedStubs), - sharedStubsLinkData, std::move(optimizedCode))) { + MutableCode code = js_new(*codeMeta, /*codeMetaForAsmJS=*/nullptr, + std::move(codeTier), std::move(jumpTables)); + if (!code || !code->initialize(linkData)) { return Err(OutOfMemory()); } *item = code; @@ -1214,17 +1205,12 @@ CoderResult CodeSharedCode(Coder& coder, wasm::SharedCode* item, template CoderResult CodeSharedCode(Coder& coder, CoderArg item, - const wasm::LinkData& sharedStubsLinkData, - const wasm::LinkData& optimizedLinkData) { - WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::Code, 704); + const wasm::LinkData& linkData) { + WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::Code, 200); STATIC_ASSERT_ENCODING_OR_SIZING; MOZ_TRY((CodeRefPtr( coder, &(*item)->codeMeta_))); - MOZ_TRY(CodePodVector(coder, &(*item)->funcImports())); - MOZ_TRY(CodeCodeBlock(coder, &(*item)->sharedStubs(), sharedStubsLinkData)); - MOZ_TRY(CodeCodeBlock(coder, - &(*item)->completeTierCodeBlock(Tier::Serialized), - optimizedLinkData)); + MOZ_TRY(CodeCodeTier(coder, &(*item)->codeTier(Tier::Serialized), linkData)); return Ok(); } @@ -1247,16 +1233,13 @@ CoderResult CodeModule(Coder& coder, MutableModule* item) { (CodeVector>( coder, &customSections))); + LinkData linkData(Tier::Serialized); 
MOZ_TRY(Magic(coder, Marker::LinkData)); - LinkData sharedStubsLinkData; - LinkData optimizedLinkData; - MOZ_TRY(CodeLinkData(coder, &sharedStubsLinkData)); - MOZ_TRY(CodeLinkData(coder, &optimizedLinkData)); + MOZ_TRY(CodeLinkData(coder, &linkData)); SharedCode code; MOZ_TRY(Magic(coder, Marker::Code)); - MOZ_TRY(CodeSharedCode(coder, &code, sharedStubsLinkData, optimizedLinkData, - customSections)); + MOZ_TRY(CodeSharedCode(coder, &code, linkData, customSections)); MutableModuleMetadata moduleMeta; MOZ_TRY((CodeRefPtr( @@ -1285,12 +1268,11 @@ CoderResult CodeModule(Coder& coder, MutableModule* item) { template CoderResult CodeModule(Coder& coder, CoderArg item, - const wasm::LinkData& sharedStubsLinkData, - const wasm::LinkData& optimizedLinkData) { + const wasm::LinkData& linkData) { WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::Module, 184); STATIC_ASSERT_ENCODING_OR_SIZING; MOZ_RELEASE_ASSERT(!item->codeMeta().debugEnabled); - MOZ_RELEASE_ASSERT(item->code_->hasCompleteTier(Tier::Serialized)); + MOZ_RELEASE_ASSERT(item->code_->hasTier(Tier::Serialized)); JS::BuildIdCharVector currentBuildId; if (!GetOptimizedEncodingBuildId(¤tBuildId)) { @@ -1301,11 +1283,9 @@ CoderResult CodeModule(Coder& coder, CoderArg item, MOZ_TRY((CodeVector>( coder, &item->customSections_))); MOZ_TRY(Magic(coder, Marker::LinkData)); - MOZ_TRY(CodeLinkData(coder, &sharedStubsLinkData)); - MOZ_TRY(CodeLinkData(coder, &optimizedLinkData)); + MOZ_TRY(CodeLinkData(coder, &linkData)); MOZ_TRY(Magic(coder, Marker::Code)); - MOZ_TRY(CodeSharedCode(coder, &item->code_, sharedStubsLinkData, - optimizedLinkData)); + MOZ_TRY(CodeSharedCode(coder, &item->code_, linkData)); MOZ_TRY((CodeRefPtr( coder, &item->moduleMeta_))); MOZ_TRY(Magic(coder, Marker::DataSegments)); @@ -1322,12 +1302,10 @@ CoderResult CodeModule(Coder& coder, CoderArg item, } // namespace wasm } // namespace js -static bool GetSerializedSize(const Module& module, - const LinkData& sharedStubsLinkData, - const LinkData& optimizedLinkData, size_t* size) { +static bool GetSerializedSize(const Module& module, const LinkData& linkData, + size_t* size) { Coder coder(module.codeMeta().types.get()); - auto result = - CodeModule(coder, &module, sharedStubsLinkData, optimizedLinkData); + auto result = CodeModule(coder, &module, linkData); if (result.isErr()) { return false; } @@ -1335,14 +1313,12 @@ static bool GetSerializedSize(const Module& module, return true; } -bool Module::serialize(const LinkData& sharedStubsLinkData, - const LinkData& optimizedLinkData, Bytes* bytes) const { +bool Module::serialize(const LinkData& linkData, Bytes* bytes) const { MOZ_RELEASE_ASSERT(!codeMeta().debugEnabled); - MOZ_RELEASE_ASSERT(code_->hasCompleteTier(Tier::Serialized)); + MOZ_RELEASE_ASSERT(code_->hasTier(Tier::Serialized)); size_t serializedSize; - if (!GetSerializedSize(*this, sharedStubsLinkData, optimizedLinkData, - &serializedSize)) { + if (!GetSerializedSize(*this, linkData, &serializedSize)) { // An error is an overflow, return false return false; } @@ -1354,8 +1330,7 @@ bool Module::serialize(const LinkData& sharedStubsLinkData, Coder coder(codeMeta().types.get(), bytes->begin(), serializedSize); - CoderResult result = - CodeModule(coder, this, sharedStubsLinkData, optimizedLinkData); + CoderResult result = CodeModule(coder, this, linkData); if (result.isErr()) { // An error is an OOM, return false return false; diff --git a/js/src/wasm/WasmSerialize.h b/js/src/wasm/WasmSerialize.h index 7808115e633c4..918e50b42d2d3 100644 --- a/js/src/wasm/WasmSerialize.h +++ 
b/js/src/wasm/WasmSerialize.h @@ -180,7 +180,6 @@ struct Coder { const uint8_t* end_; CoderResult readBytes(void* dest, size_t length); - CoderResult readBytesRef(size_t length, const uint8_t** bytesBegin); }; // Macros to help types declare friendship with a coding function diff --git a/js/src/wasm/WasmSignalHandlers.cpp b/js/src/wasm/WasmSignalHandlers.cpp index b9c44e7a0eaf3..f838b7bec0e82 100644 --- a/js/src/wasm/WasmSignalHandlers.cpp +++ b/js/src/wasm/WasmSignalHandlers.cpp @@ -518,14 +518,16 @@ struct AutoHandlingTrap { MOZ_ASSERT(sAlreadyHandlingTrap.get()); uint8_t* pc = ContextToPC(context); - const CodeBlock* codeBlock = LookupCodeBlock(pc); - if (!codeBlock) { + const CodeSegment* codeSegment = LookupCodeSegment(pc); + if (!codeSegment || !codeSegment->isModule()) { return false; } + const ModuleSegment& segment = *codeSegment->asModule(); + Trap trap; BytecodeOffset bytecode; - if (!codeBlock->lookupTrap(pc, &trap, &bytecode)) { + if (!segment.code().lookupTrap(pc, &trap, &bytecode)) { return false; } @@ -537,7 +539,7 @@ struct AutoHandlingTrap { auto* frame = reinterpret_cast(ContextToFP(context)); Instance* instance = GetNearestEffectiveInstance(frame); - MOZ_RELEASE_ASSERT(&instance->code() == codeBlock->code || + MOZ_RELEASE_ASSERT(&instance->code() == &segment.code() || trap == Trap::IndirectCallBadSig); JSContext* cx = @@ -549,7 +551,7 @@ struct AutoHandlingTrap { // will call finishWasmTrap(). jit::JitActivation* activation = cx->activation()->asJit(); activation->startWasmTrap(trap, bytecode.offset(), ToRegisterState(context)); - SetContextPC(context, codeBlock->code->trapCode()); + SetContextPC(context, segment.trapCode()); return true; } @@ -981,14 +983,16 @@ bool wasm::MemoryAccessTraps(const RegisterState& regs, uint8_t* addr, #ifdef JS_CODEGEN_NONE return false; #else - const wasm::CodeBlock* codeBlock = wasm::LookupCodeBlock(regs.pc); - if (!codeBlock) { + const wasm::CodeSegment* codeSegment = wasm::LookupCodeSegment(regs.pc); + if (!codeSegment || !codeSegment->isModule()) { return false; } + const wasm::ModuleSegment& segment = *codeSegment->asModule(); + Trap trap; BytecodeOffset bytecode; - if (!codeBlock->code->lookupTrap(regs.pc, &trap, &bytecode)) { + if (!segment.code().lookupTrap(regs.pc, &trap, &bytecode)) { return false; } switch (trap) { @@ -1008,7 +1012,7 @@ bool wasm::MemoryAccessTraps(const RegisterState& regs, uint8_t* addr, const Instance& instance = *GetNearestEffectiveInstance(Frame::fromUntaggedWasmExitFP(regs.fp)); - MOZ_ASSERT(&instance.code() == codeBlock->code); + MOZ_ASSERT(&instance.code() == &segment.code()); switch (trap) { case Trap::OutOfBounds: @@ -1037,7 +1041,7 @@ bool wasm::MemoryAccessTraps(const RegisterState& regs, uint8_t* addr, JSContext* cx = TlsContext.get(); // Cold simulator helper function jit::JitActivation* activation = cx->activation()->asJit(); activation->startWasmTrap(trap, bytecode.offset(), regs); - *newPC = codeBlock->code->trapCode(); + *newPC = segment.trapCode(); return true; #endif } @@ -1047,21 +1051,23 @@ bool wasm::HandleIllegalInstruction(const RegisterState& regs, #ifdef JS_CODEGEN_NONE return false; #else - const wasm::CodeBlock* codeBlock = wasm::LookupCodeBlock(regs.pc); - if (!codeBlock) { + const wasm::CodeSegment* codeSegment = wasm::LookupCodeSegment(regs.pc); + if (!codeSegment || !codeSegment->isModule()) { return false; } + const wasm::ModuleSegment& segment = *codeSegment->asModule(); + Trap trap; BytecodeOffset bytecode; - if (!codeBlock->code->lookupTrap(regs.pc, &trap, &bytecode)) { + if 
(!segment.code().lookupTrap(regs.pc, &trap, &bytecode)) { return false; } JSContext* cx = TlsContext.get(); // Cold simulator helper function jit::JitActivation* activation = cx->activation()->asJit(); activation->startWasmTrap(trap, bytecode.offset(), regs); - *newPC = codeBlock->code->trapCode(); + *newPC = segment.trapCode(); return true; #endif } diff --git a/js/src/wasm/WasmStubs.cpp b/js/src/wasm/WasmStubs.cpp index 50daea2b0a23b..54476582f65e4 100644 --- a/js/src/wasm/WasmStubs.cpp +++ b/js/src/wasm/WasmStubs.cpp @@ -1300,11 +1300,11 @@ static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex, // unify the BuiltinThunk's interface we call it here with wasm abi. jit::WasmABIArgIter argsIter(coerceArgTypes); - // argument 0: function index. + // argument 0: function export index. if (argsIter->kind() == ABIArg::GPR) { - masm.movePtr(ImmWord(fe.funcIndex()), argsIter->gpr()); + masm.movePtr(ImmWord(funcExportIndex), argsIter->gpr()); } else { - masm.storePtr(ImmWord(fe.funcIndex()), + masm.storePtr(ImmWord(funcExportIndex), Address(sp, argsIter->offsetFromArgBase())); } argsIter++; @@ -1354,7 +1354,7 @@ void wasm::GenerateDirectCallFromJit(MacroAssembler& masm, const FuncExport& fe, Register scratch, uint32_t* callOffset) { MOZ_ASSERT(!IsCompilingWasm()); - const FuncType& funcType = inst.code().getFuncExportType(fe); + const FuncType& funcType = inst.codeMeta().getFuncExportType(fe); size_t framePushedAtStart = masm.framePushed(); @@ -1506,9 +1506,10 @@ void wasm::GenerateDirectCallFromJit(MacroAssembler& masm, const FuncExport& fe, masm.loadWasmPinnedRegsFromInstance(); // Actual call. - const CodeBlock& codeBlock = inst.code().funcCodeBlock(fe.funcIndex()); - const CodeRange& codeRange = codeBlock.codeRange(fe); - void* callee = codeBlock.segment->base() + codeRange.funcUncheckedCallEntry(); + const CodeTier& codeTier = inst.code().codeTier(inst.code().bestTier()); + const MetadataTier& metadata = codeTier.metadata(); + const CodeRange& codeRange = metadata.codeRange(fe); + void* callee = codeTier.segment().base() + codeRange.funcUncheckedCallEntry(); masm.assertStackAlignment(WasmStackAlignment); MoveSPForJitABI(masm); @@ -1974,6 +1975,38 @@ static bool GenerateImportFunction(jit::MacroAssembler& masm, static const unsigned STUBS_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024; +bool wasm::GenerateImportFunctions(const CodeMetadata& codeMeta, + const FuncImportVector& imports, + CompiledCode* code) { + LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE); + TempAllocator alloc(&lifo); + WasmMacroAssembler masm(alloc); + + for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) { + const FuncImport& fi = imports[funcIndex]; + const FuncType& funcType = *codeMeta.funcs[funcIndex].type; + CallIndirectId callIndirectId = + CallIndirectId::forFunc(codeMeta, funcIndex); + + FuncOffsets offsets; + if (!GenerateImportFunction(masm, fi, funcType, callIndirectId, &offsets, + &code->stackMaps)) { + return false; + } + if (!code->codeRanges.emplaceBack(funcIndex, /* bytecodeOffset = */ 0, + offsets, /* hasUnwindInfo = */ false)) { + return false; + } + } + + masm.finish(); + if (masm.oom()) { + return false; + } + + return code->swap(masm); +} + // Generate a stub that is called via the internal ABI derived from the // signature of the import and calls into an appropriate callImport C++ // function, having boxed all the ABI arguments into a homogeneous Value array. 
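Illustrative sketch (not part of the patch): the boxing step the comment above describes, reduced to plain C++. The real import interp exit performs this in generated code and hands the array to the callImport path; ArgType, BoxedValue and BoxImportArgs below are invented stand-ins and only i32/f64 arguments are shown.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    enum class ArgType { I32, F64 };

    struct BoxedValue {   // toy substitute for a JS::Value
      bool isDouble;
      double d;
      int32_t i;
    };

    // Walk the raw, ABI-ordered argument slots and box each one into a
    // homogeneous array for the out-of-line call.
    std::vector<BoxedValue> BoxImportArgs(const ArgType* types, size_t numArgs,
                                          const uint64_t* rawArgs) {
      std::vector<BoxedValue> boxed;
      boxed.reserve(numArgs);
      for (size_t i = 0; i < numArgs; i++) {
        BoxedValue v{};
        if (types[i] == ArgType::I32) {
          v.isDouble = false;
          v.i = static_cast<int32_t>(rawArgs[i]);   // low 32 bits hold the i32
        } else {
          v.isDouble = true;
          std::memcpy(&v.d, &rawArgs[i], sizeof(double));
        }
        boxed.push_back(v);
      }
      return boxed;
    }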
@@ -2882,43 +2915,6 @@ static bool GenerateDebugTrapStub(MacroAssembler& masm, Label* throwLabel, return FinishOffsets(masm, offsets); } -bool wasm::GenerateEntryStubs(const CodeMetadata& codeMeta, - const FuncExportVector& exports, - CompiledCode* code) { - LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE); - TempAllocator alloc(&lifo); - JitContext jcx; - WasmMacroAssembler masm(alloc); - AutoCreatedBy acb(masm, "wasm::GenerateEntryStubs"); - - // Swap in already-allocated empty vectors to avoid malloc/free. - if (!code->swap(masm)) { - return false; - } - - JitSpew(JitSpew_Codegen, "# Emitting wasm export stubs"); - - Maybe noAbsolute; - for (size_t i = 0; i < exports.length(); i++) { - const FuncExport& fe = exports[i]; - const FuncType& funcType = (*codeMeta.types)[fe.typeIndex()].funcType(); - if (!fe.hasEagerStubs()) { - continue; - } - if (!GenerateEntryStubs(masm, i, fe, funcType, noAbsolute, - codeMeta.isAsmJS(), &code->codeRanges)) { - return false; - } - } - - masm.finish(); - if (masm.oom()) { - return false; - } - - return code->swap(masm); -} - bool wasm::GenerateEntryStubs(MacroAssembler& masm, size_t funcExportIndex, const FuncExport& fe, const FuncType& funcType, const Maybe& callee, bool isAsmJS, @@ -3008,20 +3004,6 @@ bool wasm::GenerateStubs(const CodeMetadata& codeMeta, const FuncImport& fi = imports[funcIndex]; const FuncType& funcType = *codeMeta.funcs[funcIndex].type; - CallIndirectId callIndirectId = - CallIndirectId::forFunc(codeMeta, funcIndex); - - FuncOffsets wrapperOffsets; - if (!GenerateImportFunction(masm, fi, funcType, callIndirectId, - &wrapperOffsets, &code->stackMaps)) { - return false; - } - if (!code->codeRanges.emplaceBack(funcIndex, /* bytecodeOffset = */ 0, - wrapperOffsets, - /* hasUnwindInfo = */ false)) { - return false; - } - CallableOffsets interpOffsets; if (!GenerateImportInterpExit(masm, fi, funcType, funcIndex, &throwLabel, &interpOffsets)) { @@ -3049,7 +3031,7 @@ bool wasm::GenerateStubs(const CodeMetadata& codeMeta, } } - JitSpew(JitSpew_Codegen, "# Emitting wasm entry stubs"); + JitSpew(JitSpew_Codegen, "# Emitting wasm export stubs"); Maybe noAbsolute; for (size_t i = 0; i < exports.length(); i++) { @@ -3064,7 +3046,7 @@ bool wasm::GenerateStubs(const CodeMetadata& codeMeta, } } - JitSpew(JitSpew_Codegen, "# Emitting wasm trap and throw stubs"); + JitSpew(JitSpew_Codegen, "# Emitting wasm exit stubs"); Offsets offsets; diff --git a/js/src/wasm/WasmStubs.h b/js/src/wasm/WasmStubs.h index 3041265fcc9ec..e58756fa37686 100644 --- a/js/src/wasm/WasmStubs.h +++ b/js/src/wasm/WasmStubs.h @@ -251,14 +251,14 @@ extern bool GenerateBuiltinThunk(jit::MacroAssembler& masm, ExitReason exitReason, void* funcPtr, CallableOffsets* offsets); +extern bool GenerateImportFunctions(const CodeMetadata& codeMeta, + const FuncImportVector& imports, + CompiledCode* code); + extern bool GenerateStubs(const CodeMetadata& codeMeta, const FuncImportVector& imports, const FuncExportVector& exports, CompiledCode* code); -extern bool GenerateEntryStubs(const CodeMetadata& codeMeta, - const FuncExportVector& exports, - CompiledCode* code); - extern bool GenerateEntryStubs(jit::MacroAssembler& masm, size_t funcExportIndex, const FuncExport& fe, const FuncType& funcType, diff --git a/js/src/wasm/WasmSummarizeInsn.h b/js/src/wasm/WasmSummarizeInsn.h index 5df96a4f5c850..93a82437e1d6c 100644 --- a/js/src/wasm/WasmSummarizeInsn.h +++ b/js/src/wasm/WasmSummarizeInsn.h @@ -22,7 +22,7 @@ namespace wasm { // must be the correct classification for the instruction. 
Return // `mozilla::Nothing` in case of doubt. // -// This function is only used by ModuleGenerator::finishCodeBlock to audit wasm +// This function is only used by ModuleGenerator::finishCodeTier to audit wasm // trap sites. So it doesn't need to handle the whole complexity of the // machine's instruction set. It only needs to handle the tiny sub-dialect // used by the trappable instructions we actually generate. diff --git a/js/src/wasm/WasmTable.cpp b/js/src/wasm/WasmTable.cpp index 6e3e1b92c696b..b1b228293bc29 100644 --- a/js/src/wasm/WasmTable.cpp +++ b/js/src/wasm/WasmTable.cpp @@ -188,10 +188,10 @@ void Table::setFuncRef(uint32_t index, JSFunction* fun) { // produce the same function object as was imported. WasmInstanceObject* instanceObj = ExportedFunctionToInstanceObject(fun); Instance& instance = instanceObj->instance(); - uint8_t* codeRangeBase; - const CodeRange* codeRange; - instanceObj->getExportedFunctionCodeRange(fun, &codeRange, &codeRangeBase); - void* code = codeRangeBase + codeRange->funcCheckedCallEntry(); + Tier tier = instance.code().bestTier(); + const CodeRange& calleeCodeRange = + instanceObj->getExportedFunctionCodeRange(fun, tier); + void* code = instance.codeBase(tier) + calleeCodeRange.funcCheckedCallEntry(); setFuncRef(index, code, &instance); } @@ -239,10 +239,11 @@ void Table::fillFuncRef(uint32_t index, uint32_t fillCount, FuncRef ref, #endif Instance& instance = instanceObj->instance(); - const CodeBlock& codeBlock = instance.code().funcCodeBlock(funcIndex); + Tier tier = instance.code().bestTier(); + const MetadataTier& metadata = instance.metadata(tier); const CodeRange& codeRange = - codeBlock.codeRange(codeBlock.lookupFuncExport(funcIndex)); - void* code = codeBlock.segment->base() + codeRange.funcCheckedCallEntry(); + metadata.codeRange(metadata.lookupFuncExport(funcIndex)); + void* code = instance.codeBase(tier) + codeRange.funcCheckedCallEntry(); for (uint32_t i = index, end = index + fillCount; i != end; i++) { setFuncRef(i, code, &instance); } diff --git a/js/src/wasm/WasmTypeDecls.h b/js/src/wasm/WasmTypeDecls.h index 5ed649df5c251..112a6ea8e9f7e 100644 --- a/js/src/wasm/WasmTypeDecls.h +++ b/js/src/wasm/WasmTypeDecls.h @@ -55,7 +55,9 @@ namespace wasm { struct ModuleMetadata; struct CodeMetadata; class CodeRange; -class CodeBlock; +class CodeTier; +class ModuleSegment; +struct MetadataTier; class Decoder; class GeneratedSourceMap; class Instance;
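Closing illustration (not part of the patch): the per-tier entry resolution pattern this backout reinstates in WasmJS.cpp, WasmStubs.cpp and WasmTable.cpp above: look up the function's CodeRange in the chosen tier's metadata, then add its entry offset to that tier's code base. The types below are simplified stand-ins, not the real MetadataTier/CodeRange.

    #include <cstdint>
    #include <vector>

    struct CodeRangeLite {
      uint32_t funcIndex;
      uint32_t checkedEntryOffset;   // offset of funcCheckedCallEntry()
    };

    struct MetadataTierLite {
      std::vector<CodeRangeLite> codeRanges;   // one per exported function

      const CodeRangeLite* lookupFuncExport(uint32_t funcIndex) const {
        for (const CodeRangeLite& cr : codeRanges) {
          if (cr.funcIndex == funcIndex) {
            return &cr;
          }
        }
        return nullptr;
      }
    };

    // Resolve a callable entry from a function index, one tier's metadata and
    // that tier's code base, as Table::setFuncRef/fillFuncRef do above.
    void* ResolveCheckedEntry(const MetadataTierLite& metadata,
                              uint8_t* codeBase, uint32_t funcIndex) {
      const CodeRangeLite* cr = metadata.lookupFuncExport(funcIndex);
      return cr ? codeBase + cr->checkedEntryOffset : nullptr;
    }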