diff --git a/FEXCore/Source/Interface/Core/Frontend.cpp b/FEXCore/Source/Interface/Core/Frontend.cpp
index f7b5c8c198..60a86d7b9d 100644
--- a/FEXCore/Source/Interface/Core/Frontend.cpp
+++ b/FEXCore/Source/Interface/Core/Frontend.cpp
@@ -20,8 +20,8 @@ desc: Extracts instruction & block meta info, frontend multiblock logic
 #include
 #include
 #include
+#include <FEXCore/Utils/TypeDefines.h>
 #include
-#include <FEXHeaderUtils/TypeDefines.h>

 namespace FEXCore::Frontend {
 #include "Interface/Core/VSyscall/VSyscall.inc"
@@ -1126,11 +1126,11 @@ void Decoder::DecodeInstructionsAtEntry(uint8_t const* _InstStream, uint64_t PC,
   // Entry is a jump target
   BlocksToDecode.emplace(PC);

-  uint64_t CurrentCodePage = PC & FHU::FEX_PAGE_MASK;
+  uint64_t CurrentCodePage = PC & FEXCore::Utils::FEX_PAGE_MASK;

   fextl::set<uint64_t> CodePages = { CurrentCodePage };

-  AddContainedCodePage(PC, CurrentCodePage, FHU::FEX_PAGE_SIZE);
+  AddContainedCodePage(PC, CurrentCodePage, FEXCore::Utils::FEX_PAGE_SIZE);

   if (MaxInst == 0) {
     MaxInst = CTX->Config.MaxInstPerBlock;
@@ -1156,8 +1156,8 @@ void Decoder::DecodeInstructionsAtEntry(uint8_t const* _InstStream, uint64_t PC,
       auto OpMinAddress = RIPToDecode + PCOffset;
       auto OpMaxAddress = OpMinAddress + MAX_INST_SIZE;

-      auto OpMinPage = OpMinAddress & FHU::FEX_PAGE_MASK;
-      auto OpMaxPage = OpMaxAddress & FHU::FEX_PAGE_MASK;
+      auto OpMinPage = OpMinAddress & FEXCore::Utils::FEX_PAGE_MASK;
+      auto OpMaxPage = OpMaxAddress & FEXCore::Utils::FEX_PAGE_MASK;

       if (OpMinPage != CurrentCodePage) {
         CurrentCodePage = OpMinPage;
@@ -1230,7 +1230,7 @@ void Decoder::DecodeInstructionsAtEntry(uint8_t const* _InstStream, uint64_t PC,
   }

   for (auto CodePage : CodePages) {
-    AddContainedCodePage(PC, CodePage, FHU::FEX_PAGE_SIZE);
+    AddContainedCodePage(PC, CodePage, FEXCore::Utils::FEX_PAGE_SIZE);
   }

   // sort for better branching
diff --git a/FEXCore/Source/Interface/IR/Passes/RegisterAllocationPass.cpp b/FEXCore/Source/Interface/IR/Passes/RegisterAllocationPass.cpp
index 3c6e826411..59532008f9 100644
--- a/FEXCore/Source/Interface/IR/Passes/RegisterAllocationPass.cpp
+++ b/FEXCore/Source/Interface/IR/Passes/RegisterAllocationPass.cpp
@@ -18,6 +18,7 @@ tags: ir|opts
 #include
 #include
 #include
+#include <FEXCore/Utils/TypeDefines.h>
 #include
 #include
 #include
@@ -25,7 +26,6 @@ tags: ir|opts
 #include
 #include
-#include <FEXHeaderUtils/TypeDefines.h>
 #include
 #include

@@ -68,7 +68,7 @@ namespace {
   };
   static_assert(sizeof(RegisterNode) == 128 * 4);

-  constexpr size_t REGISTER_NODES_PER_PAGE = FHU::FEX_PAGE_SIZE / sizeof(RegisterNode);
+  constexpr size_t REGISTER_NODES_PER_PAGE = FEXCore::Utils::FEX_PAGE_SIZE / sizeof(RegisterNode);

   struct RegisterSet {
     fextl::vector Classes;
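As a quick sanity check on the RegisterAllocationPass hunk above: the static_assert pins RegisterNode at 128 * 4 = 512 bytes, so the renamed constant still works out to eight nodes per page. A minimal sketch with stand-in constants, assuming FEX_PAGE_SIZE is 4096 as the TypeDefines.h comment further down states:

#include <cstddef>

// Stand-ins mirroring the constants touched above; not the FEX headers themselves.
constexpr std::size_t FEX_PAGE_SIZE = 4096;       // FEXCore::Utils::FEX_PAGE_SIZE
constexpr std::size_t RegisterNodeSize = 128 * 4; // matches the static_assert above

constexpr std::size_t REGISTER_NODES_PER_PAGE = FEX_PAGE_SIZE / RegisterNodeSize;
static_assert(REGISTER_NODES_PER_PAGE == 8, "4096 / 512 == 8 nodes per page");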
diff --git a/FEXCore/Source/Utils/Allocator.cpp b/FEXCore/Source/Utils/Allocator.cpp
index 628d2f7689..16464be8d4 100644
--- a/FEXCore/Source/Utils/Allocator.cpp
+++ b/FEXCore/Source/Utils/Allocator.cpp
@@ -3,10 +3,10 @@
 #include
 #include
 #include
+#include <FEXCore/Utils/TypeDefines.h>
 #include
 #include
 #include
-#include <FEXHeaderUtils/TypeDefines.h>
 #include
 #include

@@ -95,8 +95,8 @@ namespace FEXCore::Allocator {
     // Now allocate the next page after the sbrk address to ensure it can't grow.
     // In most cases at the start of `main` this will already be page aligned, which means subsequent `sbrk`
     // calls won't allocate any memory through that.
-    void* AlignedBRK = reinterpret_cast<void*>(FEXCore::AlignUp(reinterpret_cast<uintptr_t>(StartingSBRK), FHU::FEX_PAGE_SIZE));
-    void *AfterBRK = mmap(AlignedBRK, FHU::FEX_PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE | MAP_NORESERVE, -1, 0);
+    void* AlignedBRK = reinterpret_cast<void*>(FEXCore::AlignUp(reinterpret_cast<uintptr_t>(StartingSBRK), FEXCore::Utils::FEX_PAGE_SIZE));
+    void *AfterBRK = mmap(AlignedBRK, FEXCore::Utils::FEX_PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE | MAP_NORESERVE, -1, 0);
     if (AfterBRK == INVALID_PTR) {
       // Couldn't allocate the page after the aligned brk? This should never happen.
       // FEXCore::LogMan isn't configured yet so we just need to print the message.
@@ -118,7 +118,7 @@ namespace FEXCore::Allocator {
   void ReenableSBRKAllocations(void* Ptr) {
     const void* INVALID_PTR = reinterpret_cast<void*>(~0ULL);
     if (Ptr != INVALID_PTR) {
-      munmap(Ptr, FHU::FEX_PAGE_SIZE);
+      munmap(Ptr, FEXCore::Utils::FEX_PAGE_SIZE);
     }
   }

@@ -172,10 +172,10 @@ namespace FEXCore::Allocator {
     for (int i = 0; i < 64; ++i) {
       // Try grabbing a some of the top pages of the range
       // x86 allocates some high pages in the top end
-      void *Ptr = ::mmap(reinterpret_cast<void*>(Size - FHU::FEX_PAGE_SIZE * i), FHU::FEX_PAGE_SIZE, PROT_NONE, MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+      void *Ptr = ::mmap(reinterpret_cast<void*>(Size - FEXCore::Utils::FEX_PAGE_SIZE * i), FEXCore::Utils::FEX_PAGE_SIZE, PROT_NONE, MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
       if (Ptr != (void*)~0ULL) {
-        ::munmap(Ptr, FHU::FEX_PAGE_SIZE);
-        if (Ptr == (void*)(Size - FHU::FEX_PAGE_SIZE * i)) {
+        ::munmap(Ptr, FEXCore::Utils::FEX_PAGE_SIZE);
+        if (Ptr == (void*)(Size - FEXCore::Utils::FEX_PAGE_SIZE * i)) {
           return true;
         }
       }
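The Allocator.cpp hunks above keep the existing guard-page trick intact: map a single PROT_NONE page at the page-aligned program break so the kernel can never extend it, and ReenableSBRKAllocations() later just munmaps that page. A minimal standalone sketch of the same idea, assuming a Linux host with MAP_FIXED_NOREPLACE support; BlockSBRK is an illustrative name, not FEX API:

#include <cstdint>
#include <sys/mman.h>
#include <unistd.h>

constexpr uintptr_t PAGE_SIZE = 4096; // stand-in for FEXCore::Utils::FEX_PAGE_SIZE

// Returns the guard mapping so the caller can munmap it later to re-enable sbrk.
void* BlockSBRK() {
  void* Brk = sbrk(0); // current program break
  uintptr_t Aligned = (reinterpret_cast<uintptr_t>(Brk) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
  // A PROT_NONE page sitting at the break makes any further sbrk growth fail with ENOMEM.
  void* Guard = ::mmap(reinterpret_cast<void*>(Aligned), PAGE_SIZE, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE | MAP_NORESERVE, -1, 0);
  return Guard == MAP_FAILED ? nullptr : Guard;
}

MAP_FIXED_NOREPLACE matters here: a plain MAP_FIXED would silently clobber whatever already lives at that address instead of failing.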
diff --git a/FEXCore/Source/Utils/Allocator/64BitAllocator.cpp b/FEXCore/Source/Utils/Allocator/64BitAllocator.cpp
index 9c2d27133b..a2e097f4de 100644
--- a/FEXCore/Source/Utils/Allocator/64BitAllocator.cpp
+++ b/FEXCore/Source/Utils/Allocator/64BitAllocator.cpp
@@ -6,9 +6,9 @@
 #include
 #include
 #include
+#include <FEXCore/Utils/TypeDefines.h>
 #include
 #include
-#include <FEXHeaderUtils/TypeDefines.h>
 #include
 #include

@@ -70,8 +70,8 @@ namespace Alloc::OSAllocator {
   // Lower bound is the starting of the range just past the lower 32bits
   constexpr static uintptr_t LOWER_BOUND = 0x1'0000'0000ULL;

-  uintptr_t UPPER_BOUND_PAGE = UPPER_BOUND / FHU::FEX_PAGE_SIZE;
-  constexpr static uintptr_t LOWER_BOUND_PAGE = LOWER_BOUND / FHU::FEX_PAGE_SIZE;
+  uintptr_t UPPER_BOUND_PAGE = UPPER_BOUND / FEXCore::Utils::FEX_PAGE_SIZE;
+  constexpr static uintptr_t LOWER_BOUND_PAGE = LOWER_BOUND / FEXCore::Utils::FEX_PAGE_SIZE;

   struct ReservedVMARegion {
     uintptr_t Base;
@@ -114,22 +114,22 @@ namespace Alloc::OSAllocator {
       // 0x100'0000 Pages
       // 1 bit per page for tracking means 0x20'0000 (Pages / 8) bytes of flex space
       // Which is 2MB of tracking
-      uint64_t NumElements = (Size >> FHU::FEX_PAGE_SHIFT) * sizeof(FlexBitElementType);
+      uint64_t NumElements = (Size >> FEXCore::Utils::FEX_PAGE_SHIFT) * sizeof(FlexBitElementType);
       return sizeof(LiveVMARegion) + FEXCore::FlexBitSet<FlexBitElementType>::Size(NumElements);
     }

     static void InitializeVMARegionUsed(LiveVMARegion *Region, size_t AdditionalSize) {
-      size_t SizeOfLiveRegion = FEXCore::AlignUp(LiveVMARegion::GetSizeWithFlexSet(Region->SlabInfo->RegionSize), FHU::FEX_PAGE_SIZE);
+      size_t SizeOfLiveRegion = FEXCore::AlignUp(LiveVMARegion::GetSizeWithFlexSet(Region->SlabInfo->RegionSize), FEXCore::Utils::FEX_PAGE_SIZE);
       size_t SizePlusManagedData = SizeOfLiveRegion + AdditionalSize;

       Region->FreeSpace = Region->SlabInfo->RegionSize - SizePlusManagedData;

-      size_t NumManagedPages = SizePlusManagedData >> FHU::FEX_PAGE_SHIFT;
-      size_t ManagedSize = NumManagedPages << FHU::FEX_PAGE_SHIFT;
+      size_t NumManagedPages = SizePlusManagedData >> FEXCore::Utils::FEX_PAGE_SHIFT;
+      size_t ManagedSize = NumManagedPages << FEXCore::Utils::FEX_PAGE_SHIFT;

       // Use madvise to set the full tracking region to zero.
       // This ensures unused pages are zero, while not having the backing pages consuming memory.
-      ::madvise(Region->UsedPages.Memory + ManagedSize, (Region->SlabInfo->RegionSize >> FHU::FEX_PAGE_SHIFT) - ManagedSize, MADV_DONTNEED);
+      ::madvise(Region->UsedPages.Memory + ManagedSize, (Region->SlabInfo->RegionSize >> FEXCore::Utils::FEX_PAGE_SHIFT) - ManagedSize, MADV_DONTNEED);

       // Use madvise to claim WILLNEED on the beginning pages for initial state tracking.
       // Improves performance of the following MemClear by not doing a page level fault dance for data necessary to track >170TB of used pages.
@@ -162,7 +162,7 @@ namespace Alloc::OSAllocator {
       ReservedRegions->erase(ReservedIterator);

       // mprotect the new region we've allocated
-      size_t SizeOfLiveRegion = FEXCore::AlignUp(LiveVMARegion::GetSizeWithFlexSet(ReservedRegion->RegionSize), FHU::FEX_PAGE_SIZE);
+      size_t SizeOfLiveRegion = FEXCore::AlignUp(LiveVMARegion::GetSizeWithFlexSet(ReservedRegion->RegionSize), FEXCore::Utils::FEX_PAGE_SIZE);
       size_t SizePlusManagedData = UsedSize + SizeOfLiveRegion;

       [[maybe_unused]] auto Res = mprotect(reinterpret_cast<void*>(ReservedRegion->Base), SizePlusManagedData, PROT_READ | PROT_WRITE);
@@ -198,10 +198,10 @@ void OSAllocator_64Bit::DetermineVASize() {
   UPPER_BOUND = Size;

 #if _M_X86_64
   // Last page cannot be allocated on x86
-  UPPER_BOUND -= FHU::FEX_PAGE_SIZE;
+  UPPER_BOUND -= FEXCore::Utils::FEX_PAGE_SIZE;
 #endif

-  UPPER_BOUND_PAGE = UPPER_BOUND / FHU::FEX_PAGE_SIZE;
+  UPPER_BOUND_PAGE = UPPER_BOUND / FEXCore::Utils::FEX_PAGE_SIZE;
 }

 OSAllocator_64Bit::LiveVMARegion *OSAllocator_64Bit::FindLiveRegionForAddress(uintptr_t Addr, uintptr_t AddrEnd) {
@@ -250,13 +250,13 @@ void *OSAllocator_64Bit::Mmap(void *addr, size_t length, int prot, int flags, in
   uint64_t Addr = reinterpret_cast<uint64_t>(addr);

   // Addr must be page aligned
-  if (Addr & ~FHU::FEX_PAGE_MASK) {
+  if (Addr & ~FEXCore::Utils::FEX_PAGE_MASK) {
     return reinterpret_cast<void*>(-EINVAL);
   }

   // If FD is provided then offset must also be page aligned
   if (fd != -1 &&
-      offset & ~FHU::FEX_PAGE_MASK) {
+      offset & ~FEXCore::Utils::FEX_PAGE_MASK) {
     return reinterpret_cast<void*>(-EINVAL);
   }

@@ -266,10 +266,10 @@ void *OSAllocator_64Bit::Mmap(void *addr, size_t length, int prot, int flags, in
   }

   bool Fixed = (flags & MAP_FIXED) || (flags & MAP_FIXED_NOREPLACE);

-  length = FEXCore::AlignUp(length, FHU::FEX_PAGE_SIZE);
+  length = FEXCore::AlignUp(length, FEXCore::Utils::FEX_PAGE_SIZE);

   uint64_t AddrEnd = Addr + length;
-  size_t NumberOfPages = length / FHU::FEX_PAGE_SIZE;
+  size_t NumberOfPages = length / FEXCore::Utils::FEX_PAGE_SIZE;

   // This needs a mutex to be thread safe
   auto lk = FEXCore::GuardSignalDeferringSectionWithFallback(AllocationMutex, TLSThread);
@@ -285,14 +285,14 @@ void *OSAllocator_64Bit::Mmap(void *addr, size_t length, int prot, int flags, in
   auto CheckIfRangeFits = [&AllocatedOffset](LiveVMARegion *Region, uint64_t length, int prot, int flags, int fd, off_t offset, uint64_t StartingPosition = 0) -> std::pair {
     uint64_t AllocatedPage{~0ULL};
-    uint64_t NumberOfPages = length >> FHU::FEX_PAGE_SHIFT;
+    uint64_t NumberOfPages = length >> FEXCore::Utils::FEX_PAGE_SHIFT;

     if (Region->FreeSpace >= length) {
       uint64_t LastAllocation = StartingPosition ?
-        (StartingPosition - Region->SlabInfo->Base) >> FHU::FEX_PAGE_SHIFT
+        (StartingPosition - Region->SlabInfo->Base) >> FEXCore::Utils::FEX_PAGE_SHIFT
         : Region->LastPageAllocation;
-      size_t RegionNumberOfPages = Region->SlabInfo->RegionSize >> FHU::FEX_PAGE_SHIFT;
+      size_t RegionNumberOfPages = Region->SlabInfo->RegionSize >> FEXCore::Utils::FEX_PAGE_SHIFT;

       if (Region->HadMunmap) {
@@ -317,7 +317,7 @@ void *OSAllocator_64Bit::Mmap(void *addr, size_t length, int prot, int flags, in
     }

     if (AllocatedPage != ~0ULL) {
-      AllocatedOffset = Region->SlabInfo->Base + AllocatedPage * FHU::FEX_PAGE_SIZE;
+      AllocatedOffset = Region->SlabInfo->Base + AllocatedPage * FEXCore::Utils::FEX_PAGE_SIZE;

       // We need to setup protections for this
       void *MMapResult = ::mmap(reinterpret_cast<void*>(AllocatedOffset),
@@ -407,7 +407,7 @@ void *OSAllocator_64Bit::Mmap(void *addr, size_t length, int prot, int flags, in
   if (!LiveRegion) {
     // Couldn't find a fit in the live regions
     // Allocate a new reserved region
-    size_t lengthOfLiveRegion = FEXCore::AlignUp(LiveVMARegion::GetSizeWithFlexSet(length), FHU::FEX_PAGE_SIZE);
+    size_t lengthOfLiveRegion = FEXCore::AlignUp(LiveVMARegion::GetSizeWithFlexSet(length), FEXCore::Utils::FEX_PAGE_SIZE);
     size_t lengthPlusManagedData = length + lengthOfLiveRegion;
     for (auto it = ReservedRegions->begin(); it != ReservedRegions->end(); ++it) {
       if ((*it)->RegionSize >= lengthPlusManagedData) {
@@ -421,7 +421,7 @@ void *OSAllocator_64Bit::Mmap(void *addr, size_t length, int prot, int flags, in
   if (LiveRegion) {
     // Mark the pages as used
     uintptr_t RegionBegin = LiveRegion->SlabInfo->Base;
-    uintptr_t MappedBegin = (AllocatedOffset - RegionBegin) >> FHU::FEX_PAGE_SHIFT;
+    uintptr_t MappedBegin = (AllocatedOffset - RegionBegin) >> FEXCore::Utils::FEX_PAGE_SHIFT;

     for (size_t i = 0; i < NumberOfPages; ++i) {
       LiveRegion->UsedPages.Set(MappedBegin + i);
@@ -447,11 +447,11 @@ int OSAllocator_64Bit::Munmap(void *addr, size_t length) {
   uint64_t Addr = reinterpret_cast<uint64_t>(addr);

-  if (Addr & ~FHU::FEX_PAGE_MASK) {
+  if (Addr & ~FEXCore::Utils::FEX_PAGE_MASK) {
     return -EINVAL;
   }

-  if (length & ~FHU::FEX_PAGE_MASK) {
+  if (length & ~FEXCore::Utils::FEX_PAGE_MASK) {
     return -EINVAL;
   }

@@ -462,7 +462,7 @@ int OSAllocator_64Bit::Munmap(void *addr, size_t length) {
   // This needs a mutex to be thread safe
   auto lk = FEXCore::GuardSignalDeferringSectionWithFallback(AllocationMutex, TLSThread);

-  length = FEXCore::AlignUp(length, FHU::FEX_PAGE_SIZE);
+  length = FEXCore::AlignUp(length, FEXCore::Utils::FEX_PAGE_SIZE);

   uintptr_t PtrBegin = reinterpret_cast<uintptr_t>(addr);
   uintptr_t PtrEnd = PtrBegin + length;
@@ -476,8 +476,8 @@ int OSAllocator_64Bit::Munmap(void *addr, size_t length) {
       // Live region fully encompasses slab range

       uint64_t FreedPages{};
-      uint32_t SlabPageBegin = (PtrBegin - RegionBegin) >> FHU::FEX_PAGE_SHIFT;
-      uint64_t PagesToFree = length >> FHU::FEX_PAGE_SHIFT;
+      uint32_t SlabPageBegin = (PtrBegin - RegionBegin) >> FEXCore::Utils::FEX_PAGE_SHIFT;
+      uint64_t PagesToFree = length >> FEXCore::Utils::FEX_PAGE_SHIFT;

       for (size_t i = 0; i < PagesToFree; ++i) {
         FreedPages += (*it)->UsedPages.TestAndClear(SlabPageBegin + i) ? 1 : 0;
diff --git a/FEXCore/Source/Utils/Allocator/IntrusiveArenaAllocator.h b/FEXCore/Source/Utils/Allocator/IntrusiveArenaAllocator.h
index 91c27569f4..85cdf844a9 100644
--- a/FEXCore/Source/Utils/Allocator/IntrusiveArenaAllocator.h
+++ b/FEXCore/Source/Utils/Allocator/IntrusiveArenaAllocator.h
@@ -5,7 +5,7 @@
 #include "HostAllocator.h"

 #include
-#include <FEXHeaderUtils/TypeDefines.h>
+#include <FEXCore/Utils/TypeDefines.h>

 #include
 #include
@@ -77,9 +77,9 @@ namespace Alloc {
     IntrusiveArenaAllocator(void* Ptr, size_t _Size)
       : Begin {reinterpret_cast<uintptr_t>(Ptr)}
       , Size {_Size} {
-      uint64_t NumberOfPages = _Size / FHU::FEX_PAGE_SIZE;
+      uint64_t NumberOfPages = _Size / FEXCore::Utils::FEX_PAGE_SIZE;
       uint64_t UsedBits = FEXCore::AlignUp(sizeof(IntrusiveArenaAllocator) +
-        Size / FHU::FEX_PAGE_SIZE / 8, FHU::FEX_PAGE_SIZE);
+        Size / FEXCore::Utils::FEX_PAGE_SIZE / 8, FEXCore::Utils::FEX_PAGE_SIZE);
       for (size_t i = 0; i < UsedBits; ++i) {
         UsedPages.Set(i);
       }
@@ -107,7 +107,7 @@ namespace Alloc {
     void *do_allocate(std::size_t bytes, std::size_t alignment) override {
       std::scoped_lock lk{AllocationMutex};

-      size_t NumberPages = FEXCore::AlignUp(bytes, FHU::FEX_PAGE_SIZE) / FHU::FEX_PAGE_SIZE;
+      size_t NumberPages = FEXCore::AlignUp(bytes, FEXCore::Utils::FEX_PAGE_SIZE) / FEXCore::Utils::FEX_PAGE_SIZE;

       uintptr_t AllocatedOffset{};

@@ -151,7 +151,7 @@ namespace Alloc {
         LastAllocatedPageOffset = AllocatedOffset + NumberPages;

         // Now convert this base page to a pointer and return it
-        return reinterpret_cast<void*>(Begin + AllocatedOffset * FHU::FEX_PAGE_SIZE);
+        return reinterpret_cast<void*>(Begin + AllocatedOffset * FEXCore::Utils::FEX_PAGE_SIZE);
       }

       return nullptr;
@@ -160,8 +160,8 @@ namespace Alloc {
     void do_deallocate(void* p, std::size_t bytes, std::size_t alignment) override {
       std::scoped_lock lk{AllocationMutex};

-      uintptr_t PageOffset = (reinterpret_cast<uintptr_t>(p) - Begin) / FHU::FEX_PAGE_SIZE;
-      size_t NumPages = FEXCore::AlignUp(bytes, FHU::FEX_PAGE_SIZE) / FHU::FEX_PAGE_SIZE;
+      uintptr_t PageOffset = (reinterpret_cast<uintptr_t>(p) - Begin) / FEXCore::Utils::FEX_PAGE_SIZE;
+      size_t NumPages = FEXCore::AlignUp(bytes, FEXCore::Utils::FEX_PAGE_SIZE) / FEXCore::Utils::FEX_PAGE_SIZE;

       // Walk the allocation list and deallocate
       uint64_t FreedPages{};
diff --git a/FEXCore/include/FEXCore/Debug/InternalThreadState.h b/FEXCore/include/FEXCore/Debug/InternalThreadState.h
index 797728a77a..645291ada6 100644
--- a/FEXCore/include/FEXCore/Debug/InternalThreadState.h
+++ b/FEXCore/include/FEXCore/Debug/InternalThreadState.h
@@ -9,10 +9,10 @@
 #include
 #include
 #include
+#include <FEXCore/Utils/TypeDefines.h>
 #include
 #include
 #include
-#include <FEXHeaderUtils/TypeDefines.h>
 #include
 #include

@@ -146,7 +146,7 @@ namespace FEXCore::Core {
     alignas(16) FEXCore::Core::CpuStateFrame BaseFrameState{};

     // Can be reprotected as RO to trigger an interrupt at generated code block entrypoints
-    alignas(FHU::FEX_PAGE_SIZE) uint8_t InterruptFaultPage[FHU::FEX_PAGE_SIZE];
+    alignas(FEXCore::Utils::FEX_PAGE_SIZE) uint8_t InterruptFaultPage[FEXCore::Utils::FEX_PAGE_SIZE];
   };

   static_assert((offsetof(FEXCore::Core::InternalThreadState, InterruptFaultPage) - offsetof(FEXCore::Core::InternalThreadState, BaseFrameState)) < 4096,
diff --git a/FEXHeaderUtils/FEXHeaderUtils/TypeDefines.h b/FEXCore/include/FEXCore/Utils/TypeDefines.h
similarity index 93%
rename from FEXHeaderUtils/FEXHeaderUtils/TypeDefines.h
rename to FEXCore/include/FEXCore/Utils/TypeDefines.h
index f19fb99e71..43b6049eb2 100644
--- a/FEXHeaderUtils/FEXHeaderUtils/TypeDefines.h
+++ b/FEXCore/include/FEXCore/Utils/TypeDefines.h
@@ -2,7 +2,7 @@
 #pragma once
 #include

-namespace FHU {
+namespace FEXCore::Utils {
   // FEX assumes an operating page size of 4096
   // To work around build systems that build on a 16k/64k page size, define our page size here
   // Don't use the system provided PAGE_SIZE define because of this.
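The rename hunk above only shows the namespace line changing (similarity index 93%). For reference, a hypothetical reconstruction of the constants this header must define, inferred from their uses throughout the diff; the 4096 value comes from the comment itself, while the shift and mask values are assumptions consistent with expressions like 0xFFFF'F000ULL >> FEX_PAGE_SHIFT and Addr & ~FEX_PAGE_MASK:

// Hypothetical sketch of FEXCore/include/FEXCore/Utils/TypeDefines.h
#pragma once
#include <cstddef>

namespace FEXCore::Utils {
  constexpr size_t FEX_PAGE_SIZE = 4096;
  constexpr size_t FEX_PAGE_SHIFT = 12;                  // log2(FEX_PAGE_SIZE)
  constexpr size_t FEX_PAGE_MASK = ~(FEX_PAGE_SIZE - 1); // Addr & MASK yields the page base
}

With these definitions, Addr & FEX_PAGE_MASK rounds down to a page base, and Addr & ~FEX_PAGE_MASK is non-zero exactly when Addr is not page aligned, matching both usage patterns in the hunks above and below.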
diff --git a/Source/Tools/CommonTools/HarnessHelpers.h b/Source/Tools/CommonTools/HarnessHelpers.h
index 278fe5b238..3e3c318aa4 100644
--- a/Source/Tools/CommonTools/HarnessHelpers.h
+++ b/Source/Tools/CommonTools/HarnessHelpers.h
@@ -17,13 +17,13 @@
 #include
 #include
 #include
+#include <FEXCore/Utils/TypeDefines.h>
 #include
 #include
 #include
 #include
 #include
 #include
-#include <FEXHeaderUtils/TypeDefines.h>
 #include

 namespace FEX::HarnessHelper {
@@ -425,7 +425,7 @@ namespace FEX::HarnessHelper {
     // Map in the memory region for the test file
 #ifndef _WIN32
-    size_t Length = FEXCore::AlignUp(RawASMFile.size(), FHU::FEX_PAGE_SIZE);
+    size_t Length = FEXCore::AlignUp(RawASMFile.size(), FEXCore::Utils::FEX_PAGE_SIZE);
     auto ASMPtr = FEXCore::Allocator::VirtualAlloc(reinterpret_cast<void*>(Code_start_page), Length, true);
 #else
     // Special magic DOS area that starts at 0x1'0000
diff --git a/Source/Tools/FEXLoader/ELFCodeLoader.h b/Source/Tools/FEXLoader/ELFCodeLoader.h
index 03ff0fa343..2a866c338c 100644
--- a/Source/Tools/FEXLoader/ELFCodeLoader.h
+++ b/Source/Tools/FEXLoader/ELFCodeLoader.h
@@ -22,11 +22,11 @@
 #include
 #include
 #include
+#include <FEXCore/Utils/TypeDefines.h>
 #include
 #include
 #include
 #include
-#include <FEXHeaderUtils/TypeDefines.h>
 #include
 #include

@@ -514,12 +514,12 @@ class ELFCodeLoader final : public FEXCore::CodeLoader {
       ASLR_Offset &= (1ULL << ASLR_BITS_32) - 1;
     }

-    ASLR_Offset <<= FHU::FEX_PAGE_SHIFT;
+    ASLR_Offset <<= FEXCore::Utils::FEX_PAGE_SHIFT;
     ELFLoadHint += ASLR_Offset;
   }
 #endif

   // Align the mapping
-  ELFLoadHint &= FHU::FEX_PAGE_MASK;
+  ELFLoadHint &= FEXCore::Utils::FEX_PAGE_MASK;
 }

 // load the main elf
@@ -584,13 +584,13 @@ class ELFCodeLoader final : public FEXCore::CodeLoader {
   if (!VSyscallEntry) [[unlikely]] {
     // If the VDSO thunk doesn't exist then we might not have a vsyscall entry.
     // Newer glibc requires vsyscall to exist now. So let's allocate a buffer and stick a vsyscall in to it.
-    auto VSyscallPage = Handler->GuestMmap(nullptr, nullptr, FHU::FEX_PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+    auto VSyscallPage = Handler->GuestMmap(nullptr, nullptr, FEXCore::Utils::FEX_PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
     constexpr static uint8_t VSyscallCode[] = {
       0xcd, 0x80, // int 0x80
       0xc3, // ret
     };
     memcpy(VSyscallPage, VSyscallCode, sizeof(VSyscallCode));

-    mprotect(VSyscallPage, FHU::FEX_PAGE_SIZE, PROT_READ);
+    mprotect(VSyscallPage, FEXCore::Utils::FEX_PAGE_SIZE, PROT_READ);

     VSyscallEntry = reinterpret_cast<uint64_t>(VSyscallPage);
   }
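The vsyscall fallback above writes a three-byte stub (int 0x80; ret) into a freshly mapped guest page and then drops the page to PROT_READ; since the guest executes through the emulator, the bytes only ever need to be readable, never host-executable. A standalone sketch of the same pattern, assuming a plain host mmap in place of Handler->GuestMmap; MakeVSyscallPage is an illustrative name:

#include <cstdint>
#include <cstring>
#include <sys/mman.h>

constexpr size_t PAGE_SIZE = 4096; // stand-in for FEXCore::Utils::FEX_PAGE_SIZE

uint64_t MakeVSyscallPage() {
  void* Page = ::mmap(nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE,
                      MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (Page == MAP_FAILED) {
    return 0;
  }
  constexpr uint8_t VSyscallCode[] = {
    0xcd, 0x80, // int 0x80
    0xc3,       // ret
  };
  std::memcpy(Page, VSyscallCode, sizeof(VSyscallCode));
  // Read-only from here on; the emulator decodes these bytes, nothing executes them natively.
  ::mprotect(Page, PAGE_SIZE, PROT_READ);
  return reinterpret_cast<uint64_t>(Page);
}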
diff --git a/Source/Tools/LinuxEmulation/LinuxSyscalls/LinuxAllocator.cpp b/Source/Tools/LinuxEmulation/LinuxSyscalls/LinuxAllocator.cpp
index d78211dd3a..e30196bf7e 100644
--- a/Source/Tools/LinuxEmulation/LinuxSyscalls/LinuxAllocator.cpp
+++ b/Source/Tools/LinuxEmulation/LinuxSyscalls/LinuxAllocator.cpp
@@ -3,8 +3,8 @@
 #include "LinuxSyscalls/Syscalls.h"

 #include
+#include <FEXCore/Utils/TypeDefines.h>
 #include
-#include <FEXHeaderUtils/TypeDefines.h>
 #include
 #include

@@ -23,8 +23,8 @@ namespace FEX::HLE {
 class MemAllocator32Bit final : public FEX::HLE::MemAllocator {
 private:
   static constexpr uint64_t BASE_KEY = 16;
-  const uint64_t TOP_KEY = 0xFFFF'F000ULL >> FHU::FEX_PAGE_SHIFT;
-  const uint64_t TOP_KEY32BIT = 0x7FFF'F000ULL >> FHU::FEX_PAGE_SHIFT;
+  const uint64_t TOP_KEY = 0xFFFF'F000ULL >> FEXCore::Utils::FEX_PAGE_SHIFT;
+  const uint64_t TOP_KEY32BIT = 0x7FFF'F000ULL >> FEXCore::Utils::FEX_PAGE_SHIFT;

 public:
   MemAllocator32Bit() {
@@ -136,10 +136,10 @@ uint64_t MemAllocator32Bit::FindPageRange_TopDown(uint64_t Start, size_t Pages)
 void *MemAllocator32Bit::Mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset) {
   std::scoped_lock lk{AllocMutex};

-  size_t PagesLength = FEXCore::AlignUp(length, FHU::FEX_PAGE_SIZE) >> FHU::FEX_PAGE_SHIFT;
+  size_t PagesLength = FEXCore::AlignUp(length, FEXCore::Utils::FEX_PAGE_SIZE) >> FEXCore::Utils::FEX_PAGE_SHIFT;

   uintptr_t Addr = reinterpret_cast<uintptr_t>(addr);
-  uintptr_t PageAddr = Addr >> FHU::FEX_PAGE_SHIFT;
+  uintptr_t PageAddr = Addr >> FEXCore::Utils::FEX_PAGE_SHIFT;

   // Define MAP_FIXED_NOREPLACE ourselves to ensure we always parse this flag
   constexpr int FEX_MAP_FIXED_NOREPLACE = 0x100000;
@@ -147,13 +147,13 @@ void *MemAllocator32Bit::Mmap(void *addr, size_t length, int prot, int flags, in
     (flags & FEX_MAP_FIXED_NOREPLACE));

   // Both Addr and length must be page aligned
-  if (Addr & ~FHU::FEX_PAGE_MASK) {
+  if (Addr & ~FEXCore::Utils::FEX_PAGE_MASK) {
     return reinterpret_cast<void*>(-EINVAL);
   }

   // If we do have an fd then offset must be page aligned
   if (fd != -1 &&
-      offset & ~FHU::FEX_PAGE_MASK) {
+      offset & ~FEXCore::Utils::FEX_PAGE_MASK) {
     return reinterpret_cast<void*>(-EINVAL);
   }

@@ -197,7 +197,7 @@ void *MemAllocator32Bit::Mmap(void *addr, size_t length, int prot, int flags, in
   {
     // Try and map the range
     void *MappedPtr = ::mmap(
-      reinterpret_cast<void*>(LowerPage << FHU::FEX_PAGE_SHIFT),
+      reinterpret_cast<void*>(LowerPage << FEXCore::Utils::FEX_PAGE_SHIFT),
       length,
       prot,
       flags | FEX_MAP_FIXED_NOREPLACE,
@@ -209,11 +209,11 @@ void *MemAllocator32Bit::Mmap(void *addr, size_t length, int prot, int flags, in
       return reinterpret_cast<void*>(-errno);
     }
     else if (MappedPtr == MAP_FAILED ||
-             MappedPtr >= reinterpret_cast<void*>(TOP_KEY << FHU::FEX_PAGE_SHIFT)) {
+             MappedPtr >= reinterpret_cast<void*>(TOP_KEY << FEXCore::Utils::FEX_PAGE_SHIFT)) {
       // Handles the case where MAP_FIXED_NOREPLACE failed with MAP_FAILED
       // or if the host system's kernel isn't new enough then it returns the wrong pointer
       if (MappedPtr != MAP_FAILED &&
-          MappedPtr >= reinterpret_cast<void*>(TOP_KEY << FHU::FEX_PAGE_SHIFT)) {
+          MappedPtr >= reinterpret_cast<void*>(TOP_KEY << FEXCore::Utils::FEX_PAGE_SHIFT)) {
         // Make sure to munmap this so we don't leak memory
         ::munmap(MappedPtr, length);
       }
@@ -259,14 +259,14 @@ void *MemAllocator32Bit::Mmap(void *addr, size_t length, int prot, int flags, in
   }
   else {
     void *MappedPtr = ::mmap(
-      reinterpret_cast<void*>(PageAddr << FHU::FEX_PAGE_SHIFT),
-      PagesLength << FHU::FEX_PAGE_SHIFT,
+      reinterpret_cast<void*>(PageAddr << FEXCore::Utils::FEX_PAGE_SHIFT),
+      PagesLength << FEXCore::Utils::FEX_PAGE_SHIFT,
       prot,
       flags,
       fd,
       offset);

-    if (MappedPtr >= reinterpret_cast<void*>(TOP_KEY << FHU::FEX_PAGE_SHIFT) &&
+    if (MappedPtr >= reinterpret_cast<void*>(TOP_KEY << FEXCore::Utils::FEX_PAGE_SHIFT) &&
        (flags & FEX_MAP_FIXED_NOREPLACE)) {
       // Handles the case where MAP_FIXED_NOREPLACE isn't handled by the host system's
       // kernel and returns the wrong pointer
@@ -287,19 +287,19 @@ void *MemAllocator32Bit::Mmap(void *addr, size_t length, int prot, int flags, in
 int MemAllocator32Bit::Munmap(void *addr, size_t length) {
   std::scoped_lock lk{AllocMutex};

-  size_t PagesLength = FEXCore::AlignUp(length, FHU::FEX_PAGE_SIZE) >> FHU::FEX_PAGE_SHIFT;
+  size_t PagesLength = FEXCore::AlignUp(length, FEXCore::Utils::FEX_PAGE_SIZE) >> FEXCore::Utils::FEX_PAGE_SHIFT;

   uintptr_t Addr = reinterpret_cast<uintptr_t>(addr);
-  uintptr_t PageAddr = Addr >> FHU::FEX_PAGE_SHIFT;
+  uintptr_t PageAddr = Addr >> FEXCore::Utils::FEX_PAGE_SHIFT;

   uintptr_t PageEnd = PageAddr + PagesLength;

   // Both Addr and length must be page aligned
-  if (Addr & ~FHU::FEX_PAGE_MASK) {
+  if (Addr & ~FEXCore::Utils::FEX_PAGE_MASK) {
     return -EINVAL;
   }

-  if (length & ~FHU::FEX_PAGE_MASK) {
+  if (length & ~FEXCore::Utils::FEX_PAGE_MASK) {
     return -EINVAL;
   }

@@ -315,7 +315,7 @@ int MemAllocator32Bit::Munmap(void *addr, size_t length) {
   while (PageAddr != PageEnd) {
     // Always pass to munmap, it may be something allocated we aren't tracking
-    int Result = ::munmap(reinterpret_cast<void*>(PageAddr << FHU::FEX_PAGE_SHIFT), FHU::FEX_PAGE_SIZE);
+    int Result = ::munmap(reinterpret_cast<void*>(PageAddr << FEXCore::Utils::FEX_PAGE_SHIFT), FEXCore::Utils::FEX_PAGE_SIZE);
     if (Result != 0) {
       return -errno;
     }
@@ -331,8 +331,8 @@ int MemAllocator32Bit::Munmap(void *addr, size_t length) {
 }

 void *MemAllocator32Bit::Mremap(void *old_address, size_t old_size, size_t new_size, int flags, void *new_address) {
-  size_t OldPagesLength = FEXCore::AlignUp(old_size, FHU::FEX_PAGE_SIZE) >> FHU::FEX_PAGE_SHIFT;
-  size_t NewPagesLength = FEXCore::AlignUp(new_size, FHU::FEX_PAGE_SIZE) >> FHU::FEX_PAGE_SHIFT;
+  size_t OldPagesLength = FEXCore::AlignUp(old_size, FEXCore::Utils::FEX_PAGE_SIZE) >> FEXCore::Utils::FEX_PAGE_SHIFT;
+  size_t NewPagesLength = FEXCore::AlignUp(new_size, FEXCore::Utils::FEX_PAGE_SIZE) >> FEXCore::Utils::FEX_PAGE_SHIFT;

   {
     std::scoped_lock lk{AllocMutex};
@@ -343,12 +343,12 @@ void *MemAllocator32Bit::Mremap(void *old_address, size_t old_size, size_t new_s
       if (!(flags & MREMAP_DONTUNMAP)) {
         // Unmap the old location
         uintptr_t OldAddr = reinterpret_cast<uintptr_t>(old_address);
-        SetFreePages(OldAddr >> FHU::FEX_PAGE_SHIFT, OldPagesLength);
+        SetFreePages(OldAddr >> FEXCore::Utils::FEX_PAGE_SHIFT, OldPagesLength);
       }

       // Map the new pages
       uintptr_t NewAddr = reinterpret_cast<uintptr_t>(MappedPtr);
-      SetUsedPages(NewAddr >> FHU::FEX_PAGE_SHIFT, NewPagesLength);
+      SetUsedPages(NewAddr >> FEXCore::Utils::FEX_PAGE_SHIFT, NewPagesLength);
     }
     else {
       return reinterpret_cast<void*>(-errno);
@@ -356,15 +356,15 @@ void *MemAllocator32Bit::Mremap(void *old_address, size_t old_size, size_t new_s
   }
   else {
     uintptr_t OldAddr = reinterpret_cast<uintptr_t>(old_address);
-    uintptr_t OldPageAddr = OldAddr >> FHU::FEX_PAGE_SHIFT;
+    uintptr_t OldPageAddr = OldAddr >> FEXCore::Utils::FEX_PAGE_SHIFT;

     if (NewPagesLength < OldPagesLength) {
       void *MappedPtr = ::mremap(old_address, old_size, new_size, flags & ~MREMAP_MAYMOVE);

       if (MappedPtr != MAP_FAILED) {
         // Clear the pages that we just shrunk
-        size_t NewPagesLength = FEXCore::AlignUp(new_size, FHU::FEX_PAGE_SIZE) >> FHU::FEX_PAGE_SHIFT;
-        uintptr_t NewPageAddr = reinterpret_cast<uintptr_t>(MappedPtr) >> FHU::FEX_PAGE_SHIFT;
+        size_t NewPagesLength = FEXCore::AlignUp(new_size, FEXCore::Utils::FEX_PAGE_SIZE) >> FEXCore::Utils::FEX_PAGE_SHIFT;
+        uintptr_t NewPageAddr = reinterpret_cast<uintptr_t>(MappedPtr) >> FEXCore::Utils::FEX_PAGE_SHIFT;
         SetFreePages(NewPageAddr + NewPagesLength, OldPagesLength - NewPagesLength);
         return MappedPtr;
       }
@@ -388,9 +388,9 @@ void *MemAllocator32Bit::Mremap(void *old_address, size_t old_size, size_t new_s
       if (MappedPtr != MAP_FAILED) {
         // Map the new pages
-        size_t NewPagesLength = FEXCore::AlignUp(new_size, FHU::FEX_PAGE_SIZE) >> FHU::FEX_PAGE_SHIFT;
+        size_t NewPagesLength = FEXCore::AlignUp(new_size, FEXCore::Utils::FEX_PAGE_SIZE) >> FEXCore::Utils::FEX_PAGE_SHIFT;
         uintptr_t NewAddr = reinterpret_cast<uintptr_t>(MappedPtr);
-        SetUsedPages(NewAddr >> FHU::FEX_PAGE_SHIFT, NewPagesLength);
+        SetUsedPages(NewAddr >> FEXCore::Utils::FEX_PAGE_SHIFT, NewPagesLength);

         return MappedPtr;
       }
       else if (!(flags & MREMAP_MAYMOVE)) {
@@ -424,13 +424,13 @@ void *MemAllocator32Bit::Mremap(void *old_address, size_t old_size, size_t new_s
       // If we have both MREMAP_DONTUNMAP not set and the new pointer is at a new location
       // Make sure to clear the old mapping
       uintptr_t OldAddr = reinterpret_cast<uintptr_t>(old_address);
-      SetFreePages(OldAddr >> FHU::FEX_PAGE_SHIFT , OldPagesLength);
+      SetFreePages(OldAddr >> FEXCore::Utils::FEX_PAGE_SHIFT , OldPagesLength);
     }

     // Map the new pages
-    size_t NewPagesLength = FEXCore::AlignUp(new_size, FHU::FEX_PAGE_SIZE) >> FHU::FEX_PAGE_SHIFT;
+    size_t NewPagesLength = FEXCore::AlignUp(new_size, FEXCore::Utils::FEX_PAGE_SIZE) >> FEXCore::Utils::FEX_PAGE_SHIFT;
     uintptr_t NewAddr = reinterpret_cast<uintptr_t>(MappedPtr);
-    SetUsedPages(NewAddr >> FHU::FEX_PAGE_SHIFT, NewPagesLength);
+    SetUsedPages(NewAddr >> FEXCore::Utils::FEX_PAGE_SHIFT, NewPagesLength);

     return MappedPtr;
   }
@@ -453,7 +453,7 @@ uint64_t MemAllocator32Bit::Shmat(int shmid, const void* shmaddr, int shmflg, ui
   }

   uintptr_t NewAddr = reinterpret_cast<uintptr_t>(Result);
-  uintptr_t NewPageAddr = NewAddr >> FHU::FEX_PAGE_SHIFT;
+  uintptr_t NewPageAddr = NewAddr >> FEXCore::Utils::FEX_PAGE_SHIFT;

   // Add to the map
   PageToShm[NewPageAddr] = shmid;
@@ -465,7 +465,7 @@ uint64_t MemAllocator32Bit::Shmat(int shmid, const void* shmaddr, int shmflg, ui
   if (shmctl(shmid, IPC_STAT, &buf) == 0) {
     // Map the new pages
-    size_t NewPagesLength = buf.shm_segsz >> FHU::FEX_PAGE_SHIFT;
+    size_t NewPagesLength = buf.shm_segsz >> FEXCore::Utils::FEX_PAGE_SHIFT;
     SetUsedPages(NewPageAddr, NewPagesLength);
   }

@@ -483,7 +483,7 @@ uint64_t MemAllocator32Bit::Shmat(int shmid, const void* shmaddr, int shmflg, ui
   uint64_t PagesLength{};

   if (shmctl(shmid, IPC_STAT, &buf) == 0) {
-    PagesLength = FEXCore::AlignUp(buf.shm_segsz, FHU::FEX_PAGE_SIZE) >> FHU::FEX_PAGE_SHIFT;
+    PagesLength = FEXCore::AlignUp(buf.shm_segsz, FEXCore::Utils::FEX_PAGE_SIZE) >> FEXCore::Utils::FEX_PAGE_SHIFT;
   }
   else {
     return -EINVAL;
@@ -509,7 +509,7 @@ uint64_t MemAllocator32Bit::Shmat(int shmid, const void* shmaddr, int shmflg, ui
     // Try and map the range
     void *MappedPtr = ::shmat(
       shmid,
-      reinterpret_cast<void*>(LowerPage << FHU::FEX_PAGE_SHIFT),
+      reinterpret_cast<void*>(LowerPage << FEXCore::Utils::FEX_PAGE_SHIFT),
       shmflg);

     if (MappedPtr == MAP_FAILED) {
@@ -554,7 +554,7 @@ uint64_t MemAllocator32Bit::Shmat(int shmid, const void* shmaddr, int shmflg, ui
 uint64_t MemAllocator32Bit::Shmdt(const void* shmaddr) {
   std::scoped_lock lk{AllocMutex};

-  uint32_t AddrPage = reinterpret_cast<uintptr_t>(shmaddr) >> FHU::FEX_PAGE_SHIFT;
+  uint32_t AddrPage = reinterpret_cast<uintptr_t>(shmaddr) >> FEXCore::Utils::FEX_PAGE_SHIFT;
   auto it = PageToShm.find(AddrPage);

   if (it == PageToShm.end()) {
diff --git a/Source/Tools/LinuxEmulation/LinuxSyscalls/Syscalls.cpp b/Source/Tools/LinuxEmulation/LinuxSyscalls/Syscalls.cpp
index 51c14158da..4f5e54998d 100644
--- a/Source/Tools/LinuxEmulation/LinuxSyscalls/Syscalls.cpp
+++ b/Source/Tools/LinuxEmulation/LinuxSyscalls/Syscalls.cpp
@@ -38,7 +38,6 @@ desc: Glue logic, brk allocations
 #include
 #include
 #include
-#include <FEXHeaderUtils/TypeDefines.h>
 #include
 #include

diff --git a/Source/Tools/LinuxEmulation/LinuxSyscalls/SyscallsSMCTracking.cpp b/Source/Tools/LinuxEmulation/LinuxSyscalls/SyscallsSMCTracking.cpp
index 9f6248dad6..148284c7aa 100644
--- a/Source/Tools/LinuxEmulation/LinuxSyscalls/SyscallsSMCTracking.cpp
+++ b/Source/Tools/LinuxEmulation/LinuxSyscalls/SyscallsSMCTracking.cpp
@@ -15,11 +15,11 @@ desc: SMC/MMan Tracking
 #include "LinuxSyscalls/Syscalls.h"

-#include <FEXHeaderUtils/TypeDefines.h>
 #include
 #include
 #include
 #include
+#include <FEXCore/Utils/TypeDefines.h>

 namespace FEX::HLE {
@@ -64,7 +64,7 @@ bool SyscallHandler::HandleSegfault(FEXCore::Core::InternalThreadState *Thread,
     return false;
   }

-  auto FaultBase = FEXCore::AlignDown(FaultAddress, FHU::FEX_PAGE_SIZE);
+  auto FaultBase = FEXCore::AlignDown(FaultAddress, FEXCore::Utils::FEX_PAGE_SIZE);

   if (Entry->second.Flags.Shared) {
     LOGMAN_THROW_A_FMT(Entry->second.Resource, "VMA tracking error");
@@ -80,17 +80,17 @@ bool SyscallHandler::HandleSegfault(FEXCore::Core::InternalThreadState *Thread,
         auto FaultBaseMirrored = Offset - VMA->Offset + VMA->Base;

         if (VMA->Prot.Writable) {
-          _SyscallHandler->TM.InvalidateGuestCodeRange(Thread, FaultBaseMirrored, FHU::FEX_PAGE_SIZE, [](uintptr_t Start, uintptr_t Length) {
+          _SyscallHandler->TM.InvalidateGuestCodeRange(Thread, FaultBaseMirrored, FEXCore::Utils::FEX_PAGE_SIZE, [](uintptr_t Start, uintptr_t Length) {
             auto rv = mprotect((void *)Start, Length, PROT_READ | PROT_WRITE);
             LogMan::Throw::AAFmt(rv == 0, "mprotect({}, {}) failed", Start, Length);
           });
         } else {
-          _SyscallHandler->TM.InvalidateGuestCodeRange(Thread, FaultBaseMirrored, FHU::FEX_PAGE_SIZE);
+          _SyscallHandler->TM.InvalidateGuestCodeRange(Thread, FaultBaseMirrored, FEXCore::Utils::FEX_PAGE_SIZE);
         }
       }
     } while ((VMA = VMA->ResourceNextVMA));
   } else {
-    _SyscallHandler->TM.InvalidateGuestCodeRange(Thread, FaultBase, FHU::FEX_PAGE_SIZE, [](uintptr_t Start, uintptr_t Length) {
+    _SyscallHandler->TM.InvalidateGuestCodeRange(Thread, FaultBase, FEXCore::Utils::FEX_PAGE_SIZE, [](uintptr_t Start, uintptr_t Length) {
       auto rv = mprotect((void *)Start, Length, PROT_READ | PROT_WRITE);
       LogMan::Throw::AAFmt(rv == 0, "mprotect({}, {}) failed", Start, Length);
     });
@@ -101,8 +101,8 @@ bool SyscallHandler::HandleSegfault(FEXCore::Core::InternalThreadState *Thread,
 }

 void SyscallHandler::MarkGuestExecutableRange(FEXCore::Core::InternalThreadState *Thread, uint64_t Start, uint64_t Length) {
-  const auto Base = Start & FHU::FEX_PAGE_MASK;
-  const auto Top = FEXCore::AlignUp(Start + Length, FHU::FEX_PAGE_SIZE);
+  const auto Base = Start & FEXCore::Utils::FEX_PAGE_MASK;
+  const auto Top = FEXCore::AlignUp(Start + Length, FEXCore::Utils::FEX_PAGE_SIZE);

   {
     if (SMCChecks != FEXCore::Config::CONFIG_SMC_MTRACK) {
@@ -182,7 +182,7 @@ FEXCore::HLE::AOTIRCacheEntryLookupResult SyscallHandler::LookupAOTIRCacheEntry(
 // MMan Tracking
 void SyscallHandler::TrackMmap(FEXCore::Core::InternalThreadState *Thread, uintptr_t Base, uintptr_t Size, int Prot, int Flags, int fd, off_t Offset) {
-  Size = FEXCore::AlignUp(Size, FHU::FEX_PAGE_SIZE);
+  Size = FEXCore::AlignUp(Size, FEXCore::Utils::FEX_PAGE_SIZE);

   if (Flags & MAP_SHARED) {
     CTX->MarkMemoryShared(Thread);
@@ -237,7 +237,7 @@ void SyscallHandler::TrackMmap(FEXCore::Core::InternalThreadState *Thread, uintp
 }

 void SyscallHandler::TrackMunmap(FEXCore::Core::InternalThreadState *Thread, uintptr_t Base, uintptr_t Size) {
-  Size = FEXCore::AlignUp(Size, FHU::FEX_PAGE_SIZE);
+  Size = FEXCore::AlignUp(Size, FEXCore::Utils::FEX_PAGE_SIZE);

   {
     // Frontend calls this with nullptr Thread during initialization.
@@ -254,7 +254,7 @@ void SyscallHandler::TrackMunmap(FEXCore::Core::InternalThreadState *Thread, uin
 }

 void SyscallHandler::TrackMprotect(FEXCore::Core::InternalThreadState *Thread, uintptr_t Base, uintptr_t Size, int Prot) {
-  Size = FEXCore::AlignUp(Size, FHU::FEX_PAGE_SIZE);
+  Size = FEXCore::AlignUp(Size, FEXCore::Utils::FEX_PAGE_SIZE);

   {
     auto lk = FEXCore::GuardSignalDeferringSection(VMATracking.Mutex, Thread);
@@ -268,8 +268,8 @@ void SyscallHandler::TrackMprotect(FEXCore::Core::InternalThreadState *Thread, u
 }

 void SyscallHandler::TrackMremap(FEXCore::Core::InternalThreadState *Thread, uintptr_t OldAddress, size_t OldSize, size_t NewSize, int flags, uintptr_t NewAddress) {
-  OldSize = FEXCore::AlignUp(OldSize, FHU::FEX_PAGE_SIZE);
-  NewSize = FEXCore::AlignUp(NewSize, FHU::FEX_PAGE_SIZE);
+  OldSize = FEXCore::AlignUp(OldSize, FEXCore::Utils::FEX_PAGE_SIZE);
+  NewSize = FEXCore::AlignUp(NewSize, FEXCore::Utils::FEX_PAGE_SIZE);

   {
     auto lk = FEXCore::GuardSignalDeferringSection(VMATracking.Mutex, Thread);
@@ -364,7 +364,7 @@ void SyscallHandler::TrackShmdt(FEXCore::Core::InternalThreadState *Thread, uint
 }

 void SyscallHandler::TrackMadvise(FEXCore::Core::InternalThreadState *Thread, uintptr_t Base, uintptr_t Size, int advice) {
-  Size = FEXCore::AlignUp(Size, FHU::FEX_PAGE_SIZE);
+  Size = FEXCore::AlignUp(Size, FEXCore::Utils::FEX_PAGE_SIZE);

   {
     auto lk = FEXCore::GuardSignalDeferringSection(VMATracking.Mutex, Thread);
     // TODO
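The tracking functions above repeatedly round [Start, Start + Length) out to whole pages via AlignDown and AlignUp. A small sketch of that arithmetic with the assumed FEXCore::AlignUp/AlignDown semantics (alignment must be a power of two):

#include <cstdint>

constexpr uint64_t PAGE_SIZE = 4096; // stand-in for FEXCore::Utils::FEX_PAGE_SIZE
constexpr uint64_t PAGE_MASK = ~(PAGE_SIZE - 1);

constexpr uint64_t AlignDown(uint64_t Value, uint64_t Align) { return Value & ~(Align - 1); }
constexpr uint64_t AlignUp(uint64_t Value, uint64_t Align) { return AlignDown(Value + Align - 1, Align); }

// MarkGuestExecutableRange-style rounding: Base..Top covers every page the range touches.
static_assert((0x1234ULL & PAGE_MASK) == 0x1000);              // Base
static_assert(AlignUp(0x1234ULL + 0x10, PAGE_SIZE) == 0x2000); // Top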
diff --git a/Source/Windows/Common/InvalidationTracker.cpp b/Source/Windows/Common/InvalidationTracker.cpp
index 429352c2d4..0ad1da30b5 100644
--- a/Source/Windows/Common/InvalidationTracker.cpp
+++ b/Source/Windows/Common/InvalidationTracker.cpp
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: MIT

 #include
+#include <FEXCore/Utils/TypeDefines.h>
 #include
 #include
 #include "InvalidationTracker.h"
@@ -9,8 +10,8 @@ namespace FEX::Windows {
 void InvalidationTracker::HandleMemoryProtectionNotification(FEXCore::Core::InternalThreadState *Thread, uint64_t Address, uint64_t Size, ULONG Prot) {
-  const auto AlignedBase = Address & FHU::FEX_PAGE_MASK;
-  const auto AlignedSize = (Address - AlignedBase + Size + FHU::FEX_PAGE_SIZE - 1) & FHU::FEX_PAGE_MASK;
+  const auto AlignedBase = Address & FEXCore::Utils::FEX_PAGE_MASK;
+  const auto AlignedSize = (Address - AlignedBase + Size + FEXCore::Utils::FEX_PAGE_SIZE - 1) & FEXCore::Utils::FEX_PAGE_MASK;

   if (Prot & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE)) {
     Thread->CTX->InvalidateGuestCodeRange(Thread, AlignedBase, AlignedSize);
@@ -43,8 +44,8 @@ void InvalidationTracker::InvalidateContainingSection(FEXCore::Core::InternalThr
 }

 void InvalidationTracker::InvalidateAlignedInterval(FEXCore::Core::InternalThreadState *Thread, uint64_t Address, uint64_t Size, bool Free) {
-  const auto AlignedBase = Address & FHU::FEX_PAGE_MASK;
-  const auto AlignedSize = (Address - AlignedBase + Size + FHU::FEX_PAGE_SIZE - 1) & FHU::FEX_PAGE_MASK;
+  const auto AlignedBase = Address & FEXCore::Utils::FEX_PAGE_MASK;
+  const auto AlignedSize = (Address - AlignedBase + Size + FEXCore::Utils::FEX_PAGE_SIZE - 1) & FEXCore::Utils::FEX_PAGE_MASK;

   Thread->CTX->InvalidateGuestCodeRange(Thread, AlignedBase, AlignedSize);
   if (Free) {
@@ -90,7 +91,7 @@ bool InvalidationTracker::HandleRWXAccessViolation(FEXCore::Core::InternalThread
   if (NeedsInvalidate) {
     // RWXIntervalsLock cannot be held during invalidation
-    Thread->CTX->InvalidateGuestCodeRange(Thread, FaultAddress & FHU::FEX_PAGE_MASK, FHU::FEX_PAGE_SIZE);
+    Thread->CTX->InvalidateGuestCodeRange(Thread, FaultAddress & FEXCore::Utils::FEX_PAGE_MASK, FEXCore::Utils::FEX_PAGE_SIZE);
     return true;
   }
   return false;
diff --git a/Source/Windows/WOW64/Module.cpp b/Source/Windows/WOW64/Module.cpp
index f0a7a02470..af8efddba2 100644
--- a/Source/Windows/WOW64/Module.cpp
+++ b/Source/Windows/WOW64/Module.cpp
@@ -23,7 +23,7 @@ desc: Implements the WOW64 BT module API using FEXCore
 #include
 #include
 #include
-#include <FEXHeaderUtils/TypeDefines.h>
+#include <FEXCore/Utils/TypeDefines.h>

 #include "Common/Config.h"
 #include "Common/InvalidationTracker.h"
@@ -309,7 +309,7 @@ namespace Context {
     }

     void *TmpAddress = reinterpret_cast<void*>(FaultAddress);
-    SIZE_T TmpSize = FHU::FEX_PAGE_SIZE;
+    SIZE_T TmpSize = FEXCore::Utils::FEX_PAGE_SIZE;
     ULONG TmpProt;
     NtProtectVirtualMemory(NtCurrentProcess(), &TmpAddress, &TmpSize, PAGE_READWRITE, &TmpProt);
@@ -570,7 +570,7 @@ NTSTATUS BTCpuSuspendLocalThread(HANDLE Thread, ULONG *Count) {
   ULONG TmpProt;
   void *TmpAddress = &TLS.ThreadState()->InterruptFaultPage;
-  SIZE_T TmpSize = FHU::FEX_PAGE_SIZE;
+  SIZE_T TmpSize = FEXCore::Utils::FEX_PAGE_SIZE;
   NtProtectVirtualMemory(NtCurrentProcess(), &TmpAddress, &TmpSize, PAGE_READONLY, &TmpProt);
 }
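The AlignedBase/AlignedSize computation in the InvalidationTracker hunks rounds an arbitrary [Address, Address + Size) interval out to whole pages before invalidation. A worked check with stand-in constants, on a range that straddles a page boundary:

#include <cstdint>

constexpr uint64_t PAGE_SIZE = 4096; // stand-in for FEXCore::Utils::FEX_PAGE_SIZE
constexpr uint64_t PAGE_MASK = ~(PAGE_SIZE - 1);

// [0x1FF0, 0x2010) touches two pages, so the aligned interval is [0x1000, 0x3000).
constexpr uint64_t Address = 0x1FF0;
constexpr uint64_t Size = 0x20;
constexpr uint64_t AlignedBase = Address & PAGE_MASK;
constexpr uint64_t AlignedSize = (Address - AlignedBase + Size + PAGE_SIZE - 1) & PAGE_MASK;
static_assert(AlignedBase == 0x1000);
static_assert(AlignedSize == 2 * PAGE_SIZE);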