Moves FHU TypeDefines to FEXCore includes
FEXCore includes were including an FHU header, which would result in
compilation failures for external projects trying to link against libFEXCore.

Moves it over to fix this; it was the only FHU usage in FEXCore/include.
NFC
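
To illustrate the layering problem being fixed, here is a minimal sketch of an external consumer of libFEXCore that builds against only FEXCore's public include directory. The consumer itself is hypothetical; the header and constant names come from the diff below.

// Hypothetical external project linking against libFEXCore.
// Before this commit, a public header such as <FEXCore/Debug/InternalThreadState.h>
// pulled in <FEXHeaderUtils/TypeDefines.h>, which ships outside FEXCore's public
// include tree, so builds using only FEXCore/include could fail to compile it.
// After the move, the page constants are reachable through a public header:
#include <FEXCore/Utils/TypeDefines.h>

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  // The page constants now live in the FEXCore::Utils namespace.
  uint64_t Addr = 0x12345;
  uint64_t PageBase = Addr & FEXCore::Utils::FEX_PAGE_MASK;
  std::printf("page base: 0x%" PRIx64 "\n", PageBase);
  return 0;
}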
Sonicadvance1 committed Mar 29, 2024
1 parent aa26b62 commit d11a36e
Showing 14 changed files with 115 additions and 115 deletions.
12 changes: 6 additions & 6 deletions FEXCore/Source/Interface/Core/Frontend.cpp
@@ -20,8 +20,8 @@ desc: Extracts instruction & block meta info, frontend multiblock logic
#include <FEXCore/Utils/LogManager.h>
#include <FEXCore/Utils/Profiler.h>
#include <FEXCore/Utils/Telemetry.h>
+#include <FEXCore/Utils/TypeDefines.h>
#include <FEXCore/fextl/set.h>
-#include <FEXHeaderUtils/TypeDefines.h>

namespace FEXCore::Frontend {
#include "Interface/Core/VSyscall/VSyscall.inc"
@@ -1126,11 +1126,11 @@ void Decoder::DecodeInstructionsAtEntry(uint8_t const* _InstStream, uint64_t PC,
// Entry is a jump target
BlocksToDecode.emplace(PC);

-uint64_t CurrentCodePage = PC & FHU::FEX_PAGE_MASK;
+uint64_t CurrentCodePage = PC & FEXCore::Utils::FEX_PAGE_MASK;

fextl::set<uint64_t> CodePages = { CurrentCodePage };

-AddContainedCodePage(PC, CurrentCodePage, FHU::FEX_PAGE_SIZE);
+AddContainedCodePage(PC, CurrentCodePage, FEXCore::Utils::FEX_PAGE_SIZE);

if (MaxInst == 0) {
MaxInst = CTX->Config.MaxInstPerBlock;
@@ -1156,8 +1156,8 @@ void Decoder::DecodeInstructionsAtEntry(uint8_t const* _InstStream, uint64_t PC,
auto OpMinAddress = RIPToDecode + PCOffset;
auto OpMaxAddress = OpMinAddress + MAX_INST_SIZE;

-auto OpMinPage = OpMinAddress & FHU::FEX_PAGE_MASK;
-auto OpMaxPage = OpMaxAddress & FHU::FEX_PAGE_MASK;
+auto OpMinPage = OpMinAddress & FEXCore::Utils::FEX_PAGE_MASK;
+auto OpMaxPage = OpMaxAddress & FEXCore::Utils::FEX_PAGE_MASK;

if (OpMinPage != CurrentCodePage) {
CurrentCodePage = OpMinPage;
@@ -1230,7 +1230,7 @@ void Decoder::DecodeInstructionsAtEntry(uint8_t const* _InstStream, uint64_t PC,
}

for (auto CodePage : CodePages) {
-AddContainedCodePage(PC, CodePage, FHU::FEX_PAGE_SIZE);
+AddContainedCodePage(PC, CodePage, FEXCore::Utils::FEX_PAGE_SIZE);
}

// sort for better branching
4 changes: 2 additions & 2 deletions FEXCore/Source/Interface/IR/Passes/RegisterAllocationPass.cpp
@@ -18,14 +18,14 @@ tags: ir|opts
#include <FEXCore/Utils/LogManager.h>
#include <FEXCore/Utils/MathUtils.h>
#include <FEXCore/Utils/Profiler.h>
+#include <FEXCore/Utils/TypeDefines.h>
#include <FEXCore/fextl/fmt.h>
#include <FEXCore/fextl/set.h>
#include <FEXCore/fextl/unordered_map.h>
#include <FEXCore/fextl/unordered_set.h>
#include <FEXCore/fextl/vector.h>

#include <FEXHeaderUtils/BitUtils.h>
-#include <FEXHeaderUtils/TypeDefines.h>

#include <algorithm>
#include <cstddef>
@@ -68,7 +68,7 @@ namespace {
};

static_assert(sizeof(RegisterNode) == 128 * 4);
-constexpr size_t REGISTER_NODES_PER_PAGE = FHU::FEX_PAGE_SIZE / sizeof(RegisterNode);
+constexpr size_t REGISTER_NODES_PER_PAGE = FEXCore::Utils::FEX_PAGE_SIZE / sizeof(RegisterNode);

struct RegisterSet {
fextl::vector<RegisterClass> Classes;
14 changes: 7 additions & 7 deletions FEXCore/Source/Utils/Allocator.cpp
@@ -3,10 +3,10 @@
#include <FEXCore/Utils/Allocator.h>
#include <FEXCore/Utils/CompilerDefs.h>
#include <FEXCore/Utils/LogManager.h>
+#include <FEXCore/Utils/TypeDefines.h>
#include <FEXCore/fextl/fmt.h>
#include <FEXCore/fextl/memory_resource.h>
#include <FEXHeaderUtils/Syscalls.h>
-#include <FEXHeaderUtils/TypeDefines.h>

#include <array>
#include <cctype>
@@ -95,8 +95,8 @@ namespace FEXCore::Allocator {
// Now allocate the next page after the sbrk address to ensure it can't grow.
// In most cases at the start of `main` this will already be page aligned, which means subsequent `sbrk`
// calls won't allocate any memory through that.
-void* AlignedBRK = reinterpret_cast<void*>(FEXCore::AlignUp(reinterpret_cast<uintptr_t>(StartingSBRK), FHU::FEX_PAGE_SIZE));
-void *AfterBRK = mmap(AlignedBRK, FHU::FEX_PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE | MAP_NORESERVE, -1, 0);
+void* AlignedBRK = reinterpret_cast<void*>(FEXCore::AlignUp(reinterpret_cast<uintptr_t>(StartingSBRK), FEXCore::Utils::FEX_PAGE_SIZE));
+void *AfterBRK = mmap(AlignedBRK, FEXCore::Utils::FEX_PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE | MAP_NORESERVE, -1, 0);
if (AfterBRK == INVALID_PTR) {
// Couldn't allocate the page after the aligned brk? This should never happen.
// FEXCore::LogMan isn't configured yet so we just need to print the message.
@@ -118,7 +118,7 @@ namespace FEXCore::Allocator {
void ReenableSBRKAllocations(void* Ptr) {
const void* INVALID_PTR = reinterpret_cast<void*>(~0ULL);
if (Ptr != INVALID_PTR) {
-munmap(Ptr, FHU::FEX_PAGE_SIZE);
+munmap(Ptr, FEXCore::Utils::FEX_PAGE_SIZE);
}
}

@@ -172,10 +172,10 @@ namespace FEXCore::Allocator {
for (int i = 0; i < 64; ++i) {
// Try grabbing a some of the top pages of the range
// x86 allocates some high pages in the top end
-void *Ptr = ::mmap(reinterpret_cast<void*>(Size - FHU::FEX_PAGE_SIZE * i), FHU::FEX_PAGE_SIZE, PROT_NONE, MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+void *Ptr = ::mmap(reinterpret_cast<void*>(Size - FEXCore::Utils::FEX_PAGE_SIZE * i), FEXCore::Utils::FEX_PAGE_SIZE, PROT_NONE, MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (Ptr != (void*)~0ULL) {
-::munmap(Ptr, FHU::FEX_PAGE_SIZE);
-if (Ptr == (void*)(Size - FHU::FEX_PAGE_SIZE * i)) {
+::munmap(Ptr, FEXCore::Utils::FEX_PAGE_SIZE);
+if (Ptr == (void*)(Size - FEXCore::Utils::FEX_PAGE_SIZE * i)) {
return true;
}
}
52 changes: 26 additions & 26 deletions FEXCore/Source/Utils/Allocator/64BitAllocator.cpp
@@ -6,9 +6,9 @@
#include <FEXCore/Utils/LogManager.h>
#include <FEXCore/Utils/MathUtils.h>
#include <FEXCore/Utils/SignalScopeGuards.h>
+#include <FEXCore/Utils/TypeDefines.h>
#include <FEXCore/fextl/sstream.h>
#include <FEXHeaderUtils/Syscalls.h>
-#include <FEXHeaderUtils/TypeDefines.h>
#include <FEXCore/fextl/memory.h>
#include <FEXCore/fextl/vector.h>

@@ -70,8 +70,8 @@ namespace Alloc::OSAllocator {
// Lower bound is the starting of the range just past the lower 32bits
constexpr static uintptr_t LOWER_BOUND = 0x1'0000'0000ULL;

-uintptr_t UPPER_BOUND_PAGE = UPPER_BOUND / FHU::FEX_PAGE_SIZE;
-constexpr static uintptr_t LOWER_BOUND_PAGE = LOWER_BOUND / FHU::FEX_PAGE_SIZE;
+uintptr_t UPPER_BOUND_PAGE = UPPER_BOUND / FEXCore::Utils::FEX_PAGE_SIZE;
+constexpr static uintptr_t LOWER_BOUND_PAGE = LOWER_BOUND / FEXCore::Utils::FEX_PAGE_SIZE;

struct ReservedVMARegion {
uintptr_t Base;
@@ -114,22 +114,22 @@ namespace Alloc::OSAllocator {
// 0x100'0000 Pages
// 1 bit per page for tracking means 0x20'0000 (Pages / 8) bytes of flex space
// Which is 2MB of tracking
-uint64_t NumElements = (Size >> FHU::FEX_PAGE_SHIFT) * sizeof(FlexBitElementType);
+uint64_t NumElements = (Size >> FEXCore::Utils::FEX_PAGE_SHIFT) * sizeof(FlexBitElementType);
return sizeof(LiveVMARegion) + FEXCore::FlexBitSet<FlexBitElementType>::Size(NumElements);
}

static void InitializeVMARegionUsed(LiveVMARegion *Region, size_t AdditionalSize) {
-size_t SizeOfLiveRegion = FEXCore::AlignUp(LiveVMARegion::GetSizeWithFlexSet(Region->SlabInfo->RegionSize), FHU::FEX_PAGE_SIZE);
+size_t SizeOfLiveRegion = FEXCore::AlignUp(LiveVMARegion::GetSizeWithFlexSet(Region->SlabInfo->RegionSize), FEXCore::Utils::FEX_PAGE_SIZE);
size_t SizePlusManagedData = SizeOfLiveRegion + AdditionalSize;

Region->FreeSpace = Region->SlabInfo->RegionSize - SizePlusManagedData;

-size_t NumManagedPages = SizePlusManagedData >> FHU::FEX_PAGE_SHIFT;
-size_t ManagedSize = NumManagedPages << FHU::FEX_PAGE_SHIFT;
+size_t NumManagedPages = SizePlusManagedData >> FEXCore::Utils::FEX_PAGE_SHIFT;
+size_t ManagedSize = NumManagedPages << FEXCore::Utils::FEX_PAGE_SHIFT;

// Use madvise to set the full tracking region to zero.
// This ensures unused pages are zero, while not having the backing pages consuming memory.
-::madvise(Region->UsedPages.Memory + ManagedSize, (Region->SlabInfo->RegionSize >> FHU::FEX_PAGE_SHIFT) - ManagedSize, MADV_DONTNEED);
+::madvise(Region->UsedPages.Memory + ManagedSize, (Region->SlabInfo->RegionSize >> FEXCore::Utils::FEX_PAGE_SHIFT) - ManagedSize, MADV_DONTNEED);

// Use madvise to claim WILLNEED on the beginning pages for initial state tracking.
// Improves performance of the following MemClear by not doing a page level fault dance for data necessary to track >170TB of used pages.
@@ -162,7 +162,7 @@ namespace Alloc::OSAllocator {
ReservedRegions->erase(ReservedIterator);

// mprotect the new region we've allocated
-size_t SizeOfLiveRegion = FEXCore::AlignUp(LiveVMARegion::GetSizeWithFlexSet(ReservedRegion->RegionSize), FHU::FEX_PAGE_SIZE);
+size_t SizeOfLiveRegion = FEXCore::AlignUp(LiveVMARegion::GetSizeWithFlexSet(ReservedRegion->RegionSize), FEXCore::Utils::FEX_PAGE_SIZE);
size_t SizePlusManagedData = UsedSize + SizeOfLiveRegion;

[[maybe_unused]] auto Res = mprotect(reinterpret_cast<void*>(ReservedRegion->Base), SizePlusManagedData, PROT_READ | PROT_WRITE);
@@ -198,10 +198,10 @@ void OSAllocator_64Bit::DetermineVASize() {
UPPER_BOUND = Size;

#if _M_X86_64 // Last page cannot be allocated on x86
-UPPER_BOUND -= FHU::FEX_PAGE_SIZE;
+UPPER_BOUND -= FEXCore::Utils::FEX_PAGE_SIZE;
#endif

-UPPER_BOUND_PAGE = UPPER_BOUND / FHU::FEX_PAGE_SIZE;
+UPPER_BOUND_PAGE = UPPER_BOUND / FEXCore::Utils::FEX_PAGE_SIZE;
}

OSAllocator_64Bit::LiveVMARegion *OSAllocator_64Bit::FindLiveRegionForAddress(uintptr_t Addr, uintptr_t AddrEnd) {
@@ -250,13 +250,13 @@ void *OSAllocator_64Bit::Mmap(void *addr, size_t length, int prot, int flags, in

uint64_t Addr = reinterpret_cast<uint64_t>(addr);
// Addr must be page aligned
-if (Addr & ~FHU::FEX_PAGE_MASK) {
+if (Addr & ~FEXCore::Utils::FEX_PAGE_MASK) {
return reinterpret_cast<void*>(-EINVAL);
}

// If FD is provided then offset must also be page aligned
if (fd != -1 &&
-offset & ~FHU::FEX_PAGE_MASK) {
+offset & ~FEXCore::Utils::FEX_PAGE_MASK) {
return reinterpret_cast<void*>(-EINVAL);
}

@@ -266,10 +266,10 @@ void *OSAllocator_64Bit::Mmap(void *addr, size_t length, int prot, int flags, in
}

bool Fixed = (flags & MAP_FIXED) || (flags & MAP_FIXED_NOREPLACE);
-length = FEXCore::AlignUp(length, FHU::FEX_PAGE_SIZE);
+length = FEXCore::AlignUp(length, FEXCore::Utils::FEX_PAGE_SIZE);

uint64_t AddrEnd = Addr + length;
-size_t NumberOfPages = length / FHU::FEX_PAGE_SIZE;
+size_t NumberOfPages = length / FEXCore::Utils::FEX_PAGE_SIZE;

// This needs a mutex to be thread safe
auto lk = FEXCore::GuardSignalDeferringSectionWithFallback(AllocationMutex, TLSThread);
@@ -285,14 +285,14 @@ void *OSAllocator_64Bit::Mmap(void *addr, size_t length, int prot, int flags, in

auto CheckIfRangeFits = [&AllocatedOffset](LiveVMARegion *Region, uint64_t length, int prot, int flags, int fd, off_t offset, uint64_t StartingPosition = 0) -> std::pair<LiveVMARegion*, void*> {
uint64_t AllocatedPage{~0ULL};
-uint64_t NumberOfPages = length >> FHU::FEX_PAGE_SHIFT;
+uint64_t NumberOfPages = length >> FEXCore::Utils::FEX_PAGE_SHIFT;

if (Region->FreeSpace >= length) {
uint64_t LastAllocation =
StartingPosition ?
-(StartingPosition - Region->SlabInfo->Base) >> FHU::FEX_PAGE_SHIFT
+(StartingPosition - Region->SlabInfo->Base) >> FEXCore::Utils::FEX_PAGE_SHIFT
: Region->LastPageAllocation;
-size_t RegionNumberOfPages = Region->SlabInfo->RegionSize >> FHU::FEX_PAGE_SHIFT;
+size_t RegionNumberOfPages = Region->SlabInfo->RegionSize >> FEXCore::Utils::FEX_PAGE_SHIFT;


if (Region->HadMunmap) {
@@ -317,7 +317,7 @@ void *OSAllocator_64Bit::Mmap(void *addr, size_t length, int prot, int flags, in
}

if (AllocatedPage != ~0ULL) {
-AllocatedOffset = Region->SlabInfo->Base + AllocatedPage * FHU::FEX_PAGE_SIZE;
+AllocatedOffset = Region->SlabInfo->Base + AllocatedPage * FEXCore::Utils::FEX_PAGE_SIZE;

// We need to setup protections for this
void *MMapResult = ::mmap(reinterpret_cast<void*>(AllocatedOffset),
@@ -407,7 +407,7 @@ void *OSAllocator_64Bit::Mmap(void *addr, size_t length, int prot, int flags, in
if (!LiveRegion) {
// Couldn't find a fit in the live regions
// Allocate a new reserved region
-size_t lengthOfLiveRegion = FEXCore::AlignUp(LiveVMARegion::GetSizeWithFlexSet(length), FHU::FEX_PAGE_SIZE);
+size_t lengthOfLiveRegion = FEXCore::AlignUp(LiveVMARegion::GetSizeWithFlexSet(length), FEXCore::Utils::FEX_PAGE_SIZE);
size_t lengthPlusManagedData = length + lengthOfLiveRegion;
for (auto it = ReservedRegions->begin(); it != ReservedRegions->end(); ++it) {
if ((*it)->RegionSize >= lengthPlusManagedData) {
@@ -421,7 +421,7 @@ void *OSAllocator_64Bit::Mmap(void *addr, size_t length, int prot, int flags, in
if (LiveRegion) {
// Mark the pages as used
uintptr_t RegionBegin = LiveRegion->SlabInfo->Base;
-uintptr_t MappedBegin = (AllocatedOffset - RegionBegin) >> FHU::FEX_PAGE_SHIFT;
+uintptr_t MappedBegin = (AllocatedOffset - RegionBegin) >> FEXCore::Utils::FEX_PAGE_SHIFT;

for (size_t i = 0; i < NumberOfPages; ++i) {
LiveRegion->UsedPages.Set(MappedBegin + i);
@@ -447,11 +447,11 @@ int OSAllocator_64Bit::Munmap(void *addr, size_t length) {

uint64_t Addr = reinterpret_cast<uint64_t>(addr);

-if (Addr & ~FHU::FEX_PAGE_MASK) {
+if (Addr & ~FEXCore::Utils::FEX_PAGE_MASK) {
return -EINVAL;
}

-if (length & ~FHU::FEX_PAGE_MASK) {
+if (length & ~FEXCore::Utils::FEX_PAGE_MASK) {
return -EINVAL;
}

@@ -462,7 +462,7 @@ int OSAllocator_64Bit::Munmap(void *addr, size_t length) {
// This needs a mutex to be thread safe
auto lk = FEXCore::GuardSignalDeferringSectionWithFallback(AllocationMutex, TLSThread);

-length = FEXCore::AlignUp(length, FHU::FEX_PAGE_SIZE);
+length = FEXCore::AlignUp(length, FEXCore::Utils::FEX_PAGE_SIZE);

uintptr_t PtrBegin = reinterpret_cast<uintptr_t>(addr);
uintptr_t PtrEnd = PtrBegin + length;
@@ -476,8 +476,8 @@ int OSAllocator_64Bit::Munmap(void *addr, size_t length) {
// Live region fully encompasses slab range

uint64_t FreedPages{};
-uint32_t SlabPageBegin = (PtrBegin - RegionBegin) >> FHU::FEX_PAGE_SHIFT;
-uint64_t PagesToFree = length >> FHU::FEX_PAGE_SHIFT;
+uint32_t SlabPageBegin = (PtrBegin - RegionBegin) >> FEXCore::Utils::FEX_PAGE_SHIFT;
+uint64_t PagesToFree = length >> FEXCore::Utils::FEX_PAGE_SHIFT;

for (size_t i = 0; i < PagesToFree; ++i) {
FreedPages += (*it)->UsedPages.TestAndClear(SlabPageBegin + i) ? 1 : 0;
14 changes: 7 additions & 7 deletions FEXCore/Source/Utils/Allocator/IntrusiveArenaAllocator.h
@@ -5,7 +5,7 @@
#include "HostAllocator.h"

#include <FEXCore/Utils/MathUtils.h>
-#include <FEXHeaderUtils/TypeDefines.h>
+#include <FEXCore/Utils/TypeDefines.h>

#include <bitset>
#include <cstddef>
@@ -77,9 +77,9 @@ namespace Alloc {
IntrusiveArenaAllocator(void* Ptr, size_t _Size)
: Begin {reinterpret_cast<uintptr_t>(Ptr)}
, Size {_Size} {
-uint64_t NumberOfPages = _Size / FHU::FEX_PAGE_SIZE;
+uint64_t NumberOfPages = _Size / FEXCore::Utils::FEX_PAGE_SIZE;
uint64_t UsedBits = FEXCore::AlignUp(sizeof(IntrusiveArenaAllocator) +
-Size / FHU::FEX_PAGE_SIZE / 8, FHU::FEX_PAGE_SIZE);
+Size / FEXCore::Utils::FEX_PAGE_SIZE / 8, FEXCore::Utils::FEX_PAGE_SIZE);
for (size_t i = 0; i < UsedBits; ++i) {
UsedPages.Set(i);
}
@@ -107,7 +107,7 @@ namespace Alloc {
void *do_allocate(std::size_t bytes, std::size_t alignment) override {
std::scoped_lock<std::mutex> lk{AllocationMutex};

-size_t NumberPages = FEXCore::AlignUp(bytes, FHU::FEX_PAGE_SIZE) / FHU::FEX_PAGE_SIZE;
+size_t NumberPages = FEXCore::AlignUp(bytes, FEXCore::Utils::FEX_PAGE_SIZE) / FEXCore::Utils::FEX_PAGE_SIZE;

uintptr_t AllocatedOffset{};

@@ -151,7 +151,7 @@ namespace Alloc {
LastAllocatedPageOffset = AllocatedOffset + NumberPages;

// Now convert this base page to a pointer and return it
-return reinterpret_cast<void*>(Begin + AllocatedOffset * FHU::FEX_PAGE_SIZE);
+return reinterpret_cast<void*>(Begin + AllocatedOffset * FEXCore::Utils::FEX_PAGE_SIZE);
}

return nullptr;
@@ -160,8 +160,8 @@ namespace Alloc {
void do_deallocate(void* p, std::size_t bytes, std::size_t alignment) override {
std::scoped_lock<std::mutex> lk{AllocationMutex};

-uintptr_t PageOffset = (reinterpret_cast<uintptr_t>(p) - Begin) / FHU::FEX_PAGE_SIZE;
-size_t NumPages = FEXCore::AlignUp(bytes, FHU::FEX_PAGE_SIZE) / FHU::FEX_PAGE_SIZE;
+uintptr_t PageOffset = (reinterpret_cast<uintptr_t>(p) - Begin) / FEXCore::Utils::FEX_PAGE_SIZE;
+size_t NumPages = FEXCore::AlignUp(bytes, FEXCore::Utils::FEX_PAGE_SIZE) / FEXCore::Utils::FEX_PAGE_SIZE;

// Walk the allocation list and deallocate
uint64_t FreedPages{};
4 changes: 2 additions & 2 deletions FEXCore/include/FEXCore/Debug/InternalThreadState.h
@@ -9,10 +9,10 @@
#include <FEXCore/Utils/Event.h>
#include <FEXCore/Utils/InterruptableConditionVariable.h>
#include <FEXCore/Utils/Threads.h>
+#include <FEXCore/Utils/TypeDefines.h>
#include <FEXCore/fextl/memory.h>
#include <FEXCore/fextl/robin_map.h>
#include <FEXCore/fextl/vector.h>
-#include <FEXHeaderUtils/TypeDefines.h>

#include <chrono>
#include <shared_mutex>
@@ -146,7 +146,7 @@ namespace FEXCore::Core {
alignas(16) FEXCore::Core::CpuStateFrame BaseFrameState{};

// Can be reprotected as RO to trigger an interrupt at generated code block entrypoints
-alignas(FHU::FEX_PAGE_SIZE) uint8_t InterruptFaultPage[FHU::FEX_PAGE_SIZE];
+alignas(FEXCore::Utils::FEX_PAGE_SIZE) uint8_t InterruptFaultPage[FEXCore::Utils::FEX_PAGE_SIZE];
};
static_assert((offsetof(FEXCore::Core::InternalThreadState, InterruptFaultPage) -
offsetof(FEXCore::Core::InternalThreadState, BaseFrameState)) < 4096,
FEXCore/include/FEXCore/Utils/TypeDefines.h
@@ -2,7 +2,7 @@
#pragma once
#include <cstddef>

-namespace FHU {
+namespace FEXCore::Utils {
// FEX assumes an operating page size of 4096
// To work around build systems that build on a 16k/64k page size, define our page size here
// Don't use the system provided PAGE_SIZE define because of this.
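
The hunk above shows only the namespace rename; the commit view truncates the rest of this header. From the page-size comment and the constants used throughout the diff (FEX_PAGE_SIZE, FEX_PAGE_SHIFT, FEX_PAGE_MASK), its body presumably looks roughly like the following sketch; the exact definitions are an assumption, not taken from this commit:

namespace FEXCore::Utils {
// Inferred sketch: FEX assumes 4096-byte pages regardless of the host page size.
constexpr size_t FEX_PAGE_SHIFT = 12;
constexpr size_t FEX_PAGE_SIZE = 1U << FEX_PAGE_SHIFT; // 4096
constexpr size_t FEX_PAGE_MASK = ~(FEX_PAGE_SIZE - 1); // keeps the page-base bits, clears the in-page offset
}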
4 changes: 2 additions & 2 deletions Source/Tools/CommonTools/HarnessHelpers.h
@@ -17,13 +17,13 @@
#include <FEXCore/Utils/FileLoading.h>
#include <FEXCore/Utils/LogManager.h>
#include <FEXCore/Utils/MathUtils.h>
+#include <FEXCore/Utils/TypeDefines.h>
#include <FEXCore/fextl/fmt.h>
#include <FEXCore/fextl/map.h>
#include <FEXCore/fextl/string.h>
#include <FEXCore/fextl/vector.h>
#include <FEXHeaderUtils/BitUtils.h>
#include <FEXHeaderUtils/Syscalls.h>
-#include <FEXHeaderUtils/TypeDefines.h>
#include <unistd.h>

namespace FEX::HarnessHelper {
@@ -425,7 +425,7 @@ namespace FEX::HarnessHelper {

// Map in the memory region for the test file
#ifndef _WIN32
-size_t Length = FEXCore::AlignUp(RawASMFile.size(), FHU::FEX_PAGE_SIZE);
+size_t Length = FEXCore::AlignUp(RawASMFile.size(), FEXCore::Utils::FEX_PAGE_SIZE);
auto ASMPtr = FEXCore::Allocator::VirtualAlloc(reinterpret_cast<void*>(Code_start_page), Length, true);
#else
// Special magic DOS area that starts at 0x1'0000