From 4fcd414d1c5c43398761dd2da457f602e45173bb Mon Sep 17 00:00:00 2001 From: Ville Juven Date: Wed, 11 Sep 2024 15:09:56 +0300 Subject: [PATCH] arm64_addrenv: Fix the number of page table levels The VMSAv8-64 translation system has 4 page table levels in total, ranging from 0-3. The address environment code assumes only 3 levels, from 1-3, but this is wrong; the number of levels _utilized_ depends on the configured VA size CONFIG_ARM64_VA_BITS. With <= 39 bits, 3 levels are enough, while if the VA range is larger, the 4th translation table level is enabled dynamically by shifting the base translation table level. From arm64_mmu.c, where va_bits is the number of VA bits used in address translations: (va_bits <= 21) - base level 3 (22 <= va_bits <= 30) - base level 2 (31 <= va_bits <= 39) - base level 1 (40 <= va_bits <= 48) - base level 0 The base level is what is configured as the page directory root. This also affects the performance of address translations, i.e. if the VA range is smaller, address translations are also faster as the page table walk is shorter. 
--- arch/arm64/include/arch.h | 4 ++-- arch/arm64/src/common/arm64_addrenv_perms.c | 2 +- arch/arm64/src/common/arm64_addrenv_pgmap.c | 2 +- arch/arm64/src/common/arm64_addrenv_utils.c | 2 +- arch/arm64/src/common/arm64_mmu.c | 17 +++++++++++------ arch/arm64/src/common/arm64_mmu.h | 10 ++++++---- arch/arm64/src/common/arm64_pgalloc.c | 6 ++++-- 7 files changed, 26 insertions(+), 17 deletions(-) diff --git a/arch/arm64/include/arch.h b/arch/arm64/include/arch.h index 281d7cf504fbe..14190d4262241 100644 --- a/arch/arm64/include/arch.h +++ b/arch/arm64/include/arch.h @@ -45,9 +45,9 @@ # error Only pages sizes of 4096 are currently supported (CONFIG_ARCH_ADDRENV) #endif -/* All implementations have 3 levels of page tables */ +/* All implementations have 4 levels of page tables */ -#define ARCH_PGT_MAX_LEVELS (3) +#define ARCH_PGT_MAX_LEVELS (4) #define ARCH_SPGTS (ARCH_PGT_MAX_LEVELS - 1) #endif /* CONFIG_ARCH_ADDRENV */ diff --git a/arch/arm64/src/common/arm64_addrenv_perms.c b/arch/arm64/src/common/arm64_addrenv_perms.c index f8081d56b7a1d..f68e98f49e852 100644 --- a/arch/arm64/src/common/arm64_addrenv_perms.c +++ b/arch/arm64/src/common/arm64_addrenv_perms.c @@ -71,7 +71,7 @@ static int modify_region(uintptr_t vstart, uintptr_t vend, uintptr_t setmask) for (vaddr = vstart; vaddr < vend; vaddr += MM_PGSIZE) { for (ptlevel = 1, lnvaddr = l1vaddr; - ptlevel < MMU_PGT_LEVELS; + ptlevel < MMU_PGT_LEVEL_MAX; ptlevel++) { paddr = mmu_pte_to_paddr(mmu_ln_getentry(ptlevel, lnvaddr, vaddr)); diff --git a/arch/arm64/src/common/arm64_addrenv_pgmap.c b/arch/arm64/src/common/arm64_addrenv_pgmap.c index 4f39be0b4dd3d..f7b810c849312 100644 --- a/arch/arm64/src/common/arm64_addrenv_pgmap.c +++ b/arch/arm64/src/common/arm64_addrenv_pgmap.c @@ -90,7 +90,7 @@ uintptr_t up_addrenv_find_page(arch_addrenv_t *addrenv, uintptr_t vaddr) /* Make table walk to find the page */ - for (ptlevel = 1, lnvaddr = pgdir; ptlevel < MMU_PGT_LEVELS; ptlevel++) + for (ptlevel = 1, lnvaddr = pgdir; 
ptlevel < MMU_PGT_LEVEL_MAX; ptlevel++) { paddr = mmu_pte_to_paddr(mmu_ln_getentry(ptlevel, lnvaddr, vaddr)); lnvaddr = arm64_pgvaddr(paddr); diff --git a/arch/arm64/src/common/arm64_addrenv_utils.c b/arch/arm64/src/common/arm64_addrenv_utils.c index 9d9e00ef7928e..bdfaeaa253e68 100644 --- a/arch/arm64/src/common/arm64_addrenv_utils.c +++ b/arch/arm64/src/common/arm64_addrenv_utils.c @@ -134,7 +134,7 @@ int arm64_map_pages(arch_addrenv_t *addrenv, uintptr_t *pages, uintptr_t ptlevel; uintptr_t paddr; - ptlevel = MMU_PGT_LEVELS; + ptlevel = MMU_PGT_LEVEL_MAX; /* Add the references to pages[] into the caller's address environment */ diff --git a/arch/arm64/src/common/arm64_mmu.c b/arch/arm64/src/common/arm64_mmu.c index bd1283a63789e..97d978493bcd0 100644 --- a/arch/arm64/src/common/arm64_mmu.c +++ b/arch/arm64/src/common/arm64_mmu.c @@ -82,7 +82,7 @@ #define XLAT_TABLE_SIZE (1U << XLAT_TABLE_SIZE_SHIFT) #define XLAT_TABLE_ENTRY_SIZE_SHIFT 3U /* Each table entry is 8 bytes */ -#define XLAT_TABLE_LEVEL_MAX MMU_PGT_LEVELS +#define XLAT_TABLE_LEVEL_MAX MMU_PGT_LEVEL_MAX #define XLAT_TABLE_ENTRIES_SHIFT \ (XLAT_TABLE_SIZE_SHIFT - XLAT_TABLE_ENTRY_SIZE_SHIFT) @@ -207,6 +207,7 @@ static const struct arm_mmu_config g_mmu_nxrt_config = static const size_t g_pgt_sizes[] = { + MMU_L0_PAGE_SIZE, MMU_L1_PAGE_SIZE, MMU_L2_PAGE_SIZE, MMU_L3_PAGE_SIZE @@ -709,7 +710,8 @@ void mmu_ln_setentry(uint32_t ptlevel, uintptr_t lnvaddr, uintptr_t paddr, uintptr_t *lntable = (uintptr_t *)lnvaddr; uint32_t index; - DEBUGASSERT(ptlevel > 0 && ptlevel <= XLAT_TABLE_LEVEL_MAX); + DEBUGASSERT(ptlevel >= XLAT_TABLE_BASE_LEVEL && + ptlevel <= XLAT_TABLE_LEVEL_MAX); /* Calculate index for lntable */ @@ -735,7 +737,8 @@ uintptr_t mmu_ln_getentry(uint32_t ptlevel, uintptr_t lnvaddr, uintptr_t *lntable = (uintptr_t *)lnvaddr; uint32_t index; - DEBUGASSERT(ptlevel > 0 && ptlevel <= XLAT_TABLE_LEVEL_MAX); + DEBUGASSERT(ptlevel >= XLAT_TABLE_BASE_LEVEL && + ptlevel <= XLAT_TABLE_LEVEL_MAX); index = 
XLAT_TABLE_VA_IDX(vaddr, ptlevel); @@ -753,7 +756,8 @@ void mmu_ln_restore(uint32_t ptlevel, uintptr_t lnvaddr, uintptr_t vaddr, uintptr_t *lntable = (uintptr_t *)lnvaddr; uint32_t index; - DEBUGASSERT(ptlevel > 0 && ptlevel <= XLAT_TABLE_LEVEL_MAX); + DEBUGASSERT(ptlevel >= XLAT_TABLE_BASE_LEVEL && + ptlevel <= XLAT_TABLE_LEVEL_MAX); index = XLAT_TABLE_VA_IDX(vaddr, ptlevel); @@ -771,7 +775,8 @@ void mmu_ln_restore(uint32_t ptlevel, uintptr_t lnvaddr, uintptr_t vaddr, size_t mmu_get_region_size(uint32_t ptlevel) { - DEBUGASSERT(ptlevel > 0 && ptlevel <= XLAT_TABLE_LEVEL_MAX); + DEBUGASSERT(ptlevel >= XLAT_TABLE_BASE_LEVEL && + ptlevel <= XLAT_TABLE_LEVEL_MAX); - return g_pgt_sizes[ptlevel - 1]; + return g_pgt_sizes[ptlevel]; } diff --git a/arch/arm64/src/common/arm64_mmu.h b/arch/arm64/src/common/arm64_mmu.h index 80bd75301fa51..1f0640e7a312b 100644 --- a/arch/arm64/src/common/arm64_mmu.h +++ b/arch/arm64/src/common/arm64_mmu.h @@ -233,13 +233,15 @@ /* Amount of page table levels */ -#define MMU_PGT_LEVELS (3U) +#define MMU_PGT_LEVELS (4U) +#define MMU_PGT_LEVEL_MAX (3U) /* Levels go from 0-3 */ /* Page sizes per page table level */ -#define MMU_L1_PAGE_SIZE (0x40000000) /* 1G */ -#define MMU_L2_PAGE_SIZE (0x200000) /* 2M */ -#define MMU_L3_PAGE_SIZE (0x1000) /* 4K */ +#define MMU_L0_PAGE_SIZE (0x8000000000) /* 512G */ +#define MMU_L1_PAGE_SIZE (0x40000000) /* 1G */ +#define MMU_L2_PAGE_SIZE (0x200000) /* 2M */ +#define MMU_L3_PAGE_SIZE (0x1000) /* 4K */ /* Flags for user page tables */ diff --git a/arch/arm64/src/common/arm64_pgalloc.c b/arch/arm64/src/common/arm64_pgalloc.c index 22115ffcd41c2..758e094e4dce2 100644 --- a/arch/arm64/src/common/arm64_pgalloc.c +++ b/arch/arm64/src/common/arm64_pgalloc.c @@ -92,6 +92,7 @@ uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages) struct tcb_s *tcb = nxsched_self(); struct arch_addrenv_s *addrenv; uintptr_t ptlast; + uintptr_t ptlevel; uintptr_t paddr; uintptr_t vaddr; @@ -113,7 +114,8 @@ uintptr_t pgalloc(uintptr_t 
brkaddr, unsigned int npages) /* Start mapping from the old heap break address */ - vaddr = brkaddr; + vaddr = brkaddr; + ptlevel = MMU_PGT_LEVEL_MAX; /* Sanity checks */ @@ -144,7 +146,7 @@ uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages) /* Then add the reference */ - mmu_ln_setentry(MMU_PGT_LEVELS, ptlast, paddr, vaddr, MMU_UDATA_FLAGS); + mmu_ln_setentry(ptlevel, ptlast, paddr, vaddr, MMU_UDATA_FLAGS); vaddr += MM_PGSIZE; }