misc: Converge with 5.x
mintsuki committed Sep 22, 2023
1 parent d9f8ce2 commit f968254
Showing 12 changed files with 82 additions and 207 deletions.
4 changes: 0 additions & 4 deletions PHILOSOPHY.md
@@ -21,10 +21,6 @@ way to modify its own EFI executable to bake in the BLAKE2B checksum of the conf
a key added to the firmware's keychain. This prevents modifications to the config file (and in turn the checksums contained there)
from going unnoticed.

### What about ext2/3/4? Why is that supported then?

Simply put, legacy. And because a lot of Linux users expect it to "work that way". ext2/3/4 support has been dropped as of Limine 6.x.

### But I don't want to have a separate FAT boot partition! I don't want it!!!

Well tough luck. It is `$year_following_2012` now and most PCs are equipped with UEFI and simply won't boot without a FAT EFI system partition
22 changes: 12 additions & 10 deletions PROTOCOL.md
@@ -87,20 +87,22 @@ The protocol mandates kernels to load themselves at or above
`0xffffffff80000000`. Lower half kernels are *not supported*.

At handoff, the kernel will be properly loaded and mapped with appropriate
MMU permissions at the requested virtual memory address (provided it is at
MMU permissions, as supervisor, at the requested virtual memory address (provided it is at
or above `0xffffffff80000000`).

No specific physical memory placement is guaranteed, except that the kernel
is guaranteed to be physically contiguous. In order to determine
where the kernel is loaded in physical memory, see the Kernel Address feature
below.
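
As an illustration of the guarantee above, a minimal kernel-side sketch of translating an address inside the higher-half kernel image back to physical memory; the `kernel_physical_base` and `kernel_virtual_base` names are placeholders standing in for the values reported by the Kernel Address feature, not identifiers defined by the protocol:
```c
#include <stdint.h>

/* Hypothetical values obtained from the Kernel Address feature response. */
extern uint64_t kernel_physical_base;
extern uint64_t kernel_virtual_base;   /* at or above 0xffffffff80000000 */

/* Because the kernel image is physically contiguous, any address inside it
   can be translated with a single constant offset. */
static inline uint64_t kernel_virt_to_phys(uint64_t virt) {
    return virt - kernel_virtual_base + kernel_physical_base;
}
```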

Alongside the loaded kernel, the bootloader will set up memory mappings such
that every usable, bootloader reclaimable, framebuffer, or kernel/modules
memory map region is mapped at HHDM offset + its physical address.
Additionally, the whole 0->4GiB physical memory region will also be mapped
at HHDM offset + physical address, regardless of the contents of the
memory map. These mappings are supervisor, read, write, execute (-rwx).
Alongside the loaded kernel, the bootloader will set up memory mappings as follows:
```
Base Physical Address | Length                                                 | Base Virtual Address
0x0000000000001000    | (4 GiB - 0x1000) and any additional memory map region  | 0x0000000000001000
0x0000000000000000    | 4 GiB and any additional memory map region             | HHDM start
```
Where "HHDM start" is returned by the Higher Half Direct Map feature (see below).
These mappings are supervisor, read, write, execute (-rwx).
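
As a rough sketch of how a kernel might make use of these mappings, the helpers below convert between a physical address and its higher-half direct map alias; `hhdm_offset` is a placeholder for the "HHDM start" value returned by the Higher Half Direct Map feature, not a name defined by the protocol:
```c
#include <stdint.h>

/* Hypothetical variable holding the Higher Half Direct Map offset. */
extern uint64_t hhdm_offset;

/* Physical -> virtual, through the direct map set up by the bootloader. */
static inline void *phys_to_hhdm(uint64_t phys) {
    return (void *)(uintptr_t)(phys + hhdm_offset);
}

/* Virtual (direct map alias) -> physical. */
static inline uint64_t hhdm_to_phys(void *virt) {
    return (uint64_t)(uintptr_t)virt - hhdm_offset;
}
```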

The bootloader page tables are in bootloader-reclaimable memory (see Memory Map
feature below), and their specific layout is undefined as long as they provide
@@ -117,7 +119,7 @@ config).
The kernel executable, loaded at or above `0xffffffff80000000`, sees all of its
segments mapped using write-back (WB) caching at the page tables level.

All HHDM memory regions are mapped using write-back (WB) caching at the page
All HHDM and identity map memory regions are mapped using write-back (WB) caching at the page
tables level, except framebuffer regions which are mapped using write-combining
(WC) caching at the page tables level.
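
Because framebuffer regions are write-combining, sequential writes to them are cheap while reads back from them are uncached and slow. A hedged sketch of a write-only row fill, assuming a 32 bpp framebuffer; the `fb_base` and `fb_pitch` names are placeholders for values obtained from the framebuffer feature:
```c
#include <stdint.h>

/* Placeholder values; in practice these come from the framebuffer feature. */
extern volatile uint8_t *fb_base;  /* virtual address of the framebuffer */
extern uint64_t fb_pitch;          /* bytes per scanline */

/* Fill one scanline with a solid colour. Writes only: anything that needs
   to read pixels back (e.g. blending) should use a RAM shadow buffer. */
static void fill_row(uint64_t y, uint64_t width, uint32_t colour) {
    volatile uint32_t *row = (volatile uint32_t *)(fb_base + y * fb_pitch);
    for (uint64_t x = 0; x < width; x++)
        row[x] = colour;
}
```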

@@ -140,7 +142,7 @@ The MTRRs are left as the firmware set them up.
The kernel executable, loaded at or above `0xffffffff80000000`, sees all of its
segments mapped using Normal Write-Back RW-Allocate non-transient caching mode.

All HHDM memory regions are mapped using the Normal Write-Back RW-Allocate
All HHDM and identity map memory regions are mapped using the Normal Write-Back RW-Allocate
non-transient caching mode, except for the framebuffer regions, which are
mapped in using an unspecified caching mode, correct for use with the
framebuffer on the platform.
@@ -155,7 +157,7 @@ is used on its own.

If the `Svpbmt` extension is available, all framebuffer memory regions are mapped
with `PBMT=NC` to enable write-combining optimizations. The kernel executable,
loaded at or above `0xffffffff80000000`, and all HHDM memory regions are mapped
loaded at or above `0xffffffff80000000`, and all HHDM and identity map memory regions are mapped
with the default `PBMT=PMA`.

If the `Svpbmt` extension is not available, no PMAs can be overridden (effectively,
2 changes: 1 addition & 1 deletion common/lib/misc.h
@@ -99,7 +99,7 @@ noreturn void enter_in_el1(uint64_t entry, uint64_t sp, uint64_t sctlr,
uint64_t mair, uint64_t tcr, uint64_t ttbr0,
uint64_t ttbr1, uint64_t target_x0);
#elif defined (__riscv64)
noreturn void riscv_spinup(uint64_t entry, uint64_t sp, uint64_t satp, uint64_t direct_map_offset);
noreturn void riscv_spinup(uint64_t entry, uint64_t sp, uint64_t satp);
#if defined (UEFI)
RISCV_EFI_BOOT_PROTOCOL *get_riscv_boot_protocol(void);
#endif
45 changes: 7 additions & 38 deletions common/lib/spinup.asm_aarch64
@@ -4,7 +4,7 @@

// noreturn void enter_in_el1(uint64_t entry, uint64_t sp, uint64_t sctlr,
// uint64_t mair, uint64_t tcr, uint64_t ttbr0,
// uint64_t ttbr1, uint64_t direct_map_offset)
// uint64_t ttbr1, uint64_t target_x0)
// Potentially drop to EL1 from EL2 (and also disable trapping to EL2), then
// configure EL1 state and jump to kernel.

@@ -13,59 +13,28 @@ enter_in_el1:
msr spsel, #0
mov sp, x1

PICK_EL x8, 0f, 2f
0:
// Switch to the new page tables

// Point the EL1t handler to the continuation, such that after we page fault,
// execution continues and the kernel is entered.
adrp x8, 1f
add x8, x8, #:lo12:1f
add x8, x8, x7
msr vbar_el1, x8
isb
dsb sy
isb

// Switch the page table registers
// Configure EL1 state
msr mair_el1, x3
msr tcr_el1, x4
msr ttbr0_el1, x5
msr ttbr1_el1, x6
msr sctlr_el1, x2
isb
dsb sy
isb

// Jump to the higher half mapping in case we didn't immediately crash
br x8

// Alignment required by VBAR register
.align 11
1:
// Zero out VBAR to avoid confusion
msr vbar_el1, xzr

PICK_EL x8, 0f, 1f
0:
// Enter kernel in EL1
mov x8, #0x3c4
msr spsr_el1, x8
msr elr_el1, x0

mov x0, xzr
mov x0, x7
ZERO_REGS_EXCEPT_X0

eret

2:
// Configure EL1 state
msr mair_el1, x3
msr tcr_el1, x4
msr ttbr0_el1, x5
msr ttbr1_el1, x6
msr sctlr_el1, x2
dsb sy
isb

1:
// Configure EL2-specific state for EL1

// Don't trap counters to EL2
@@ -88,7 +57,7 @@ enter_in_el1:
msr spsr_el2, x8
msr elr_el2, x0

mov x0, xzr
mov x0, x7
ZERO_REGS_EXCEPT_X0

eret
10 changes: 1 addition & 9 deletions common/lib/spinup.asm_riscv64
@@ -6,19 +6,11 @@ riscv_spinup:
.option norelax
csrci sstatus, 0x2
csrw sie, zero

lla t0, 0f
add t0, t0, a3
csrw stvec, t0
csrw satp, a2
sfence.vma
unimp
.align 4
0:
csrw stvec, zero

mv t0, a0
mv sp, a1
csrw satp, a2

mv a0, zero
mv a1, zero
61 changes: 36 additions & 25 deletions common/protos/limine.c
@@ -63,8 +63,33 @@ static pagemap_t build_pagemap(int paging_mode, bool nx, struct elf_range *range
}
}

// Map 0->4GiB range to HHDM
for (uint64_t i = 0; i < 0x100000000; i += 0x40000000) {
// Sub 2MiB mappings
for (uint64_t i = 0; i < 0x200000; i += 0x1000) {
if (i != 0) {
map_page(pagemap, i, i, VMM_FLAG_WRITE, Size4KiB);
}
map_page(pagemap, direct_map_offset + i, i, VMM_FLAG_WRITE, Size4KiB);
}

// Map 2MiB to 4GiB at higher half base and 0
//
// NOTE: We cannot just directly map from 2MiB to 4GiB with 1GiB
// pages, because if you do the math:
//
// start = 0x200000
// end = 0x40000000
//
// pages_required = (end - start) / (4096 * 512 * 512)
//
// the result is not a whole number of 1GiB pages. So we map 2MiB to
// 1GiB with 2MiB pages and then map the rest with 1GiB pages :^)
for (uint64_t i = 0x200000; i < 0x40000000; i += 0x200000) {
map_page(pagemap, i, i, VMM_FLAG_WRITE, Size2MiB);
map_page(pagemap, direct_map_offset + i, i, VMM_FLAG_WRITE, Size2MiB);
}

for (uint64_t i = 0x40000000; i < 0x100000000; i += 0x40000000) {
map_page(pagemap, i, i, VMM_FLAG_WRITE, Size1GiB);
map_page(pagemap, direct_map_offset + i, i, VMM_FLAG_WRITE, Size1GiB);
}
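
To make the arithmetic in the comment above concrete, here is a small standalone sketch (not bootloader code) that recomputes how many pages of each size the three loops above install for the 0 -> 4 GiB range:
```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
    // 4 KiB pages cover 0 -> 2 MiB (the identity map skips page 0,
    // only the HHDM alias maps all 512 slots).
    uint64_t p4k = 0x200000ULL / 0x1000;                         // 512
    // 2 MiB pages cover 2 MiB -> 1 GiB.
    uint64_t p2m = (0x40000000ULL - 0x200000) / 0x200000;        // 511
    // 1 GiB pages cover 1 GiB -> 4 GiB.
    uint64_t p1g = (0x100000000ULL - 0x40000000) / 0x40000000;   // 3
    // 2 MiB -> 4 GiB spans 0xFFE00000 bytes, which is not a multiple of
    // 1 GiB (0x40000000); hence the mixed page sizes.
    printf("%llu x 4KiB, %llu x 2MiB, %llu x 1GiB\n",
           (unsigned long long)p4k, (unsigned long long)p2m,
           (unsigned long long)p1g);
    return 0;
}
```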

@@ -74,14 +99,8 @@ static pagemap_t build_pagemap(int paging_mode, bool nx, struct elf_range *range
for (size_t i = 0; i < _memmap_entries; i++)
_memmap[i] = memmap[i];

// Map all free memory regions to the higher half direct map offset
// Map any other region of memory from the memmap
for (size_t i = 0; i < _memmap_entries; i++) {
if (_memmap[i].type != MEMMAP_USABLE
&& _memmap[i].type != MEMMAP_BOOTLOADER_RECLAIMABLE
&& _memmap[i].type != MEMMAP_KERNEL_AND_MODULES) {
continue;
}

uint64_t base = _memmap[i].base;
uint64_t length = _memmap[i].length;
uint64_t top = base + length;
@@ -100,6 +119,7 @@ static pagemap_t build_pagemap(int paging_mode, bool nx, struct elf_range *range

for (uint64_t j = 0; j < aligned_length; j += 0x40000000) {
uint64_t page = aligned_base + j;
map_page(pagemap, page, page, VMM_FLAG_WRITE, Size1GiB);
map_page(pagemap, direct_map_offset + page, page, VMM_FLAG_WRITE, Size1GiB);
}
}
@@ -120,17 +140,11 @@ static pagemap_t build_pagemap(int paging_mode, bool nx, struct elf_range *range

for (uint64_t j = 0; j < aligned_length; j += 0x1000) {
uint64_t page = aligned_base + j;
map_page(pagemap, page, page, VMM_FLAG_WRITE | VMM_FLAG_FB, Size4KiB);
map_page(pagemap, direct_map_offset + page, page, VMM_FLAG_WRITE | VMM_FLAG_FB, Size4KiB);
}
}

// XXX we do this as a quick and dirty way to switch to the higher half
#if defined (__x86_64__) || defined (__i386__)
for (uint64_t i = 0; i < 0x100000000; i += 0x40000000) {
map_page(pagemap, i, i, VMM_FLAG_WRITE, Size1GiB);
}
#endif

return pagemap;
}

@@ -944,10 +958,9 @@ FEAT_START
uint64_t bsp_mpidr;

smp_info = init_smp(&cpu_count, &bsp_mpidr,
pagemap, LIMINE_MAIR(fb_attr), LIMINE_TCR(tsz, pa), LIMINE_SCTLR,
direct_map_offset);
pagemap, LIMINE_MAIR(fb_attr), LIMINE_TCR(tsz, pa), LIMINE_SCTLR);
#elif defined (__riscv64)
smp_info = init_smp(&cpu_count, pagemap, direct_map_offset);
smp_info = init_smp(&cpu_count, pagemap);
#else
#error Unknown architecture
#endif
@@ -1081,26 +1094,24 @@ FEAT_END

uint64_t reported_stack = reported_addr(stack);

common_spinup(limine_spinup_32, 10,
common_spinup(limine_spinup_32, 8,
paging_mode, (uint32_t)(uintptr_t)pagemap.top_level,
(uint32_t)entry_point, (uint32_t)(entry_point >> 32),
(uint32_t)reported_stack, (uint32_t)(reported_stack >> 32),
(uint32_t)(uintptr_t)local_gdt, nx_available,
(uint32_t)direct_map_offset, (uint32_t)(direct_map_offset >> 32));
(uint32_t)(uintptr_t)local_gdt, nx_available);
#elif defined (__aarch64__)
vmm_assert_4k_pages();

uint64_t reported_stack = reported_addr(stack);

enter_in_el1(entry_point, reported_stack, LIMINE_SCTLR, LIMINE_MAIR(fb_attr), LIMINE_TCR(tsz, pa),
(uint64_t)pagemap.top_level[0],
(uint64_t)pagemap.top_level[1],
direct_map_offset);
(uint64_t)pagemap.top_level[1], 0);
#elif defined (__riscv64)
uint64_t reported_stack = reported_addr(stack);
uint64_t satp = make_satp(pagemap.paging_mode, pagemap.top_level);

riscv_spinup(entry_point, reported_stack, satp, direct_map_offset);
riscv_spinup(entry_point, reported_stack, satp);
#else
#error Unknown architecture
#endif
18 changes: 0 additions & 18 deletions common/protos/limine_32.asm_x86
@@ -67,24 +67,6 @@ bits 64
mov eax, [rsp+28] ; local_gdt
lgdt [rax]

; Jump to higher half
mov rax, qword [rsp+36]
add rsp, rax
call .p2
.p2:
add qword [rsp], .hh - .p2
add qword [rsp], rax
retq
.hh:

; Unmap lower half entirely
mov rsi, cr3
lea rdi, [rsi + rax]
mov rcx, 256
xor rax, rax
rep stosq
mov cr3, rsi

; Push fake return address
mov rsi, [rsp+20] ; stack
sub rsi, 8