diff --git a/ymir/arch/x86/interrupt.zig b/ymir/arch/x86/interrupt.zig
index 97c7ace..117a8e1 100644
--- a/ymir/arch/x86/interrupt.zig
+++ b/ymir/arch/x86/interrupt.zig
@@ -78,6 +78,8 @@ pub fn init() void {
     }
 
     // Detailed handling for page faults.
+    // TODO: For page faults, we have to allocate a dedicated interrupt stack, register it
+    // in the TSS, and switch to it, because the fault itself can be caused by a stack overflow.
     registerHandler(page_fault, unhandledFaultHandler);
 
     idt.init();
diff --git a/ymir/linker.ld b/ymir/linker.ld
index b5f6ad0..0d81029 100644
--- a/ymir/linker.ld
+++ b/ymir/linker.ld
@@ -1,24 +1,50 @@
 KERNEL_VADDR_BASE = 0xFFFFFFFF80000000;
 KERNEL_VADDR_TEXT = 0xFFFFFFFF80100000;
 
+STACK_SIZE = 0x5000;
+
+PHDRS {
+    text PT_LOAD;
+    rodata PT_LOAD;
+    data PT_LOAD;
+    bss PT_LOAD;
+
+    __stackguard_upper PT_LOAD FLAGS(4);
+    __stack PT_LOAD FLAGS(6);
+    __stackguard_lower PT_LOAD FLAGS(4);
+}
+
 SECTIONS {
     . = KERNEL_VADDR_TEXT;
 
     .text ALIGN(4K) : AT (ADDR(.text) - KERNEL_VADDR_BASE) {
         *(.text)
         *(.ltext)
-    }
+    } :text
 
     .rodata ALIGN(4K) : AT (ADDR(.rodata) - KERNEL_VADDR_BASE) {
         *(.rodata)
-    }
+    } :rodata
 
     .data ALIGN(4K) : AT (ADDR(.data) - KERNEL_VADDR_BASE) {
         *(.data)
-    }
+    } :data
 
     .bss ALIGN(4K) : AT (ADDR(.bss) - KERNEL_VADDR_BASE) {
         *(COMMON)
         *(.bss)
-    }
+    } :bss
+
+    __stackguard_upper ALIGN(4K) (NOLOAD) : AT (. - KERNEL_VADDR_BASE) {
+        . += 4K;
+    } :__stackguard_upper
+
+    __stack ALIGN(4K) (NOLOAD) : AT (. - KERNEL_VADDR_BASE) {
+        . += STACK_SIZE;
+    } :__stack
+
+    __stackguard_lower ALIGN(4K) (NOLOAD) : AT (. - KERNEL_VADDR_BASE) {
+        __stackguard_lower = .;
+        . += 4K;
+    } :__stackguard_lower
 }
diff --git a/ymir/main.zig b/ymir/main.zig
index b2c4cf9..7d0e651 100644
--- a/ymir/main.zig
+++ b/ymir/main.zig
@@ -19,12 +19,8 @@ const page_size = mem.page_size;
 pub const panic = ymir.panic.panic_fn;
 pub const std_options = klog.default_log_options;
 
-/// Size in bytes pages of the kernel stack excluding the guard page.
-const kstack_size = page_size * 5;
-/// Kernel stack.
-/// The first page is used as a guard page.
-/// TODO: make the guard page read-only.
-var kstack: [kstack_size + page_size]u8 align(page_size) = [_]u8{0} ** (kstack_size + page_size);
+/// Guard page placed below the kernel stack.
+extern const __stackguard_lower: [*]const u8;
 
 /// Kernel entry point called by surtr.
 /// The function switches stack from the surtr stack to the kernel stack.
@@ -33,7 +29,7 @@ export fn kernelEntry() callconv(.Naked) noreturn {
         \\movq %[new_stack], %%rsp
         \\call kernelTrampoline
         :
-        : [new_stack] "r" (@intFromPtr(&kstack) + kstack_size + page_size),
+        : [new_stack] "r" (@intFromPtr(&__stackguard_lower) - 0x10),
     );