From 25edf427d46ecbb5a84530383dc9c9a6e763a168 Mon Sep 17 00:00:00 2001 From: Daniel Bittman Date: Fri, 19 Apr 2024 15:58:34 -0700 Subject: [PATCH] Run rustfmt. --- src/bin/netmgr/src/link/ethernet.rs | 12 +- src/bin/netmgr/src/link/mod.rs | 5 +- src/bin/netmgr/src/network/ipv4.rs | 2 +- src/bin/netmgr/src/nics/loopback.rs | 6 +- src/bin/netmgr/src/nics/mod.rs | 6 +- src/bin/netmgr/src/route.rs | 1 + src/bin/netmgr/src/transport/icmp.rs | 3 +- src/bin/netmgr/src/transport/tcp.rs | 1 + src/bin/netmgr/src/transport/udp.rs | 1 + src/bin/pager/src/main.rs | 9 +- src/bin/pager/src/nvme/controller.rs | 11 +- src/bin/pager/src/store.rs | 3 +- src/kernel/macros/src/lib.rs | 9 +- src/kernel/src/arch/aarch64/address.rs | 41 +- src/kernel/src/arch/aarch64/cntp.rs | 14 +- src/kernel/src/arch/aarch64/context.rs | 7 +- src/kernel/src/arch/aarch64/exception.rs | 7 +- src/kernel/src/arch/aarch64/image.rs | 1 - src/kernel/src/arch/aarch64/interrupt.rs | 45 +- src/kernel/src/arch/aarch64/memory.rs | 1 - src/kernel/src/arch/aarch64/memory/frame.rs | 4 +- src/kernel/src/arch/aarch64/memory/mmio.rs | 21 +- .../src/arch/aarch64/memory/pagetables.rs | 3 +- .../aarch64/memory/pagetables/consistency.rs | 23 +- .../arch/aarch64/memory/pagetables/entry.rs | 3 +- .../arch/aarch64/memory/pagetables/mair.rs | 156 +- .../arch/aarch64/memory/pagetables/table.rs | 10 +- src/kernel/src/arch/aarch64/mod.rs | 27 +- src/kernel/src/arch/aarch64/processor.rs | 19 +- src/kernel/src/arch/aarch64/start.rs | 98 +- src/kernel/src/arch/aarch64/syscall.rs | 18 +- src/kernel/src/arch/aarch64/thread.rs | 30 +- src/kernel/src/arch/amd64/acpi.rs | 4 +- src/kernel/src/arch/amd64/address.rs | 28 +- src/kernel/src/arch/amd64/apic/ipi.rs | 3 +- src/kernel/src/arch/amd64/apic/local.rs | 3 +- src/kernel/src/arch/amd64/context.rs | 3 +- src/kernel/src/arch/amd64/gdt.rs | 19 +- src/kernel/src/arch/amd64/image.rs | 1 - src/kernel/src/arch/amd64/interrupt.rs | 11 +- src/kernel/src/arch/amd64/ioapic.rs | 6 +- src/kernel/src/arch/amd64/memory.rs | 22 +- .../amd64/memory/pagetables/consistency.rs | 27 +- .../src/arch/amd64/memory/pagetables/table.rs | 11 +- src/kernel/src/arch/amd64/mod.rs | 192 +-- src/kernel/src/arch/amd64/processor.rs | 17 +- src/kernel/src/arch/amd64/start.rs | 9 +- src/kernel/src/arch/amd64/syscall.rs | 17 +- src/kernel/src/arch/amd64/thread.rs | 16 +- src/kernel/src/arch/amd64/tsc.rs | 4 +- src/kernel/src/arch/mod.rs | 22 +- src/kernel/src/clock.rs | 12 +- src/kernel/src/condvar.rs | 5 +- src/kernel/src/device.rs | 2 +- src/kernel/src/idcounter.rs | 10 +- src/kernel/src/image.rs | 26 +- src/kernel/src/initrd.rs | 14 +- src/kernel/src/interrupt.rs | 1 + src/kernel/src/log.rs | 776 +++++----- src/kernel/src/machine/arm/common/boot/mod.rs | 20 +- .../src/machine/arm/common/boot/psci.rs | 36 +- .../src/machine/arm/common/gicv2/gicc.rs | 8 +- .../src/machine/arm/common/gicv2/gicd.rs | 38 +- .../src/machine/arm/common/gicv2/mod.rs | 11 +- src/kernel/src/machine/arm/common/mmio.rs | 4 +- src/kernel/src/machine/arm/common/uart.rs | 38 +- src/kernel/src/machine/arm/virt/info.rs | 36 +- src/kernel/src/machine/arm/virt/interrupt.rs | 4 +- src/kernel/src/machine/arm/virt/memory.rs | 20 +- src/kernel/src/machine/arm/virt/mod.rs | 2 +- src/kernel/src/machine/arm/virt/processor.rs | 7 +- src/kernel/src/machine/arm/virt/serial.rs | 23 +- src/kernel/src/machine/mod.rs | 62 +- src/kernel/src/machine/pc/mod.rs | 14 +- src/kernel/src/machine/pc/pcie.rs | 40 +- src/kernel/src/machine/pc/serial.rs | 451 +++--- src/kernel/src/machine/time.rs | 2 +- 
src/kernel/src/memory/allocator.rs | 296 ++-- src/kernel/src/memory/context.rs | 81 +- src/kernel/src/memory/context/virtmem.rs | 35 +- src/kernel/src/memory/frame.rs | 1252 +++++++++-------- src/kernel/src/memory/mod.rs | 94 +- src/kernel/src/memory/pagetables.rs | 6 +- .../src/memory/pagetables/consistency.rs | 3 +- src/kernel/src/memory/pagetables/cursor.rs | 6 +- src/kernel/src/memory/pagetables/mapper.rs | 30 +- .../src/memory/pagetables/phys_provider.rs | 3 +- src/kernel/src/memory/pagetables/reader.rs | 23 +- src/kernel/src/memory/pagetables/table.rs | 3 +- src/kernel/src/mutex.rs | 9 +- src/kernel/src/obj/copy.rs | 102 +- src/kernel/src/obj/mod.rs | 16 +- src/kernel/src/obj/pages.rs | 21 +- src/kernel/src/obj/pagevec.rs | 3 +- src/kernel/src/obj/range.rs | 12 +- src/kernel/src/obj/thread_sync.rs | 7 +- src/kernel/src/once.rs | 3 +- src/kernel/src/operations.rs | 1 + src/kernel/src/panic.rs | 310 ++-- src/kernel/src/processor.rs | 1163 ++++++++------- src/kernel/src/queue.rs | 2 +- src/kernel/src/sched.rs | 6 +- src/kernel/src/security.rs | 9 +- src/kernel/src/syscall/mod.rs | 13 +- src/kernel/src/syscall/object.rs | 1 + src/kernel/src/syscall/sync.rs | 2 +- src/kernel/src/thread.rs | 11 +- src/kernel/src/thread/entry.rs | 24 +- src/kernel/src/thread/suspend.rs | 34 +- src/kernel/src/userinit.rs | 1 + src/kernel/src/utils.rs | 158 +-- src/lib/twizzler-abi/src/aux.rs | 3 +- src/lib/twizzler-abi/src/device/mod.rs | 6 +- src/lib/twizzler-abi/src/marker.rs | 3 +- src/lib/twizzler-abi/src/meta.rs | 7 +- src/lib/twizzler-abi/src/object.rs | 3 +- src/lib/twizzler-abi/src/runtime.rs | 13 +- src/lib/twizzler-abi/src/runtime/alloc.rs | 7 +- src/lib/twizzler-abi/src/runtime/core.rs | 9 +- src/lib/twizzler-abi/src/runtime/debug.rs | 6 +- src/lib/twizzler-abi/src/runtime/idcounter.rs | 6 +- src/lib/twizzler-abi/src/runtime/load_elf.rs | 9 +- src/lib/twizzler-abi/src/runtime/object.rs | 7 +- .../twizzler-abi/src/runtime/object/handle.rs | 2 +- .../twizzler-abi/src/runtime/object/slot.rs | 14 +- .../twizzler-abi/src/runtime/simple_mutex.rs | 10 +- src/lib/twizzler-abi/src/runtime/stdio.rs | 3 +- src/lib/twizzler-abi/src/runtime/thread.rs | 14 +- src/lib/twizzler-abi/src/runtime/time.rs | 3 +- src/lib/twizzler-abi/src/syscall/console.rs | 3 +- src/lib/twizzler-abi/src/syscall/create.rs | 5 +- src/lib/twizzler-abi/src/syscall/handle.rs | 3 +- src/lib/twizzler-abi/src/syscall/info.rs | 3 +- src/lib/twizzler-abi/src/syscall/kaction.rs | 3 +- src/lib/twizzler-abi/src/syscall/map.rs | 3 +- src/lib/twizzler-abi/src/syscall/mod.rs | 3 +- .../src/syscall/object_control.rs | 3 +- .../twizzler-abi/src/syscall/object_stat.rs | 3 +- src/lib/twizzler-abi/src/syscall/security.rs | 3 +- src/lib/twizzler-abi/src/syscall/spawn.rs | 14 +- .../src/syscall/thread_control.rs | 17 +- .../twizzler-abi/src/syscall/thread_sync.rs | 23 +- .../twizzler-abi/src/syscall/time/clock.rs | 28 +- src/lib/twizzler-abi/src/syscall/time/mod.rs | 16 +- src/lib/twizzler-abi/src/thread.rs | 7 +- src/lib/twizzler-async/src/async_source.rs | 13 +- src/lib/twizzler-async/src/block_on.rs | 8 +- src/lib/twizzler-async/src/future.rs | 9 +- src/lib/twizzler-async/src/lib.rs | 16 +- src/lib/twizzler-async/src/run.rs | 10 +- src/lib/twizzler-async/src/task.rs | 15 +- src/lib/twizzler-async/src/thread_local.rs | 5 +- src/lib/twizzler-async/src/throttle.rs | 3 +- src/lib/twizzler-driver/src/device/mod.rs | 11 +- src/lib/twizzler-driver/src/dma/mod.rs | 9 +- src/lib/twizzler-driver/src/dma/object.rs | 3 +- src/lib/twizzler-driver/src/dma/pool.rs | 
11 +- src/lib/twizzler-driver/src/dma/region.rs | 19 +- .../twizzler-driver/src/request/inflight.rs | 9 +- src/lib/twizzler-driver/src/request/mod.rs | 9 +- .../twizzler-driver/src/request/requester.rs | 10 +- src/lib/twizzler-net/src/lib.rs | 13 +- src/lib/twizzler-net/src/nm_handle.rs | 5 +- src/lib/twizzler-object/src/init.rs | 2 +- src/lib/twizzler-queue-raw/src/lib.rs | 22 +- src/lib/twizzler-queue/src/lib.rs | 3 +- src/lib/twizzler-queue/src/queue.rs | 19 +- src/lib/twizzler-runtime-api/src/lib.rs | 80 +- src/lib/twizzler-runtime-api/src/rt0.rs | 3 +- src/runtime/dynlink/src/arch/aarch64.rs | 16 +- src/runtime/dynlink/src/arch/x86_64.rs | 16 +- src/runtime/dynlink/src/compartment.rs | 2 +- src/runtime/dynlink/src/compartment/tls.rs | 3 +- src/runtime/dynlink/src/context.rs | 27 +- src/runtime/dynlink/src/context/deps.rs | 6 +- src/runtime/dynlink/src/context/load.rs | 34 +- src/runtime/dynlink/src/context/relocate.rs | 28 +- src/runtime/dynlink/src/context/runtime.rs | 3 +- src/runtime/dynlink/src/context/syms.rs | 8 +- src/runtime/dynlink/src/lib.rs | 114 +- src/runtime/dynlink/src/library.rs | 8 +- src/runtime/dynlink/src/symbol.rs | 7 +- src/runtime/dynlink/src/tls.rs | 12 +- src/runtime/monitor-api/src/lib.rs | 17 +- src/runtime/monitor/secapi/gates.rs | 3 +- src/runtime/monitor/src/thread.rs | 3 +- src/runtime/secgate/src/lib.rs | 21 +- src/runtime/twz-rt/src/lib.rs | 4 +- src/runtime/twz-rt/src/runtime.rs | 10 +- src/runtime/twz-rt/src/runtime/alloc.rs | 29 +- src/runtime/twz-rt/src/runtime/core.rs | 3 +- src/runtime/twz-rt/src/runtime/debug.rs | 3 +- src/runtime/twz-rt/src/runtime/object.rs | 3 +- src/runtime/twz-rt/src/runtime/slot.rs | 52 +- src/runtime/twz-rt/src/runtime/stdio.rs | 10 +- src/runtime/twz-rt/src/runtime/thread.rs | 4 +- .../twz-rt/src/runtime/thread/internal.rs | 3 +- src/runtime/twz-rt/src/runtime/thread/mgr.rs | 7 +- src/runtime/twz-rt/src/runtime/thread/tcb.rs | 3 +- tools/image_builder/src/main.rs | 51 +- tools/initrd_gen/src/main.rs | 7 +- tools/xtask/src/toolchain.rs | 3 +- 202 files changed, 3809 insertions(+), 3694 deletions(-) diff --git a/src/bin/netmgr/src/link/ethernet.rs b/src/bin/netmgr/src/link/ethernet.rs index 149765dc..fc8cd86a 100644 --- a/src/bin/netmgr/src/link/ethernet.rs +++ b/src/bin/netmgr/src/link/ethernet.rs @@ -1,12 +1,12 @@ use std::sync::Arc; -use byteorder::ByteOrder; -use byteorder::NetworkEndian; +use byteorder::{ByteOrder, NetworkEndian}; -use crate::header::Header; -use crate::link::nic::NicBuffer; -use crate::link::IncomingPacketInfo; -use crate::network::ipv4::handle_incoming_ipv4_packet; +use crate::{ + header::Header, + link::{nic::NicBuffer, IncomingPacketInfo}, + network::ipv4::handle_incoming_ipv4_packet, +}; #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug, Default)] #[repr(C)] diff --git a/src/bin/netmgr/src/link/mod.rs b/src/bin/netmgr/src/link/mod.rs index d9677456..8a2be7d8 100644 --- a/src/bin/netmgr/src/link/mod.rs +++ b/src/bin/netmgr/src/link/mod.rs @@ -1,8 +1,7 @@ use std::sync::Arc; -use crate::header::Header; - use self::nic::NicBuffer; +use crate::header::Header; pub mod ethernet; pub mod nic; @@ -56,7 +55,7 @@ impl IncomingPacketInfo { Some(self) } -#[allow(dead_code)] + #[allow(dead_code)] pub fn packet_len(&self) -> usize { self.buffer.packet_len() } diff --git a/src/bin/netmgr/src/network/ipv4.rs b/src/bin/netmgr/src/network/ipv4.rs index 4ee10dcc..bde73f48 100644 --- a/src/bin/netmgr/src/network/ipv4.rs +++ b/src/bin/netmgr/src/network/ipv4.rs @@ -5,9 +5,9 @@ use 
twizzler_net::addr::{Ipv4Addr, ServiceAddr}; use crate::{ header::Header, - link::nic::{NicBuffer, SendableBuffer}, link::{ ethernet::{EtherType, EthernetAddr, EthernetHeader}, + nic::{NicBuffer, SendableBuffer}, IncomingPacketInfo, }, transport::handle_packet, diff --git a/src/bin/netmgr/src/nics/loopback.rs b/src/bin/netmgr/src/nics/loopback.rs index 16c9e7fc..5e0d786c 100644 --- a/src/bin/netmgr/src/nics/loopback.rs +++ b/src/bin/netmgr/src/nics/loopback.rs @@ -5,9 +5,9 @@ use std::{ use twizzler_async::FlagBlock; -use crate::{ - link::ethernet::{EthernetAddr, EthernetError}, - link::nic::{NetworkInterface, NicBuffer, SendableBuffer}, +use crate::link::{ + ethernet::{EthernetAddr, EthernetError}, + nic::{NetworkInterface, NicBuffer, SendableBuffer}, }; struct LoopbackInner { diff --git a/src/bin/netmgr/src/nics/mod.rs b/src/bin/netmgr/src/nics/mod.rs index d44c0a94..42e156ce 100644 --- a/src/bin/netmgr/src/nics/mod.rs +++ b/src/bin/netmgr/src/nics/mod.rs @@ -5,9 +5,9 @@ use std::{ use twizzler_async::Task; -use crate::{ - link::ethernet::{handle_incoming_ethernet_packets, EthernetAddr}, - link::nic::NetworkInterface, +use crate::link::{ + ethernet::{handle_incoming_ethernet_packets, EthernetAddr}, + nic::NetworkInterface, }; mod loopback; diff --git a/src/bin/netmgr/src/route.rs b/src/bin/netmgr/src/route.rs index e69de29b..8b137891 100644 --- a/src/bin/netmgr/src/route.rs +++ b/src/bin/netmgr/src/route.rs @@ -0,0 +1 @@ + diff --git a/src/bin/netmgr/src/transport/icmp.rs b/src/bin/netmgr/src/transport/icmp.rs index fde8e45b..d1e61b34 100644 --- a/src/bin/netmgr/src/transport/icmp.rs +++ b/src/bin/netmgr/src/transport/icmp.rs @@ -6,6 +6,7 @@ use twizzler_net::{ ListenFlags, PacketData, RxRequest, TxCompletion, }; +use super::TransportProto; use crate::{ endpoint::{foreach_endpoint, EndPointKey}, header::Header, @@ -17,8 +18,6 @@ use crate::{ HandleRef, }; -use super::TransportProto; - pub struct Icmp; #[async_trait::async_trait] diff --git a/src/bin/netmgr/src/transport/tcp.rs b/src/bin/netmgr/src/transport/tcp.rs index e69de29b..8b137891 100644 --- a/src/bin/netmgr/src/transport/tcp.rs +++ b/src/bin/netmgr/src/transport/tcp.rs @@ -0,0 +1 @@ + diff --git a/src/bin/netmgr/src/transport/udp.rs b/src/bin/netmgr/src/transport/udp.rs index e69de29b..8b137891 100644 --- a/src/bin/netmgr/src/transport/udp.rs +++ b/src/bin/netmgr/src/transport/udp.rs @@ -0,0 +1 @@ + diff --git a/src/bin/pager/src/main.rs b/src/bin/pager/src/main.rs index 5fba9768..9b3b922d 100644 --- a/src/bin/pager/src/main.rs +++ b/src/bin/pager/src/main.rs @@ -1,15 +1,12 @@ -use std::time::Duration; +use std::{collections::BTreeMap, time::Duration}; +use tickv::{success_codes::SuccessCode, ErrorCode}; use twizzler_abi::pager::{ CompletionToKernel, CompletionToPager, KernelCompletionData, RequestFromKernel, RequestFromPager, }; use twizzler_object::{ObjID, Object, ObjectInitFlags, Protections}; -use std::collections::BTreeMap; - -use tickv::{success_codes::SuccessCode, ErrorCode}; - use crate::store::{Key, KeyValueStore}; mod nvme; @@ -117,7 +114,7 @@ impl<'a> Tester<'a> { let _ = self.get(k); let num = quick_random() % 3; if num == 0 || num == 2 { - let _ = self.put(k, Foo { x: x }); + let _ = self.put(k, Foo { x }); } else if num == 1 { let _ = self.del(k); } diff --git a/src/bin/pager/src/nvme/controller.rs b/src/bin/pager/src/nvme/controller.rs index 63587e07..517c9a3c 100644 --- a/src/bin/pager/src/nvme/controller.rs +++ b/src/bin/pager/src/nvme/controller.rs @@ -4,7 +4,7 @@ use std::{ }; use nvme::{ - 
admin::CreateIOCompletionQueue, + admin::{CreateIOCompletionQueue, CreateIOSubmissionQueue}, ds::{ controller::properties::config::ControllerConfig, identify::controller::IdentifyControllerDataStructure, @@ -13,10 +13,9 @@ use nvme::{ comentry::CommonCompletion, subentry::CommonCommand, CommandId, QueueId, QueuePriority, }, }, - hosted::memory::PrpMode, + hosted::memory::{PhysicalPageCollection, PrpMode}, nvm::{ReadDword13, WriteDword13}, }; -use nvme::{admin::CreateIOSubmissionQueue, hosted::memory::PhysicalPageCollection}; use twizzler_async::Task; use twizzler_driver::{ dma::{DmaOptions, DmaPool, DMA_PAGE_SIZE}, @@ -25,9 +24,8 @@ use twizzler_driver::{ }; use volatile::map_field; -use crate::nvme::dma::NvmeDmaSliceRegion; - use super::{dma::NvmeDmaRegion, requester::NvmeRequester}; +use crate::nvme::dma::NvmeDmaSliceRegion; pub struct NvmeController { requester: RwLock>>, @@ -239,7 +237,8 @@ impl NvmeController { .unwrap(); { - // TODO: we should save these NvmeDmaRegions so they don't drop (dropping is okay, but this leaks memory ) + // TODO: we should save these NvmeDmaRegions so they don't drop (dropping is okay, but + // this leaks memory ) let cmd = CreateIOCompletionQueue::new( CommandId::new(), cqid, diff --git a/src/bin/pager/src/store.rs b/src/bin/pager/src/store.rs index 31319e01..480c4299 100644 --- a/src/bin/pager/src/store.rs +++ b/src/bin/pager/src/store.rs @@ -69,7 +69,8 @@ pub fn hasher(t: &T) -> u64 { let mut h = DefaultHasher::new(); t.hash(&mut h); let x = h.finish(); - // Don't ever hash to 0, 1, MAX, or MAX-1. Makes the open addressing easier, and 0 and MAX-1 are required for tickv. + // Don't ever hash to 0, 1, MAX, or MAX-1. Makes the open addressing easier, and 0 and MAX-1 are + // required for tickv. match x { 0 => 2, u64::MAX => u64::MAX - 2, diff --git a/src/kernel/macros/src/lib.rs b/src/kernel/macros/src/lib.rs index 492fdbdb..e5af778e 100644 --- a/src/kernel/macros/src/lib.rs +++ b/src/kernel/macros/src/lib.rs @@ -1,13 +1,12 @@ #![feature(extend_one)] -use proc_macro::TokenStream; -use proc_macro::TokenTree; +use proc_macro::{TokenStream, TokenTree}; extern crate proc_macro; #[proc_macro_attribute] -// Okay, look, I know what you're gonna say. Why do we need to get this complicated just to do tests. -// The answer is names. See, our friends in the rust community have not fully implemented this issue: -// https://github.com/rust-lang/rust/issues/50297. Until this is implemented, I don't know how to grab +// Okay, look, I know what you're gonna say. Why do we need to get this complicated just to do +// tests. The answer is names. See, our friends in the rust community have not fully implemented +// this issue: https://github.com/rust-lang/rust/issues/50297. Until this is implemented, I don't know how to grab // name from a test function in a way that makes the test _runner_ know the names of the tests it's // running. So we just embed the name ourselves using #[test_case]. 
pub fn kernel_test(_attr: TokenStream, items: TokenStream) -> TokenStream { diff --git a/src/kernel/src/arch/aarch64/address.rs b/src/kernel/src/arch/aarch64/address.rs index ab185104..b04061b1 100644 --- a/src/kernel/src/arch/aarch64/address.rs +++ b/src/kernel/src/arch/aarch64/address.rs @@ -3,15 +3,16 @@ /// https://developer.arm.com/documentation/100940/0101/?lang=en /// and the Arm Architecture Reference Manual for A-profile architecture /// https://developer.arm.com/documentation/ddi0487/latest - -use core::{fmt::LowerHex, ops::{Sub, RangeInclusive}}; +use core::{ + fmt::LowerHex, + ops::{RangeInclusive, Sub}, +}; use arm64::registers::ID_AA64MMFR0_EL1; use registers::interfaces::Readable; -use crate::once::Once; - use super::memory::phys_to_virt; +use crate::once::Once; /// A representation of a canonical virtual address. #[derive(Clone, Copy, PartialEq, PartialOrd, Ord, Eq)] @@ -32,7 +33,7 @@ impl VirtAddr { /// The start of the kernel object mapping. const KOBJ_START: Self = Self(0xFFFF_F000_0000_0000); - + // TTBR0_EL1 points to a page table root for addresses ranging from // 0x0 to 0x0000_FFFF_FFFF_FFFF. Generally this is used to cover // user accessible memory (EL0). @@ -40,18 +41,18 @@ impl VirtAddr { // The start range of valid addresses that TTBR0 covers 0x0000_0000_0000_0000, // The end range of valid addresses that TTBR0 covers - 0x0000_FFFF_FFFF_FFFF + 0x0000_FFFF_FFFF_FFFF, ); // TTBR1_EL1 -> a pt root for addresses ranging from // 0xFFFF_FFFF_FFFF_FFFF to 0xFFFF_0000_0000_0000 - // Generally this is used to cover exclusively + // Generally this is used to cover exclusively // kernel accessible memory (EL1). const TTBR1_EL1: RangeInclusive = RangeInclusive::new( // The start range of valid addresses that TTBR1 covers 0xFFFF_0000_0000_0000, // The end range of valid addresses that TTBR1 covers - 0xFFFF_FFFF_FFFF_FFFF + 0xFFFF_FFFF_FFFF_FFFF, ); // The size of the virtual address range reserved for MMIO. @@ -64,7 +65,7 @@ impl VirtAddr { // The start range of addresses used for MMIO *Self::TTBR1_EL1.start(), // The end range of addresses used for MMIO - *Self::TTBR1_EL1.start() + Self::MMIO_RANGE_SIZE + *Self::TTBR1_EL1.start() + Self::MMIO_RANGE_SIZE, ); /// The bits that are valid which are used in address translation @@ -73,11 +74,11 @@ impl VirtAddr { const VALID_HIGH_ADDRESS: u64 = 0xFFFF; /// The valid value for the upper bits of a low address const VALID_LOW_ADDRESS: u64 = 0x0; - + pub const fn start_kernel_memory() -> Self { Self(*Self::TTBR1_EL1.start()) } - + pub const fn start_kernel_object_memory() -> Self { Self::KOBJ_START } @@ -98,22 +99,21 @@ impl VirtAddr { Self(*Self::TTBR0_EL1.end()) } - /// Construct a new virtual address from the provided addr value, only if the provided value is a valid, canonical - /// address. If not, returns Err. + /// Construct a new virtual address from the provided addr value, only if the provided value is + /// a valid, canonical address. If not, returns Err. pub const fn new(addr: u64) -> Result { - // The most significant 16 bits of an address must be 0xFFFF or 0x0000. + // The most significant 16 bits of an address must be 0xFFFF or 0x0000. // Any attempt to use a different bit value triggers a fault. // For now we assume that virtual address tagging is disabled. 
- let top_two_bytes = addr - .checked_shr(Self::VALID_ADDR_BITS) - .unwrap(); + let top_two_bytes = addr.checked_shr(Self::VALID_ADDR_BITS).unwrap(); match top_two_bytes { Self::VALID_HIGH_ADDRESS | Self::VALID_LOW_ADDRESS => Ok(Self(addr)), _ => Err(NonCanonical), } } - /// Construct a new virtual address from a u64 without verifying that it is a valid virtual address. + /// Construct a new virtual address from a u64 without verifying that it is a valid virtual + /// address. /// /// # Safety /// The provided address must be canonical. @@ -239,7 +239,7 @@ impl PhysAddr { Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_44) => 44, Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_48) => 48, Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_52) => 52, - _ => unimplemented!("unknown PA size") + _ => unimplemented!("unknown PA size"), } }) } @@ -253,7 +253,8 @@ impl PhysAddr { } } - /// Construct a new physical address from a u64 without verifying that it is a valid physical address. + /// Construct a new physical address from a u64 without verifying that it is a valid physical + /// address. /// /// # Safety /// The provided address must be a valid address. diff --git a/src/kernel/src/arch/aarch64/cntp.rs b/src/kernel/src/arch/aarch64/cntp.rs index e44d67f7..4be5bc98 100644 --- a/src/kernel/src/arch/aarch64/cntp.rs +++ b/src/kernel/src/arch/aarch64/cntp.rs @@ -1,14 +1,12 @@ /// The `ClockHardware` interface for the CNTP_EL0 timer /// This timer is local to a single core, and timestamps /// are synchronized to a global system timer count - use arm64::registers::{CNTFRQ_EL0, CNTPCT_EL0, CNTP_CTL_EL0, CNTP_TVAL_EL0}; -use registers::interfaces::{Readable, Writeable, ReadWriteable}; +use registers::interfaces::{ReadWriteable, Readable, Writeable}; +use twizzler_abi::syscall::{ClockFlags, ClockInfo, FemtoSeconds, TimeSpan, FEMTOS_PER_SEC}; use crate::time::{ClockHardware, Ticks}; -use twizzler_abi::syscall::{ClockFlags, ClockInfo, FemtoSeconds, TimeSpan, FEMTOS_PER_SEC}; - /// The Non-secure physical timer `CNTP` for EL0. pub struct PhysicalTimer { info: ClockInfo, @@ -44,7 +42,7 @@ impl PhysicalTimer { // round up // ticks = time / rate => span as femtos / rate (in femtos) - let ticks = span.as_femtos() / self.info.resolution().0 as u128; + let ticks = span.as_femtos() / self.info.resolution().0 as u128; // configure the timer to fire after a certain amount of ticks have passed // @@ -53,9 +51,7 @@ impl PhysicalTimer { CNTP_TVAL_EL0.set(ticks as u64); // clear the interrupt mask and enable the timer - CNTP_CTL_EL0.modify( - CNTP_CTL_EL0::IMASK::CLEAR + CNTP_CTL_EL0::ENABLE::SET - ); + CNTP_CTL_EL0.modify(CNTP_CTL_EL0::IMASK::CLEAR + CNTP_CTL_EL0::ENABLE::SET); } } @@ -81,7 +77,7 @@ pub fn cntp_interrupt_handler() { // handle the timer interrupt by advancing the scheduler ticks crate::clock::oneshot_clock_hardtick(); - // Disable the timer to clear the interrupt. Software must clear + // Disable the timer to clear the interrupt. Software must clear // the interrupt before deactivating the interrupt in the // interrupt controller, otherwise it will keep firing. // diff --git a/src/kernel/src/arch/aarch64/context.rs b/src/kernel/src/arch/aarch64/context.rs index 784fdd40..2ecd74bf 100644 --- a/src/kernel/src/arch/aarch64/context.rs +++ b/src/kernel/src/arch/aarch64/context.rs @@ -88,10 +88,11 @@ impl ArchContext { /// Switch to a target context. /// /// # Safety - /// This function must be called with a target that comes from an ArchContext that lives long enough. 
+ /// This function must be called with a target that comes from an ArchContext that lives long + /// enough. pub unsafe fn switch_to_target(tgt: &ArchContextTarget) { - // TODO: If the incoming target is already the current user table, this should be a no-op. Also, we don't - // need to set the kernel tables each time. + // TODO: If the incoming target is already the current user table, this should be a no-op. + // Also, we don't need to set the kernel tables each time. // write TTBR1 TTBR1_EL1.set_baddr(KERNEL_TABLE_ADDR.raw()); // write TTBR0 diff --git a/src/kernel/src/arch/aarch64/exception.rs b/src/kernel/src/arch/aarch64/exception.rs index 3e8aa9e2..6653ada5 100644 --- a/src/kernel/src/arch/aarch64/exception.rs +++ b/src/kernel/src/arch/aarch64/exception.rs @@ -12,9 +12,10 @@ use registers::{ interfaces::{Readable, Writeable}, registers::InMemoryRegister, }; - -use twizzler_abi::arch::syscall::SYSCALL_MAGIC; -use twizzler_abi::upcall::{MemoryAccessKind, UpcallFrame}; +use twizzler_abi::{ + arch::syscall::SYSCALL_MAGIC, + upcall::{MemoryAccessKind, UpcallFrame}, +}; use super::thread::UpcallAble; use crate::memory::{context::virtmem::PageFaultFlags, VirtAddr}; diff --git a/src/kernel/src/arch/aarch64/image.rs b/src/kernel/src/arch/aarch64/image.rs index 64663727..783f7101 100644 --- a/src/kernel/src/arch/aarch64/image.rs +++ b/src/kernel/src/arch/aarch64/image.rs @@ -1,5 +1,4 @@ /// TLS initialization for the kernel image - use crate::{ image::{TlsInfo, TlsVariant}, memory::VirtAddr, diff --git a/src/kernel/src/arch/aarch64/interrupt.rs b/src/kernel/src/arch/aarch64/interrupt.rs index cf8eff77..39d7c3b2 100644 --- a/src/kernel/src/arch/aarch64/interrupt.rs +++ b/src/kernel/src/arch/aarch64/interrupt.rs @@ -3,25 +3,22 @@ /// External interrupt sources, or simply interrupts in /// general orignate from a device or another processor /// which can be routed by an interrupt controller - use arm64::registers::DAIF; use registers::interfaces::Readable; +use twizzler_abi::kso::{InterruptAllocateOptions, InterruptPriority}; -use twizzler_abi::{ - kso::{InterruptAllocateOptions, InterruptPriority}, +use super::{ + cntp::{cntp_interrupt_handler, PhysicalTimer}, + exception::{exception_handler, restore_stack_pointer, save_stack_pointer, ExceptionContext}, }; - use crate::{ - interrupt::{DynamicInterrupt, Destination, TriggerMode, PinPolarity}, + interrupt::{Destination, DynamicInterrupt, PinPolarity, TriggerMode}, + machine::{ + interrupt::INTERRUPT_CONTROLLER, + serial::{serial_interrupt_handler, SERIAL_INT_ID}, + }, processor::current_processor, }; -use crate::machine::interrupt::INTERRUPT_CONTROLLER; -use crate::machine::serial::{SERIAL_INT_ID, serial_interrupt_handler}; - -use super::exception::{ - ExceptionContext, exception_handler, save_stack_pointer, restore_stack_pointer, -}; -use super::cntp::{PhysicalTimer, cntp_interrupt_handler}; // interrupt vector table size/num vectors pub const GENERIC_IPI_VECTOR: u32 = 0; // Used for IPI @@ -133,17 +130,17 @@ pub(super) fn irq_exception_handler(_ctx: &mut ExceptionContext) { // Get pending IRQ number from GIC CPU Interface. // Doing so acknowledges the pending interrupt. let irq_number = INTERRUPT_CONTROLLER.pending_interrupt(); - + match irq_number { PhysicalTimer::INTERRUPT_ID => { // call timer interrupt handler cntp_interrupt_handler(); - }, + } _ if irq_number == *SERIAL_INT_ID => { // call the serial interrupt handler serial_interrupt_handler(); - }, - _ => panic!("unknown irq number! 
{}", irq_number) + } + _ => panic!("unknown irq number! {}", irq_number), } // signal the GIC that we have serviced the IRQ INTERRUPT_CONTROLLER.finish_active_interrupt(irq_number); @@ -167,7 +164,7 @@ pub fn allocate_interrupt_vector( todo!() } -// code for IPI signal to send +// code for IPI signal to send // needed by generic IPI code pub enum InterProcessorInterrupt { Reschedule = 0, /* TODO */ @@ -179,10 +176,13 @@ impl Drop for DynamicInterrupt { } } -pub fn init_interrupts() { +pub fn init_interrupts() { let cpu = current_processor(); - emerglogln!("[arch::interrupt] processor {} initializing interrupts", cpu.id); + emerglogln!( + "[arch::interrupt] processor {} initializing interrupts", + cpu.id + ); // initialize interrupt controller if cpu.is_bsp() { @@ -193,10 +193,7 @@ pub fn init_interrupts() { // enable this CPU to recieve interrupts from the timer // by configuring the interrupt controller to route // the timer's interrupt to us - INTERRUPT_CONTROLLER.route_interrupt( - PhysicalTimer::INTERRUPT_ID, - cpu.id, - ); + INTERRUPT_CONTROLLER.route_interrupt(PhysicalTimer::INTERRUPT_ID, cpu.id); INTERRUPT_CONTROLLER.enable_interrupt(PhysicalTimer::INTERRUPT_ID); } @@ -209,7 +206,7 @@ pub fn set_interrupt( ) { match destination { Destination::Bsp => INTERRUPT_CONTROLLER.route_interrupt(num, current_processor().bsp_id()), - _ => todo!("routing interrupt: {:?}", destination) + _ => todo!("routing interrupt: {:?}", destination), } INTERRUPT_CONTROLLER.enable_interrupt(num); } diff --git a/src/kernel/src/arch/aarch64/memory.rs b/src/kernel/src/arch/aarch64/memory.rs index 032c7d4a..8390a656 100755 --- a/src/kernel/src/arch/aarch64/memory.rs +++ b/src/kernel/src/arch/aarch64/memory.rs @@ -1,4 +1,3 @@ - use super::address::{PhysAddr, VirtAddr}; pub mod frame; diff --git a/src/kernel/src/arch/aarch64/memory/frame.rs b/src/kernel/src/arch/aarch64/memory/frame.rs index 3dd72d6d..87616602 100644 --- a/src/kernel/src/arch/aarch64/memory/frame.rs +++ b/src/kernel/src/arch/aarch64/memory/frame.rs @@ -4,10 +4,10 @@ /// In this implementation we go with 4 KiB pages. pub const FRAME_SIZE: usize = FrameSize::Size4KiB as usize; -/// The possible frame sizes for a page of memory +/// The possible frame sizes for a page of memory /// mapped by a page table. enum FrameSize { Size4KiB = 1 << 12, Size16KiB = 1 << 14, Size64KiB = 1 << 16, -} \ No newline at end of file +} diff --git a/src/kernel/src/arch/aarch64/memory/mmio.rs b/src/kernel/src/arch/aarch64/memory/mmio.rs index 387cdcee..ba9a007a 100644 --- a/src/kernel/src/arch/aarch64/memory/mmio.rs +++ b/src/kernel/src/arch/aarch64/memory/mmio.rs @@ -1,9 +1,8 @@ /// An allocator that allocates MMIO addresses from a given range. - use core::alloc::Layout; -use crate::{memory::VirtAddr, spinlock::Spinlock}; use super::frame::FRAME_SIZE; +use crate::{memory::VirtAddr, spinlock::Spinlock}; /// A simple bump allocator that does not reclaim memory. /// This intended operating mode is okay for now. 
Addresses @@ -30,15 +29,14 @@ impl BumpAlloc { self.start.raw() as usize + self.length } - pub fn alloc(&mut self, size: usize) -> Result { + pub fn alloc(&mut self, size: usize) -> Result { // create a layout and allocate a range of addresses // based on an aligned allocation size - let layout = Layout::from_size_align(size, FRAME_SIZE) - .expect("failed to allocate region"); + let layout = Layout::from_size_align(size, FRAME_SIZE).expect("failed to allocate region"); // reserve space for this allocation size let new_marker = self.marker.raw() as usize + layout.size(); if new_marker > self.end() { - return Err(()) + return Err(()); } else { let vaddr = self.marker; self.marker = VirtAddr::try_from(new_marker).map_err(|_| ())?; @@ -48,9 +46,8 @@ impl BumpAlloc { } pub static MMIO_ALLOCATOR: Spinlock = Spinlock::new({ - let mmio_range_start = *VirtAddr::MMIO_RANGE.start(); - let vaddr_start = unsafe { VirtAddr::new_unchecked(mmio_range_start) }; - let length = VirtAddr::MMIO_RANGE_SIZE as usize; - BumpAlloc::new(vaddr_start, length) - } -); + let mmio_range_start = *VirtAddr::MMIO_RANGE.start(); + let vaddr_start = unsafe { VirtAddr::new_unchecked(mmio_range_start) }; + let length = VirtAddr::MMIO_RANGE_SIZE as usize; + BumpAlloc::new(vaddr_start, length) +}); diff --git a/src/kernel/src/arch/aarch64/memory/pagetables.rs b/src/kernel/src/arch/aarch64/memory/pagetables.rs index 0760778a..338f920d 100644 --- a/src/kernel/src/arch/aarch64/memory/pagetables.rs +++ b/src/kernel/src/arch/aarch64/memory/pagetables.rs @@ -5,6 +5,5 @@ mod table; pub use consistency::{ArchCacheLineMgr, ArchTlbMgr}; pub use entry::{Entry, EntryFlags}; -pub use table::Table; - pub use mair::{memory_attr_manager, MemoryAttribute}; +pub use table::Table; diff --git a/src/kernel/src/arch/aarch64/memory/pagetables/consistency.rs b/src/kernel/src/arch/aarch64/memory/pagetables/consistency.rs index 6ccd755a..aa0793a6 100644 --- a/src/kernel/src/arch/aarch64/memory/pagetables/consistency.rs +++ b/src/kernel/src/arch/aarch64/memory/pagetables/consistency.rs @@ -12,9 +12,10 @@ pub struct ArchCacheLineMgr { } impl ArchCacheLineMgr { - /// Flush a given cache line when this [ArchCacheLineMgr] is dropped. Subsequent flush requests for the same cache - /// line will be batched. Flushes for different cache lines will cause older requests to flush immediately, and the - /// new request will be flushed when this object is dropped. + /// Flush a given cache line when this [ArchCacheLineMgr] is dropped. Subsequent flush requests + /// for the same cache line will be batched. Flushes for different cache lines will cause + /// older requests to flush immediately, and the new request will be flushed when this + /// object is dropped. pub fn flush(&mut self, line: VirtAddr) { // logln!("[arch::cacheln] flush called on: {:#018x}", line.raw()); let addr: u64 = line.into(); @@ -97,16 +98,16 @@ impl TlbInvData { // A queue of TLB invalidations containg the data arguments struct TlbInvQueue { data: [TlbInvData; Self::MAX_OUTSTANDING_INVALIDATIONS], - len: u8 + len: u8, } impl TlbInvQueue { const MAX_OUTSTANDING_INVALIDATIONS: usize = 16; fn new() -> Self { - Self { - data: [TlbInvData::default(); Self::MAX_OUTSTANDING_INVALIDATIONS], - len: 0 + Self { + data: [TlbInvData::default(); Self::MAX_OUTSTANDING_INVALIDATIONS], + len: 0, } } @@ -137,7 +138,7 @@ impl TlbInvQueue { /// A management object for TLB invalidations that occur during a page table operation. 
pub struct ArchTlbMgr { queue: TlbInvQueue, - root: PhysAddr + root: PhysAddr, } impl ArchTlbMgr { @@ -149,8 +150,8 @@ impl ArchTlbMgr { } } - /// Enqueue a new TLB invalidation. is_global should be set iff the page is global, and is_terminal should be set - /// iff the invalidation is for a leaf. + /// Enqueue a new TLB invalidation. is_global should be set iff the page is global, and + /// is_terminal should be set iff the invalidation is for a leaf. pub fn enqueue(&mut self, addr: VirtAddr, _is_global: bool, is_terminal: bool, _level: usize) { // only invalidate leaves if is_terminal { @@ -168,4 +169,4 @@ impl Drop for ArchTlbMgr { fn drop(&mut self) { self.finish(); } -} \ No newline at end of file +} diff --git a/src/kernel/src/arch/aarch64/memory/pagetables/entry.rs b/src/kernel/src/arch/aarch64/memory/pagetables/entry.rs index 85d23e6c..9d9985f6 100644 --- a/src/kernel/src/arch/aarch64/memory/pagetables/entry.rs +++ b/src/kernel/src/arch/aarch64/memory/pagetables/entry.rs @@ -1,12 +1,11 @@ use twizzler_abi::{device::CacheType, object::Protections}; +use super::mair::{memory_attr_manager, AttributeIndex}; use crate::{ arch::address::PhysAddr, memory::pagetables::{MappingFlags, MappingSettings}, }; -use super::mair::{memory_attr_manager, AttributeIndex}; - #[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Ord, Eq)] #[repr(transparent)] /// The type of a single entry in a page table. diff --git a/src/kernel/src/arch/aarch64/memory/pagetables/mair.rs b/src/kernel/src/arch/aarch64/memory/pagetables/mair.rs index 82b8472a..dee3a152 100644 --- a/src/kernel/src/arch/aarch64/memory/pagetables/mair.rs +++ b/src/kernel/src/arch/aarch64/memory/pagetables/mair.rs @@ -1,16 +1,15 @@ /// An abstraction to manage state in the MAIR_EL1 system register. -/// +/// /// The MAIR_EL1 register is responsible for storing the memory /// attributes used by the page tables (cache type, device vs normal, etc.) 
-/// +/// /// A description of the MAIR_EL1 register can be found in section /// D17.2.97 of the "Arm Architecture Reference Manual" - use arm64::registers::MAIR_EL1; -use registers::interfaces::{Readable, Writeable}; -use registers::LocalRegisterCopy; -use registers::register_bitfields; - +use registers::{ + interfaces::{Readable, Writeable}, + register_bitfields, LocalRegisterCopy, +}; use twizzler_abi::device::CacheType; // TODO: check the bounds of this @@ -24,7 +23,7 @@ pub struct MemoryAttribute { impl MemoryAttribute { fn new(attr: u8) -> Self { Self { - attr: LocalRegisterCopy::new(attr) + attr: LocalRegisterCopy::new(attr), } } @@ -34,29 +33,28 @@ impl MemoryAttribute { Some(MEM_ATTR::Normal_Outer::Value::Device) => { // if bit 1 is not set then we have a valid device attribute self.attr.get() & 0b10 == 0 - }, + } // we have normal memory Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_Transient_WriteAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_Transient_ReadAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_Transient_ReadWriteAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_Transient_WriteAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_Transient_ReadAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_Transient_ReadWriteAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_NonTransient) - | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_NonTransient_WriteAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_NonTransient_ReadWriteAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient) - | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient_WriteAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient_ReadAlloc) - => { - // unpredictable if lower bits are not 0 (WriteThrough_Transient) - match self.attr.read_as_enum(MEM_ATTR::Normal_Inner) { - Some(MEM_ATTR::Normal_Inner::Value::WriteThrough_Transient) => true, - _ => false - } - }, + | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_Transient_ReadAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_Transient_ReadWriteAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_Transient_WriteAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_Transient_ReadAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_Transient_ReadWriteAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_NonTransient) + | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_NonTransient_WriteAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_NonTransient_ReadWriteAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient) + | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient_WriteAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient_ReadAlloc) => { + // unpredictable if lower bits are not 0 (WriteThrough_Transient) + match self.attr.read_as_enum(MEM_ATTR::Normal_Inner) { + Some(MEM_ATTR::Normal_Inner::Value::WriteThrough_Transient) => true, + _ => false, + } + } None => todo!("unrecognized cache type"), - Some(_) => true // other memory attribute encodings are valid, e.g. noncacheable + Some(_) => true, // other memory attribute encodings are valid, e.g. 
noncacheable } } @@ -67,13 +65,13 @@ impl MemoryAttribute { #[derive(Debug)] pub enum AttributeError { - NoEntry, // might not need + NoEntry, // might not need Exists(u8), // could get around this - Full, // is needed + Full, // is needed } pub struct MemoryAttributeManager { - mair: [MemoryAttribute; 8] + mair: [MemoryAttribute; 8], } // TODO: in the future we might want a replace entry method @@ -86,17 +84,17 @@ impl MemoryAttributeManager { // convert u64 MAIR value to a slice const MAIR_LEN: u64 = 8; const MAIR_MASK: u64 = 0xFF; - let attr0 = (mair >> (0 * MAIR_LEN)) & MAIR_MASK; - let attr1 = (mair >> (1 * MAIR_LEN)) & MAIR_MASK; - let attr2 = (mair >> (2 * MAIR_LEN)) & MAIR_MASK; - let attr3 = (mair >> (3 * MAIR_LEN)) & MAIR_MASK; - let attr4 = (mair >> (4 * MAIR_LEN)) & MAIR_MASK; - let attr5 = (mair >> (5 * MAIR_LEN)) & MAIR_MASK; - let attr6 = (mair >> (6 * MAIR_LEN)) & MAIR_MASK; - let attr7 = (mair >> (7 * MAIR_LEN)) & MAIR_MASK; + let attr0 = (mair >> (0 * MAIR_LEN)) & MAIR_MASK; + let attr1 = (mair >> (1 * MAIR_LEN)) & MAIR_MASK; + let attr2 = (mair >> (2 * MAIR_LEN)) & MAIR_MASK; + let attr3 = (mair >> (3 * MAIR_LEN)) & MAIR_MASK; + let attr4 = (mair >> (4 * MAIR_LEN)) & MAIR_MASK; + let attr5 = (mair >> (5 * MAIR_LEN)) & MAIR_MASK; + let attr6 = (mair >> (6 * MAIR_LEN)) & MAIR_MASK; + let attr7 = (mair >> (7 * MAIR_LEN)) & MAIR_MASK; Self { mair: [ - MemoryAttribute::new(attr0 as u8), + MemoryAttribute::new(attr0 as u8), MemoryAttribute::new(attr1 as u8), MemoryAttribute::new(attr2 as u8), MemoryAttribute::new(attr3 as u8), @@ -104,14 +102,14 @@ impl MemoryAttributeManager { MemoryAttribute::new(attr5 as u8), MemoryAttribute::new(attr6 as u8), MemoryAttribute::new(attr7 as u8), - ] + ], } } // read entries of the register state pub fn read_entry(&self, index: AttributeIndex) -> Option { // we assume we keep an up to date copy of the MAIR register - // we assume (for now) that the index is in bounds + // we assume (for now) that the index is in bounds let attr = self.mair[index as usize]; if attr.is_valid() { Some(attr) @@ -136,14 +134,14 @@ impl MemoryAttributeManager { // set the index to the desired entry self.mair[void as usize] = entry; // write out state - MAIR_EL1.set( - u64::from_le_bytes( // assumes we are on a le machine - unsafe { // TODO: test this ... - // don't know if transmute works now with local reg copy??? - core::mem::transmute::<[MemoryAttribute; 8], [u8; 8]>(self.mair) - } - ) - ); + MAIR_EL1.set(u64::from_le_bytes( + // assumes we are on a le machine + unsafe { + // TODO: test this ... + // don't know if transmute works now with local reg copy??? 
+ core::mem::transmute::<[MemoryAttribute; 8], [u8; 8]>(self.mair) + }, + )); Ok(void) } } @@ -157,7 +155,7 @@ impl MemoryAttributeManager { if entry.is_valid() { // check for equality if entry.raw() == attr.raw() { - return Some(index as u8) + return Some(index as u8); } } } @@ -165,10 +163,11 @@ impl MemoryAttributeManager { } // returns the index of an invalid entry (if any) - fn find_invalid_index(&self) -> Result { // could be option + fn find_invalid_index(&self) -> Result { + // could be option for (index, entry) in self.mair.iter().enumerate() { if !entry.is_valid() { - return Ok(index as AttributeIndex) + return Ok(index as AttributeIndex); } } Err(AttributeError::Full) @@ -179,11 +178,17 @@ impl From for MemoryAttribute { fn from(memory: CacheType) -> Self { match memory { // we map all device mmio as strict device memory - CacheType::MemoryMappedIO => MemoryAttribute::new(MEM_ATTR::Device::Value::nonGathering_nonReordering_noEarlyWriteAck as u8), + CacheType::MemoryMappedIO => MemoryAttribute::new( + MEM_ATTR::Device::Value::nonGathering_nonReordering_noEarlyWriteAck as u8, + ), // map cache type to memory attribute - CacheType::Uncacheable => MemoryAttribute::new(MEM_ATTR::Normal_Outer::Value::NonCacheable as u8), + CacheType::Uncacheable => { + MemoryAttribute::new(MEM_ATTR::Normal_Outer::Value::NonCacheable as u8) + } // default all normal memory to write back - CacheType::WriteBack | _ => MemoryAttribute::new(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient_ReadWriteAlloc as u8), + CacheType::WriteBack | _ => MemoryAttribute::new( + MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient_ReadWriteAlloc as u8, + ), } } } @@ -200,20 +205,24 @@ impl From for CacheType { Some(MEM_ATTR::Normal_Outer::Value::NonCacheable) => CacheType::Uncacheable, // is this memory write-through? Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_Transient_WriteAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_Transient_ReadAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_Transient_ReadWriteAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_NonTransient) - | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_NonTransient_WriteAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_NonTransient_ReadAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_NonTransient_ReadWriteAlloc) => CacheType::WriteThrough, + | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_Transient_ReadAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_Transient_ReadWriteAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_NonTransient) + | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_NonTransient_WriteAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_NonTransient_ReadAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteThrough_NonTransient_ReadWriteAlloc) => { + CacheType::WriteThrough + } // is this memory write-back? 
Some(MEM_ATTR::Normal_Outer::Value::WriteBack_Transient_WriteAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_Transient_ReadAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_Transient_ReadWriteAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient) - | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient_WriteAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient_ReadAlloc) - | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient_ReadWriteAlloc) => CacheType::WriteBack, + | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_Transient_ReadAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_Transient_ReadWriteAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient) + | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient_WriteAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient_ReadAlloc) + | Some(MEM_ATTR::Normal_Outer::Value::WriteBack_NonTransient_ReadWriteAlloc) => { + CacheType::WriteBack + } None => todo!("unrecognized cache type"), } } @@ -230,10 +239,13 @@ pub fn memory_attr_manager() -> &'static MemoryAttributeManager { // unpredictable states // 0b0000dd1x UNPREDICTABLE. -// 0b01000000 If FEAT_XS is implemented: Normal Inner Non-cacheable, Outer Non-cacheable memory with the XS attribute set to 0. Otherwise, UNPREDICTABLE. -// 0b10100000 If FEAT_XS is implemented: Normal Inner Write-through Cacheable, Outer Write-through Cacheable, Read-Allocate, No-Write Allocate, Non-transient memory with the XS attribute set to 0. Otherwise, UNPREDICTABLE. -// 0b11110000 If FEAT_MTE2 is implemented: Tagged Normal Inner Write-Back, Outer Write-Back, Read-Allocate, Write-Allocate Non-transient memory. Otherwise, UNPREDICTABLE. -// 0bxxxx0000, (xxxx != 0000, xxxx != 0100, xxxx != 1010, xxxx != 1111) UNPREDICTABLE. +// 0b01000000 If FEAT_XS is implemented: Normal Inner Non-cacheable, Outer Non-cacheable memory +// with the XS attribute set to 0. Otherwise, UNPREDICTABLE. 0b10100000 If FEAT_XS is implemented: +// Normal Inner Write-through Cacheable, Outer Write-through Cacheable, Read-Allocate, No-Write +// Allocate, Non-transient memory with the XS attribute set to 0. Otherwise, UNPREDICTABLE. +// 0b11110000 If FEAT_MTE2 is implemented: Tagged Normal Inner Write-Back, Outer Write-Back, +// Read-Allocate, Write-Allocate Non-transient memory. Otherwise, UNPREDICTABLE. 0bxxxx0000, (xxxx +// != 0000, xxxx != 0100, xxxx != 1010, xxxx != 1111) UNPREDICTABLE. register_bitfields! {u8, pub MEM_ATTR [ diff --git a/src/kernel/src/arch/aarch64/memory/pagetables/table.rs b/src/kernel/src/arch/aarch64/memory/pagetables/table.rs index 70f464a0..0bdce074 100644 --- a/src/kernel/src/arch/aarch64/memory/pagetables/table.rs +++ b/src/kernel/src/arch/aarch64/memory/pagetables/table.rs @@ -3,9 +3,8 @@ use core::ops::{Index, IndexMut}; use arm64::registers::TTBR1_EL1; use registers::interfaces::Readable; -use crate::{arch::address::VirtAddr, memory::PhysAddr}; - use super::Entry; +use crate::{arch::address::VirtAddr, memory::PhysAddr}; #[repr(transparent)] /// Representation of a page table. Can be indexed with []. @@ -72,8 +71,9 @@ impl Table { /// Set the current count of used entries. /// /// Note: On some architectures that make available bits in the page table entries, - /// this function may choose to do something clever, like store the count in the available bits. But it could also - /// make this function a no-op, and make [Table::read_count] just count the entries. 
+ /// this function may choose to do something clever, like store the count in the available bits. + /// But it could also make this function a no-op, and make [Table::read_count] just count + /// the entries. pub fn set_count(&mut self, _count: usize) { // for now let's make this a no-op // the pt entries on arm does have some spare bits @@ -107,7 +107,7 @@ impl Table { /// Get the page size of a given level. pub fn level_to_page_size(level: usize) -> usize { // frame size * num entries ** (3-level) - 1 << (12 + 9 * (Self::MAX_LEVEL - level)) + 1 << (12 + 9 * (Self::MAX_LEVEL - level)) } /// Get the level of the last page table. diff --git a/src/kernel/src/arch/aarch64/mod.rs b/src/kernel/src/arch/aarch64/mod.rs index f365bb6e..a6ad0dff 100644 --- a/src/kernel/src/arch/aarch64/mod.rs +++ b/src/kernel/src/arch/aarch64/mod.rs @@ -1,16 +1,11 @@ -use arm64::registers::{TPIDR_EL1, SPSel}; +use arm64::registers::{SPSel, TPIDR_EL1}; use registers::{ - registers::InMemoryRegister, interfaces::{Readable, Writeable}, + registers::InMemoryRegister, }; - use twizzler_abi::syscall::TimeSpan; -use crate::{ - clock::Nanoseconds, - BootInfo, - syscall::SyscallContext, -}; +use crate::{clock::Nanoseconds, syscall::SyscallContext, BootInfo}; pub mod address; mod cntp; @@ -20,12 +15,12 @@ pub mod image; pub mod interrupt; pub mod memory; pub mod processor; +mod start; mod syscall; pub mod thread; -mod start; -pub use address::{VirtAddr, PhysAddr}; -pub use interrupt::{send_ipi, init_interrupts, set_interrupt}; +pub use address::{PhysAddr, VirtAddr}; +pub use interrupt::{init_interrupts, send_ipi, set_interrupt}; pub use start::BootInfoSystemTable; pub fn init(boot_info: &B) { @@ -40,7 +35,7 @@ pub fn init(boot_info: &B) { // Initialize the machine specific enumeration state (e.g., DeviceTree, ACPI) crate::machine::info::init(boot_info); - + // check if SPSel is already set to use SP_EL1 let spsel: InMemoryRegister = InMemoryRegister::new(SPSel.get()); if spsel.matches_all(SPSel::SP::EL0) { @@ -81,7 +76,7 @@ pub fn init(boot_info: &B) { pub fn init_secondary() { // initialize exceptions by setting up our exception vectors exception::init(); - + // check if SPSel is already set to use SP_EL1 let spsel: InMemoryRegister = InMemoryRegister::new(SPSel.get()); if spsel.matches_all(SPSel::SP::EL0) { @@ -137,7 +132,11 @@ pub fn schedule_oneshot_tick(time: Nanoseconds) { /// Jump into userspace /// # Safety /// The stack and target must be valid addresses. 
-pub unsafe fn jump_to_user(target: crate::memory::VirtAddr, stack: crate::memory::VirtAddr, arg: u64) { +pub unsafe fn jump_to_user( + target: crate::memory::VirtAddr, + stack: crate::memory::VirtAddr, + arg: u64, +) { let ctx = syscall::Armv8SyscallContext::create_jmp_context(target, stack, arg); crate::thread::exit_kernel(); syscall::return_to_user(&ctx); diff --git a/src/kernel/src/arch/aarch64/processor.rs b/src/kernel/src/arch/aarch64/processor.rs index d185c9f3..cffab583 100755 --- a/src/kernel/src/arch/aarch64/processor.rs +++ b/src/kernel/src/arch/aarch64/processor.rs @@ -1,21 +1,22 @@ use alloc::vec::Vec; use core::sync::atomic::{AtomicBool, Ordering::SeqCst}; -use arm64::registers::{TPIDR_EL1, MPIDR_EL1}; -use arm64::asm::{wfe, sev}; +use arm64::{ + asm::{sev, wfe}, + registers::{MPIDR_EL1, TPIDR_EL1}, +}; use registers::interfaces::{Readable, Writeable}; +#[allow(unused_imports)] // DEBUG +use super::interrupt::InterProcessorInterrupt; use crate::{ - machine::processor::{BootMethod, BootArgs}, + current_processor, + machine::processor::{BootArgs, BootMethod}, memory::VirtAddr, - processor::Processor, once::Once, - current_processor, + processor::Processor, }; -#[allow(unused_imports)] // DEBUG -use super::{interrupt::InterProcessorInterrupt}; - // initialize processor and any processor specific features pub fn init(tls: VirtAddr) { // Save thread local storage to an unused variable. @@ -41,7 +42,7 @@ pub fn enumerate_cpus() -> u32 { /// on the processor and register them in the time subsystem. pub fn enumerate_clocks() { // for now we utlize the physical timer (CNTPCT_EL0) - + // save reference to the CNTP clock source into global array crate::time::register_clock(super::cntp::PhysicalTimer::new()); } diff --git a/src/kernel/src/arch/aarch64/start.rs b/src/kernel/src/arch/aarch64/start.rs index ce79da4b..86502316 100644 --- a/src/kernel/src/arch/aarch64/start.rs +++ b/src/kernel/src/arch/aarch64/start.rs @@ -4,7 +4,7 @@ use limine::*; use crate::{ initrd::BootModule, - memory::{MemoryRegion, MemoryRegionKind, VirtAddr, PhysAddr}, + memory::{MemoryRegion, MemoryRegionKind, PhysAddr, VirtAddr}, BootInfo, }; @@ -22,13 +22,13 @@ struct Armv8BootInfo { memory: Vec, /// A reference to the kernel's ELF file in memory. - /// + /// /// This contains other useful information such as the kernel's /// command line parameters. kernel: &'static File, /// A list of user programs loaded into memory. - /// + /// /// This is essentially our initial ramdisk used /// to start userspace. 
modules: Vec, @@ -54,25 +54,18 @@ impl BootInfo for Armv8BootInfo { match table { BootInfoSystemTable::Dtb => match DTB_REQ.get_response().get() { Some(resp) => VirtAddr::new(resp.dtb_ptr.as_ptr().unwrap() as u64).unwrap(), - None => VirtAddr::new(0).unwrap() + None => VirtAddr::new(0).unwrap(), }, - BootInfoSystemTable::Efi => todo!("get EFI system table") + BootInfoSystemTable::Efi => todo!("get EFI system table"), } - } + } fn get_cmd_line(&self) -> &'static str { if let Some(cmd) = self.kernel.cmdline.as_ptr() { let ptr = cmd as *const u8; const MAX_CMD_LINE_LEN: usize = 0x1000; - let slice = unsafe { - core::slice::from_raw_parts(ptr, MAX_CMD_LINE_LEN) - }; - let slice = &slice[ - 0..slice - .iter() - .position(|r| *r == 0) - .unwrap_or(0) - ]; + let slice = unsafe { core::slice::from_raw_parts(ptr, MAX_CMD_LINE_LEN) }; + let slice = &slice[0..slice.iter().position(|r| *r == 0).unwrap_or(0)]; core::str::from_utf8(slice).unwrap() } else { "" @@ -91,8 +84,7 @@ impl From for MemoryRegionKind { } #[used] -static ENTRY_POINT: EntryPointRequest = EntryPointRequest::new(0) - .entry(Ptr::new(limine_entry)); +static ENTRY_POINT: EntryPointRequest = EntryPointRequest::new(0).entry(Ptr::new(limine_entry)); #[used] static MEMORY_MAP: MemmapRequest = MemmapRequest::new(0); @@ -147,7 +139,7 @@ fn limine_entry() -> ! { // } // TODO: it should be ok if it is empty when -k is passed on the command line - let modules = USER_MODULES + let modules = USER_MODULES .get_response() .get() .expect("no modules specified for kernel -- no way to start init") @@ -205,49 +197,63 @@ fn limine_entry() -> ! { } // function splits a memory region in half based on a reserved region - fn split(memmap: &NonNullPtr, reserved: &MemoryRegion) -> (Option, Option) { + fn split( + memmap: &NonNullPtr, + reserved: &MemoryRegion, + ) -> (Option, Option) { let lhs = memmap.base; let rhs = memmap.base + memmap.len; // case 1: take lhs range if reserved.start.raw() == lhs { - (None, Some(MemoryRegion { - kind: memmap.typ.into(), - start: PhysAddr::new(memmap.base + reserved.length as u64).unwrap(), - length: memmap.len as usize - reserved.length, - })) - } + ( + None, + Some(MemoryRegion { + kind: memmap.typ.into(), + start: PhysAddr::new(memmap.base + reserved.length as u64).unwrap(), + length: memmap.len as usize - reserved.length, + }), + ) + } // case 2: take rhs range else if reserved.start.raw() + reserved.length as u64 == rhs { - (Some(MemoryRegion { - kind: memmap.typ.into(), - start: PhysAddr::new(memmap.base).unwrap(), - length: memmap.len as usize - reserved.length, - }), None) + ( + Some(MemoryRegion { + kind: memmap.typ.into(), + start: PhysAddr::new(memmap.base).unwrap(), + length: memmap.len as usize - reserved.length, + }), + None, + ) } // case 3: split in the middle else { - (Some(MemoryRegion { - kind: memmap.typ.into(), - start: PhysAddr::new(memmap.base).unwrap(), - length: (reserved.start.raw() - memmap.base) as usize, - }), - Some(MemoryRegion { - kind: memmap.typ.into(), - start: PhysAddr::new(reserved.start.raw() + reserved.length as u64).unwrap(), - length: (memmap.len - reserved.length as u64 - (reserved.start.raw() - memmap.base)) as usize, - })) + ( + Some(MemoryRegion { + kind: memmap.typ.into(), + start: PhysAddr::new(memmap.base).unwrap(), + length: (reserved.start.raw() - memmap.base) as usize, + }), + Some(MemoryRegion { + kind: memmap.typ.into(), + start: PhysAddr::new(reserved.start.raw() + reserved.length as u64).unwrap(), + length: (memmap.len + - reserved.length as u64 + - 
(reserved.start.raw() - memmap.base)) + as usize, + }), + ) } } // convert module representation from bootloader to boot module boot_info.modules = modules - .iter() - .map(|m| BootModule { - start: VirtAddr::new(m.base.as_ptr().unwrap() as u64).unwrap(), - length: m.length as usize, - }) - .collect(); + .iter() + .map(|m| BootModule { + start: VirtAddr::new(m.base.as_ptr().unwrap() as u64).unwrap(), + length: m.length as usize, + }) + .collect(); crate::kernel_main(&mut boot_info) } diff --git a/src/kernel/src/arch/aarch64/syscall.rs b/src/kernel/src/arch/aarch64/syscall.rs index ada0ca22..9cdf3cb3 100644 --- a/src/kernel/src/arch/aarch64/syscall.rs +++ b/src/kernel/src/arch/aarch64/syscall.rs @@ -5,16 +5,13 @@ /// /// "Procedure Call Standard for the Arm® 64-bit Architecture (AArch64)": /// https://github.com/ARM-software/abi-aa/releases/download/2023Q1/aapcs64.pdf - -use arm64::registers::{ELR_EL1, SP_EL0, SPSR_EL1}; +use arm64::registers::{ELR_EL1, SPSR_EL1, SP_EL0}; use registers::interfaces::Writeable; - use twizzler_abi::upcall::UpcallFrame; +use super::{exception::ExceptionContext, thread::UpcallAble}; use crate::{memory::VirtAddr, syscall::SyscallContext}; -use super::{thread::UpcallAble, exception::ExceptionContext}; - /// The register state needed to transition between kernel and user. /// /// According to the ARM PCS Section 6, arguments/return values are @@ -110,8 +107,11 @@ pub unsafe fn return_to_user(context: &Armv8SyscallContext) -> ! { // - use sp_el0 stack pointer // - aarch64 execution state SPSR_EL1.write( - SPSR_EL1::D::Masked + SPSR_EL1::A::Masked + SPSR_EL1::I::Unmasked - + SPSR_EL1::F::Masked + SPSR_EL1::M::EL0t + SPSR_EL1::D::Masked + + SPSR_EL1::A::Masked + + SPSR_EL1::I::Unmasked + + SPSR_EL1::F::Masked + + SPSR_EL1::M::EL0t, ); // TODO: zero out/copy all registers @@ -141,9 +141,9 @@ pub fn handle_syscall(ctx: &mut ExceptionContext) { crate::thread::enter_kernel(); crate::interrupt::set(true); - + crate::syscall::syscall_entry(&mut context); - + crate::interrupt::set(false); crate::thread::exit_kernel(); diff --git a/src/kernel/src/arch/aarch64/thread.rs b/src/kernel/src/arch/aarch64/thread.rs index e6c5ddf2..80fabea7 100644 --- a/src/kernel/src/arch/aarch64/thread.rs +++ b/src/kernel/src/arch/aarch64/thread.rs @@ -1,22 +1,18 @@ /// CPU context (register state) switching. -/// +/// /// NOTE: According to section 6.1.1 of the 64-bit ARM /// Procedure Call Standard (PCS), not all registers /// need to be saved, only those needed for a subroutine call. -/// +/// /// A full detailed explanation can be found in the /// "Procedure Call Standard for the Arm® 64-bit Architecture (AArch64)": /// https://github.com/ARM-software/abi-aa/releases/download/2023Q1/aapcs64.pdf - use arm64::registers::TPIDR_EL0; use registers::interfaces::Writeable; - use twizzler_abi::upcall::{UpcallFrame, UpcallInfo, UpcallTarget}; -use crate::thread::Thread; -use crate::memory::VirtAddr; - -use super::{exception::ExceptionContext, syscall::Armv8SyscallContext, interrupt::DAIFMaskBits}; +use super::{exception::ExceptionContext, interrupt::DAIFMaskBits, syscall::Armv8SyscallContext}; +use crate::{memory::VirtAddr, thread::Thread}; #[derive(Copy, Clone)] pub enum Registers { @@ -26,7 +22,7 @@ pub enum Registers { } /// Registers that need to be saved between context switches. -/// +/// /// According to section 6.1.1, we only need to preserve /// registers x19-x30 and the stack pointer (sp). 
#[derive(Default)] @@ -63,7 +59,7 @@ unsafe impl Send for ArchThread {} impl ArchThread { pub fn new() -> Self { - Self { + Self { context: RegisterContext::default(), } } @@ -90,8 +86,8 @@ where // The alignment of addresses use by the stack const CHECKED_STACK_ALIGNMENT: usize = 16; -/// Compute the top of the stack. -/// +/// Compute the top of the stack. +/// /// # Safety /// The range from [stack_base, stack_base+stack_size] must be valid addresses. pub fn new_stack_top(stack_base: usize, stack_size: usize) -> VirtAddr { @@ -103,7 +99,9 @@ pub fn new_stack_top(stack_base: usize, stack_size: usize) -> VirtAddr { if stack_from_args.is_aligned_to(CHECKED_STACK_ALIGNMENT) { stack_from_args } else { - stack_from_args.align_down(CHECKED_STACK_ALIGNMENT as u64).unwrap() + stack_from_args + .align_down(CHECKED_STACK_ALIGNMENT as u64) + .unwrap() } } @@ -125,7 +123,7 @@ impl Thread { } /// Architechture specific CPU context switch. - /// + /// /// On 64-bit ARM systems, we only need to save a few registers /// then switch thread stacks before changing control flow. #[inline(never)] @@ -133,8 +131,8 @@ impl Thread { // The switch (1) saves registers x19-x30 and the stack pointer (sp) // onto the current thread's context save area (old_thread). // According to the 64-bit ARM PCS, this amount of context is fine. - // Other registers are either caller saved, or pushed onto - // the stack when taking an exception. + // Other registers are either caller saved, or pushed onto + // the stack when taking an exception. // Then we (2) restore the registes from the next thread's (self) context // save area, (3) switch stacks, (4) and return control by returning // to the address in the link register (x30). diff --git a/src/kernel/src/arch/amd64/acpi.rs b/src/kernel/src/arch/amd64/acpi.rs index cf028e47..06335890 100644 --- a/src/kernel/src/arch/amd64/acpi.rs +++ b/src/kernel/src/arch/amd64/acpi.rs @@ -2,10 +2,8 @@ use core::ptr::NonNull; use acpi::AcpiTables; -use crate::memory::PhysAddr; -use crate::once::Once; - use super::memory::phys_to_virt; +use crate::{memory::PhysAddr, once::Once}; #[derive(Clone, Copy, Debug)] pub struct AcpiHandlerImpl {} diff --git a/src/kernel/src/arch/amd64/address.rs b/src/kernel/src/arch/amd64/address.rs index 7da53dc6..c583ed1e 100644 --- a/src/kernel/src/arch/amd64/address.rs +++ b/src/kernel/src/arch/amd64/address.rs @@ -1,8 +1,7 @@ use core::{fmt::LowerHex, ops::Sub}; -use crate::once::Once; - use super::memory::phys_to_virt; +use crate::once::Once; /// A representation of a canonical virtual address. #[derive(Clone, Copy, PartialEq, PartialOrd, Ord, Eq)] @@ -26,7 +25,8 @@ impl VirtAddr { const PHYS_START: Self = Self(0xffff800000000000); pub const fn start_kernel_memory() -> Self { - // This is defined by the definitions of the two canonical regions of the virtual memory space. + // This is defined by the definitions of the two canonical regions of the virtual memory + // space. Self(0xffff800000000000) } @@ -39,19 +39,22 @@ impl VirtAddr { } pub const fn start_user_memory() -> Self { - // This is defined by the definitions of the two canonical regions of the virtual memory space. + // This is defined by the definitions of the two canonical regions of the virtual memory + // space. Self(0x0) } pub const fn end_user_memory() -> Self { - // This is defined by the definitions of the two canonical regions of the virtual memory space. + // This is defined by the definitions of the two canonical regions of the virtual memory + // space. 
Self(0x0000800000000000) } - /// Construct a new virtual address from the provided addr value, only if the provided value is a valid, canonical - /// address. If not, returns Err. + /// Construct a new virtual address from the provided addr value, only if the provided value is + /// a valid, canonical address. If not, returns Err. pub const fn new(addr: u64) -> Result { - // This is defined by the definitions of the two canonical regions of the virtual memory space. + // This is defined by the definitions of the two canonical regions of the virtual memory + // space. if addr >= 0xFFFF800000000000 || addr <= 0x00007fffffffffff { Ok(Self(addr)) } else { @@ -59,7 +62,8 @@ impl VirtAddr { } } - /// Construct a new virtual address from a u64 without verifying that it is a valid virtual address. + /// Construct a new virtual address from a u64 without verifying that it is a valid virtual + /// address. /// /// # Safety /// The provided address must be canonical. @@ -80,7 +84,8 @@ impl VirtAddr { } pub fn is_kernel(&self) -> bool { - // This is defined by the definitions of the two canonical regions of the virtual memory space. + // This is defined by the definitions of the two canonical regions of the virtual memory + // space. self.0 >= 0xffff800000000000 } @@ -193,7 +198,8 @@ impl PhysAddr { } } - /// Construct a new physical address from a u64 without verifying that it is a valid physical address. + /// Construct a new physical address from a u64 without verifying that it is a valid physical + /// address. /// /// # Safety /// The provided address must be a valid address. diff --git a/src/kernel/src/arch/amd64/apic/ipi.rs b/src/kernel/src/arch/amd64/apic/ipi.rs index 866238b4..55e9dade 100644 --- a/src/kernel/src/arch/amd64/apic/ipi.rs +++ b/src/kernel/src/arch/amd64/apic/ipi.rs @@ -1,6 +1,5 @@ -use crate::{interrupt::Destination, processor}; - use super::local::{get_lapic, LAPIC_ICRHI, LAPIC_ICRLO, LAPIC_ICRLO_STATUS_PEND}; +use crate::{interrupt::Destination, processor}; const DEST_SHORT_NONE: u32 = 0; const _DEST_SHORT_SELF: u32 = 1; diff --git a/src/kernel/src/arch/amd64/apic/local.rs b/src/kernel/src/arch/amd64/apic/local.rs index 3ac54e9b..6b1047a1 100644 --- a/src/kernel/src/arch/amd64/apic/local.rs +++ b/src/kernel/src/arch/amd64/apic/local.rs @@ -162,7 +162,8 @@ impl Lapic { LAPIC_TIMER, LAPIC_TIMER_VECTOR as u32 | LAPIC_TIMER_DEADLINE, ); - // Intel 3A:11.5.4.1 requires an mfence here, between the MMIO write (if we're in xAPIC mode) and the MSR write. + // Intel 3A:11.5.4.1 requires an mfence here, between the MMIO write (if we're in + // xAPIC mode) and the MSR write. if get_lapic().version == ApicVersion::XApic { asm!("mfence;"); } diff --git a/src/kernel/src/arch/amd64/context.rs b/src/kernel/src/arch/amd64/context.rs index 3ebbfc16..68821c01 100644 --- a/src/kernel/src/arch/amd64/context.rs +++ b/src/kernel/src/arch/amd64/context.rs @@ -62,7 +62,8 @@ impl ArchContext { /// Switch to a given set of page tables. /// /// # Safety - /// The specified target must be a root page table that will live as long as we are switched to it. + /// The specified target must be a root page table that will live as long as we are switched to + /// it. 
pub unsafe fn switch_to_target(tgt: &ArchContextTarget) { unsafe { if tgt.0 != x86::controlregs::cr3() { diff --git a/src/kernel/src/arch/amd64/gdt.rs b/src/kernel/src/arch/amd64/gdt.rs index 66d09441..55bc0368 100644 --- a/src/kernel/src/arch/amd64/gdt.rs +++ b/src/kernel/src/arch/amd64/gdt.rs @@ -1,15 +1,14 @@ use core::mem::size_of; -use x86::current::task::TaskStateSegment; -use x86::dtables::DescriptorTablePointer; -use x86::segmentation::BuildDescriptor; -use x86::segmentation::CodeSegmentType; -use x86::segmentation::DataSegmentType; -use x86::segmentation::Descriptor; -use x86::segmentation::DescriptorBuilder; -use x86::segmentation::SegmentDescriptorBuilder; -use x86::segmentation::SegmentSelector; -use x86::Ring; +use x86::{ + current::task::TaskStateSegment, + dtables::DescriptorTablePointer, + segmentation::{ + BuildDescriptor, CodeSegmentType, DataSegmentType, Descriptor, DescriptorBuilder, + SegmentDescriptorBuilder, SegmentSelector, + }, + Ring, +}; use crate::memory::VirtAddr; diff --git a/src/kernel/src/arch/amd64/image.rs b/src/kernel/src/arch/amd64/image.rs index 27d742e1..71421ee7 100644 --- a/src/kernel/src/arch/amd64/image.rs +++ b/src/kernel/src/arch/amd64/image.rs @@ -1,5 +1,4 @@ /// TLS initialization for the kernel image - use crate::{ image::{TlsInfo, TlsVariant}, memory::VirtAddr, diff --git a/src/kernel/src/arch/amd64/interrupt.rs b/src/kernel/src/arch/amd64/interrupt.rs index 1816308c..3cc45bbb 100644 --- a/src/kernel/src/arch/amd64/interrupt.rs +++ b/src/kernel/src/arch/amd64/interrupt.rs @@ -7,6 +7,11 @@ use twizzler_abi::{ }; use x86::current::rflags::RFlags; +use super::{ + gdt::user_selectors, + set_interrupt, + thread::{Registers, UpcallAble}, +}; use crate::{ arch::amd64::apic::get_lapic, interrupt::{Destination, DynamicInterrupt}, @@ -15,12 +20,6 @@ use crate::{ thread::current_thread_ref, }; -use super::{ - gdt::user_selectors, - set_interrupt, - thread::{Registers, UpcallAble}, -}; - pub const GENERIC_IPI_VECTOR: u32 = 200; pub const TLB_SHOOTDOWN_VECTOR: u32 = 201; pub const TIMER_VECTOR: u32 = 32; diff --git a/src/kernel/src/arch/amd64/ioapic.rs b/src/kernel/src/arch/amd64/ioapic.rs index 27ced78f..39184df8 100644 --- a/src/kernel/src/arch/amd64/ioapic.rs +++ b/src/kernel/src/arch/amd64/ioapic.rs @@ -1,14 +1,14 @@ -use acpi::{madt::Madt, sdt::Signature, InterruptModel}; use alloc::vec::Vec; +use acpi::{madt::Madt, sdt::Signature, InterruptModel}; + +use super::{acpi::get_acpi_root, memory::phys_to_virt, processor::get_bsp_id}; use crate::{ interrupt::{Destination, PinPolarity, TriggerMode}, memory::PhysAddr, spinlock::Spinlock, }; -use super::{acpi::get_acpi_root, memory::phys_to_virt, processor::get_bsp_id}; - struct IOApic { address: PhysAddr, gsi_base: u32, diff --git a/src/kernel/src/arch/amd64/memory.rs b/src/kernel/src/arch/amd64/memory.rs index 7919db40..7cede603 100755 --- a/src/kernel/src/arch/amd64/memory.rs +++ b/src/kernel/src/arch/amd64/memory.rs @@ -1,11 +1,11 @@ -use super::address::{PhysAddr, VirtAddr}; - -pub mod frame; -pub mod pagetables; - -const PHYS_MEM_OFFSET: u64 = 0xffff800000000000; -/* TODO: hide this */ -pub fn phys_to_virt(pa: PhysAddr) -> VirtAddr { - let raw: u64 = pa.into(); - VirtAddr::new(raw + PHYS_MEM_OFFSET).unwrap() -} +use super::address::{PhysAddr, VirtAddr}; + +pub mod frame; +pub mod pagetables; + +const PHYS_MEM_OFFSET: u64 = 0xffff800000000000; +/* TODO: hide this */ +pub fn phys_to_virt(pa: PhysAddr) -> VirtAddr { + let raw: u64 = pa.into(); + VirtAddr::new(raw + PHYS_MEM_OFFSET).unwrap() +} diff --git 
a/src/kernel/src/arch/amd64/memory/pagetables/consistency.rs b/src/kernel/src/arch/amd64/memory/pagetables/consistency.rs index 2dbc8812..aa8d60a7 100644 --- a/src/kernel/src/arch/amd64/memory/pagetables/consistency.rs +++ b/src/kernel/src/arch/amd64/memory/pagetables/consistency.rs @@ -78,8 +78,9 @@ impl TlbInvData { self.set_global(); self.set_full(); } else { - // Otherwise, the flags are OR'd, and the instructions concatenated. Order doesn't matter. - // If we'd have too many instructions, just fall back to full invalidation. + // Otherwise, the flags are OR'd, and the instructions concatenated. Order doesn't + // matter. If we'd have too many instructions, just fall back to full + // invalidation. if other.full() { self.set_full(); } @@ -226,9 +227,10 @@ pub struct ArchCacheLineMgr { const CACHE_LINE_SIZE: u64 = 64; impl ArchCacheLineMgr { - /// Flush a given cache line when this [ArchCacheLineMgr] is dropped. Subsequent flush requests for the same cache - /// line will be batched. Flushes for different cache lines will cause older requests to flush immediately, and the - /// new request will be flushed when this object is dropped. + /// Flush a given cache line when this [ArchCacheLineMgr] is dropped. Subsequent flush requests + /// for the same cache line will be batched. Flushes for different cache lines will cause + /// older requests to flush immediately, and the new request will be flushed when this + /// object is dropped. pub fn flush(&mut self, line: VirtAddr) { let addr: u64 = line.into(); let addr = addr & !(CACHE_LINE_SIZE - 1); @@ -272,8 +274,8 @@ impl ArchTlbMgr { this } - /// Enqueue a new TLB invalidation. is_global should be set iff the page is global, and is_terminal should be set - /// iff the invalidation is for a leaf. + /// Enqueue a new TLB invalidation. is_global should be set iff the page is global, and + /// is_terminal should be set iff the invalidation is for a leaf. pub fn enqueue(&mut self, addr: VirtAddr, is_global: bool, is_terminal: bool, level: usize) { self.data.enqueue(InvInstruction::new( addr, @@ -356,9 +358,10 @@ pub struct TlbShootdownInfo { lock: AtomicBool, // Maintain a list of a few invalidation command slots we can use, in case multiple CPUs send // out invalidation commands at the same time. Note that in the case that this array is full of - // entries, we just merge any incoming commands into another command. This is possible because there - // is always a least-upper-bound merge between two invalidation commands that always invalidates - // all data from both commands. In the worst case, this merge is simply a full, global invalidation. + // entries, we just merge any incoming commands into another command. This is possible because + // there is always a least-upper-bound merge between two invalidation commands that always + // invalidates all data from both commands. In the worst case, this merge is simply a full, + // global invalidation. data: UnsafeCell<[Option; NUM_TLB_SHOOTDOWN_ENTRIES]>, } @@ -393,8 +396,8 @@ impl TlbShootdownInfo { return; } } - // Choose the 0'th entry because if this makes it a full or global entry, we want to be able to - // exit the handling loop early. + // Choose the 0'th entry because if this makes it a full or global entry, we want to be + // able to exit the handling loop early. // Unwrap-Ok: we know that all slots are Some from the first loop. 
data[0].as_mut().unwrap().merge(new_data); self.lock.store(false, Ordering::Release); diff --git a/src/kernel/src/arch/amd64/memory/pagetables/table.rs b/src/kernel/src/arch/amd64/memory/pagetables/table.rs index 51051899..036e4a6a 100644 --- a/src/kernel/src/arch/amd64/memory/pagetables/table.rs +++ b/src/kernel/src/arch/amd64/memory/pagetables/table.rs @@ -1,8 +1,7 @@ use core::ops::{Index, IndexMut}; -use crate::{arch::address::VirtAddr, memory::PhysAddr}; - use super::Entry; +use crate::{arch::address::VirtAddr, memory::PhysAddr}; #[repr(transparent)] /// Representation of a page table. Can be indexed with []. @@ -46,10 +45,12 @@ impl Table { /// Set the current count of used entries. /// /// Note: On some architectures that make available bits in the page table entries, - /// this function may choose to do something clever, like store the count in the available bits. But it could also - /// make this function a no-op, and make [Table::read_count] just count the entries. + /// this function may choose to do something clever, like store the count in the available bits. + /// But it could also make this function a no-op, and make [Table::read_count] just count + /// the entries. pub fn set_count(&mut self, count: usize) { - // NOTE: this function doesn't need cache line or TLB flushing because the hardware never reads these bits. + // NOTE: this function doesn't need cache line or TLB flushing because the hardware never + // reads these bits. for b in 0..16 { if count & (1 << b) == 0 { self[b].set_avail_bit(false); diff --git a/src/kernel/src/arch/amd64/mod.rs b/src/kernel/src/arch/amd64/mod.rs index 62e181fb..0af1c1cf 100755 --- a/src/kernel/src/arch/amd64/mod.rs +++ b/src/kernel/src/arch/amd64/mod.rs @@ -1,96 +1,96 @@ -use core::sync::atomic::Ordering; - -pub use address::{PhysAddr, VirtAddr}; - -use crate::{ - clock::Nanoseconds, - interrupt::{Destination, PinPolarity, TriggerMode}, - thread::current_thread_ref, - BootInfo, -}; - -pub mod acpi; -pub mod address; -mod apic; -pub mod context; -mod gdt; -pub mod image; -pub mod interrupt; -pub mod ioapic; -pub mod memory; -mod pit; -pub mod processor; -mod start; -mod syscall; -pub mod thread; -mod tsc; -pub use apic::{poke_cpu, send_ipi}; -pub use start::BootInfoSystemTable; - -use self::apic::get_lapic; -pub fn init(boot_info: &B) { - gdt::init(); - interrupt::init_idt(); - apic::init(true); - - let rsdp = boot_info.get_system_table(BootInfoSystemTable::Rsdp); - acpi::init(rsdp.raw()); -} - -pub fn init_secondary() { - gdt::init_secondary(); - interrupt::init_idt(); - apic::init(false); -} - -pub fn init_interrupts() { - ioapic::init() -} - -pub fn start_clock(statclock_hz: u64, stat_cb: fn(Nanoseconds)) { - pit::setup_freq(statclock_hz, stat_cb); -} - -pub fn schedule_oneshot_tick(time: Nanoseconds) { - get_lapic().setup_oneshot_timer(time) -} - -/// Jump into userspace -/// # Safety -/// The stack and target must be valid addresses. 
-pub unsafe fn jump_to_user( - target: crate::memory::VirtAddr, - stack: crate::memory::VirtAddr, - arg: u64, -) { - use crate::syscall::SyscallContext; - let ctx = syscall::X86SyscallContext::create_jmp_context(target, stack, arg); - crate::thread::exit_kernel(); - - { - /* we need this scope the drop the current thread ref before returning to user */ - let user_fs = current_thread_ref() - .unwrap() - .arch - .user_fs - .load(Ordering::SeqCst); - x86::msr::wrmsr(x86::msr::IA32_FS_BASE, user_fs); - } - syscall::return_to_user(&ctx as *const syscall::X86SyscallContext); -} - -pub fn set_interrupt( - num: u32, - masked: bool, - trigger: TriggerMode, - polarity: PinPolarity, - destination: Destination, -) { - ioapic::set_interrupt(num - 32, num, masked, trigger, polarity, destination); -} - -pub fn debug_shutdown(code: u32) { - unsafe { - x86::io::outw(0xf4, code as u16); - } -} +use core::sync::atomic::Ordering; + +pub use address::{PhysAddr, VirtAddr}; + +use crate::{ + clock::Nanoseconds, + interrupt::{Destination, PinPolarity, TriggerMode}, + thread::current_thread_ref, + BootInfo, +}; + +pub mod acpi; +pub mod address; +mod apic; +pub mod context; +mod gdt; +pub mod image; +pub mod interrupt; +pub mod ioapic; +pub mod memory; +mod pit; +pub mod processor; +mod start; +mod syscall; +pub mod thread; +mod tsc; +pub use apic::{poke_cpu, send_ipi}; +pub use start::BootInfoSystemTable; + +use self::apic::get_lapic; +pub fn init(boot_info: &B) { + gdt::init(); + interrupt::init_idt(); + apic::init(true); + + let rsdp = boot_info.get_system_table(BootInfoSystemTable::Rsdp); + acpi::init(rsdp.raw()); +} + +pub fn init_secondary() { + gdt::init_secondary(); + interrupt::init_idt(); + apic::init(false); +} + +pub fn init_interrupts() { + ioapic::init() +} + +pub fn start_clock(statclock_hz: u64, stat_cb: fn(Nanoseconds)) { + pit::setup_freq(statclock_hz, stat_cb); +} + +pub fn schedule_oneshot_tick(time: Nanoseconds) { + get_lapic().setup_oneshot_timer(time) +} + +/// Jump into userspace +/// # Safety +/// The stack and target must be valid addresses. 
+pub unsafe fn jump_to_user( + target: crate::memory::VirtAddr, + stack: crate::memory::VirtAddr, + arg: u64, +) { + use crate::syscall::SyscallContext; + let ctx = syscall::X86SyscallContext::create_jmp_context(target, stack, arg); + crate::thread::exit_kernel(); + + { + /* we need this scope the drop the current thread ref before returning to user */ + let user_fs = current_thread_ref() + .unwrap() + .arch + .user_fs + .load(Ordering::SeqCst); + x86::msr::wrmsr(x86::msr::IA32_FS_BASE, user_fs); + } + syscall::return_to_user(&ctx as *const syscall::X86SyscallContext); +} + +pub fn set_interrupt( + num: u32, + masked: bool, + trigger: TriggerMode, + polarity: PinPolarity, + destination: Destination, +) { + ioapic::set_interrupt(num - 32, num, masked, trigger, polarity, destination); +} + +pub fn debug_shutdown(code: u32) { + unsafe { + x86::io::outw(0xf4, code as u16); + } +} diff --git a/src/kernel/src/arch/amd64/processor.rs b/src/kernel/src/arch/amd64/processor.rs index 832d3085..5617d486 100755 --- a/src/kernel/src/arch/amd64/processor.rs +++ b/src/kernel/src/arch/amd64/processor.rs @@ -1,7 +1,11 @@ -use core::sync::atomic::{AtomicU64, Ordering}; - use alloc::{boxed::Box, vec::Vec}; +use core::sync::atomic::{AtomicU64, Ordering}; +use super::{ + acpi::get_acpi_root, + interrupt::InterProcessorInterrupt, + memory::pagetables::{tlb_shootdown_handler, TlbShootdownInfo}, +}; use crate::{ interrupt::Destination, memory::VirtAddr, @@ -9,12 +13,6 @@ use crate::{ processor::{current_processor, Processor}, }; -use super::{ - acpi::get_acpi_root, - interrupt::InterProcessorInterrupt, - memory::pagetables::{tlb_shootdown_handler, TlbShootdownInfo}, -}; - #[repr(C)] struct GsScratch { kernel_stack: u64, @@ -60,7 +58,8 @@ pub fn init(tls: VirtAddr) { let cpuid = x86::cpuid::CpuId::new().get_extended_feature_info(); let mut gs_scratch = Box::new(GsScratch::new()); gs_scratch.kernel_fs = tls.raw(); - // Intentionally leak this memory, we don't need to reference it again outside interrupt assembly code. + // Intentionally leak this memory, we don't need to reference it again outside interrupt + // assembly code. let gs_scratch = Box::into_raw(gs_scratch); if let Some(ef) = cpuid { if ef.has_fsgsbase() { diff --git a/src/kernel/src/arch/amd64/start.rs b/src/kernel/src/arch/amd64/start.rs index cbc0b0e2..081bfcb1 100644 --- a/src/kernel/src/arch/amd64/start.rs +++ b/src/kernel/src/arch/amd64/start.rs @@ -1,8 +1,8 @@ use alloc::vec::Vec; + use limine::{ - BootInfoRequest, EntryPointRequest, File, FramebufferRequest, - KernelFileRequest, MemoryMapEntryType, MemmapRequest, ModuleRequest, - Ptr, RsdpRequest, + BootInfoRequest, EntryPointRequest, File, FramebufferRequest, KernelFileRequest, MemmapRequest, + MemoryMapEntryType, ModuleRequest, Ptr, RsdpRequest, }; use crate::{ @@ -131,8 +131,7 @@ fn limine_entry() -> ! 
{ } static LIMINE_BOOTINFO: BootInfoRequest = BootInfoRequest::new(0); -static LIMINE_ENTRY: EntryPointRequest = - EntryPointRequest::new(0).entry(Ptr::new(limine_entry)); +static LIMINE_ENTRY: EntryPointRequest = EntryPointRequest::new(0).entry(Ptr::new(limine_entry)); static LIMINE_FB: FramebufferRequest = FramebufferRequest::new(0); static LIMINE_MOD: ModuleRequest = ModuleRequest::new(0); static LIMINE_MEM: MemmapRequest = MemmapRequest::new(0); diff --git a/src/kernel/src/arch/amd64/syscall.rs b/src/kernel/src/arch/amd64/syscall.rs index b10f971e..53b0ce6d 100644 --- a/src/kernel/src/arch/amd64/syscall.rs +++ b/src/kernel/src/arch/amd64/syscall.rs @@ -2,14 +2,13 @@ use core::sync::atomic::Ordering; use twizzler_abi::{arch::XSAVE_LEN, upcall::UpcallFrame}; -use crate::{ - arch::thread::use_xsave, memory::VirtAddr, syscall::SyscallContext, thread::current_thread_ref, -}; - use super::{ interrupt::{return_with_frame_to_user, IsrContext}, thread::{Registers, UpcallAble}, }; +use crate::{ + arch::thread::use_xsave, memory::VirtAddr, syscall::SyscallContext, thread::current_thread_ref, +}; #[derive(Default, Clone, Copy, Debug)] #[repr(C)] @@ -185,18 +184,20 @@ unsafe extern "C" fn syscall_entry_c(context: *mut X86SyscallContext, kernel_fs: // we'll use the ISR return path, which doesn't. let mut rf = cur_th.arch.upcall_restore_frame.borrow_mut(); if let Some(up_frame) = rf.take() { - // we MUST manually drop this, _and_ the current thread ref (a bit later), because otherwise we leave - // them hanging when we trampoline back into userspace. + // we MUST manually drop this, _and_ the current thread ref (a bit later), because + // otherwise we leave them hanging when we trampoline back into userspace. drop(rf); - // Restore the sse registers. These don't get restored by the isr return path, so we have to do it ourselves. + // Restore the sse registers. These don't get restored by the isr return path, so we + // have to do it ourselves. if use_xsave() { core::arch::asm!("xrstor [{}]", in(reg) up_frame.xsave_region.as_ptr(), in("rax") 3, in("rdx") 0); } else { core::arch::asm!("fxrstor [{}]", in(reg) up_frame.xsave_region.as_ptr()); } - // Restore the thread pointer (it might have changed, and we also allow for it to change inside the upcall frame during the upcall) + // Restore the thread pointer (it might have changed, and we also allow for it to change + // inside the upcall frame during the upcall) cur_th .arch .user_fs diff --git a/src/kernel/src/arch/amd64/thread.rs b/src/kernel/src/arch/amd64/thread.rs index 4c17b8d7..412c5c11 100644 --- a/src/kernel/src/arch/amd64/thread.rs +++ b/src/kernel/src/arch/amd64/thread.rs @@ -11,6 +11,7 @@ use twizzler_abi::{ }, }; +use super::{interrupt::IsrContext, syscall::X86SyscallContext}; use crate::{ arch::amd64::gdt::set_kernel_stack, memory::VirtAddr, @@ -18,8 +19,6 @@ use crate::{ thread::{current_thread_ref, Thread}, }; -use super::{interrupt::IsrContext, syscall::X86SyscallContext}; - #[derive(Copy, Clone, Debug)] pub enum Registers { None, @@ -90,7 +89,8 @@ unsafe extern "C" fn __do_switch( "test rax, rax", "jnz sw_wait", "do_the_switch:", - /* we can just store to the new switch lock, since we're guaranteed to be the only CPU here */ + /* we can just store to the new switch lock, since we're guaranteed to be the only CPU + * here */ "lock mov qword ptr [rdx], 1", "mfence", /* okay, now load the new stack pointer and restore */ @@ -106,7 +106,8 @@ unsafe extern "C" fn __do_switch( "pop rax", "jmp rax", "sw_wait:", - /* okay, so we have to wait. 
Just keep retrying to read zero from the lock, pausing in the meantime */ + /* okay, so we have to wait. Just keep retrying to read zero from the lock, pausing in + * the meantime */ "pause", "mov rax, [rdx]", "test rax, rax", @@ -204,8 +205,8 @@ where return false; } - // TODO: once security contexts are more implemented, we'll need to do a bunch of permission checks - // on the stack and target jump addresses. + // TODO: once security contexts are more implemented, we'll need to do a bunch of permission + // checks on the stack and target jump addresses. // Don't touch the red zone for the function we were in. let stack_top = stack_pointer - RED_ZONE_SIZE as u64; @@ -250,7 +251,8 @@ where let frame_ptr = frame_start as usize as *mut UpcallFrame; let mut frame: UpcallFrame = (*regs).into(); - // Step 3a: we need to fill out some extra stuff in the upcall frame, like the thread pointer and fpu state. + // Step 3a: we need to fill out some extra stuff in the upcall frame, like the thread pointer + // and fpu state. frame.thread_ptr = current_thread_ref() .unwrap() .arch diff --git a/src/kernel/src/arch/amd64/tsc.rs b/src/kernel/src/arch/amd64/tsc.rs index e82c5bea..fcd1a9d4 100644 --- a/src/kernel/src/arch/amd64/tsc.rs +++ b/src/kernel/src/arch/amd64/tsc.rs @@ -1,7 +1,7 @@ -use crate::time::{ClockHardware, Ticks}; - use twizzler_abi::syscall::{ClockFlags, ClockInfo, FemtoSeconds, TimeSpan}; +use crate::time::{ClockHardware, Ticks}; + // 1 ms = 1000000 ns // 200 milliseconds const SLEEP_TIME: u64 = 200 * 1_000_000; diff --git a/src/kernel/src/arch/mod.rs b/src/kernel/src/arch/mod.rs index 0f2cab40..2369a2eb 100755 --- a/src/kernel/src/arch/mod.rs +++ b/src/kernel/src/arch/mod.rs @@ -1,11 +1,11 @@ -#[cfg(target_arch = "x86_64")] -mod amd64; - -#[cfg(target_arch = "x86_64")] -pub use amd64::*; - -#[cfg(target_arch = "aarch64")] -mod aarch64; - -#[cfg(target_arch = "aarch64")] -pub use aarch64::*; +#[cfg(target_arch = "x86_64")] +mod amd64; + +#[cfg(target_arch = "x86_64")] +pub use amd64::*; + +#[cfg(target_arch = "aarch64")] +mod aarch64; + +#[cfg(target_arch = "aarch64")] +pub use aarch64::*; diff --git a/src/kernel/src/clock.rs b/src/kernel/src/clock.rs index 62167afb..1046175e 100644 --- a/src/kernel/src/clock.rs +++ b/src/kernel/src/clock.rs @@ -1,6 +1,9 @@ +use alloc::{boxed::Box, vec::Vec}; use core::sync::atomic::{AtomicU64, Ordering}; -use alloc::{boxed::Box, vec::Vec}; +use twizzler_abi::syscall::{ + Clock, ClockID, ClockInfo, ClockKind, FemtoSeconds, ReadClockListError, +}; use crate::{ condvar::CondVar, @@ -11,10 +14,6 @@ use crate::{ time::{ClockHardware, Ticks, CLOCK_OFFSET, TICK_SOURCES}, }; -use twizzler_abi::syscall::{ - Clock, ClockID, ClockInfo, ClockKind, FemtoSeconds, ReadClockListError, -}; - // TODO: replace with NanoSeconds from twizzler-abi. pub type Nanoseconds = u64; @@ -93,7 +92,8 @@ pub struct TimeoutKey { } impl TimeoutKey { - /// Remove all timeouts with this key. Returns true if a key was actually removed (timeout hasn't fired). + /// Remove all timeouts with this key. Returns true if a key was actually removed (timeout + /// hasn't fired). pub fn release(self) -> bool { let did_remove = TIMEOUT_QUEUE.lock().remove(&self); // Our destructor just calls remove, above, so skip it when doing this manual release. 
diff --git a/src/kernel/src/condvar.rs b/src/kernel/src/condvar.rs index 9f5eddf6..66ece0b7 100644 --- a/src/kernel/src/condvar.rs +++ b/src/kernel/src/condvar.rs @@ -77,18 +77,17 @@ impl Drop for CondVar { #[cfg(test)] mod tests { + use alloc::sync::Arc; use core::time::Duration; - use alloc::sync::Arc; use twizzler_kernel_macros::kernel_test; + use super::CondVar; use crate::{ spinlock::Spinlock, thread::{entry::run_closure_in_new_thread, priority::Priority}, }; - use super::CondVar; - #[kernel_test] fn test_condvar() { //logln!("a: {}", crate::interrupt::disable()); diff --git a/src/kernel/src/device.rs b/src/kernel/src/device.rs index 5c357c47..f2068bda 100644 --- a/src/kernel/src/device.rs +++ b/src/kernel/src/device.rs @@ -1,6 +1,6 @@ +use alloc::{borrow::ToOwned, collections::BTreeMap, string::String, sync::Arc, vec::Vec}; use core::mem::size_of; -use alloc::{borrow::ToOwned, collections::BTreeMap, string::String, sync::Arc, vec::Vec}; use memoffset::offset_of; use twizzler_abi::{ device::{ diff --git a/src/kernel/src/idcounter.rs b/src/kernel/src/idcounter.rs index 73367549..b3794b85 100644 --- a/src/kernel/src/idcounter.rs +++ b/src/kernel/src/idcounter.rs @@ -1,12 +1,10 @@ +use alloc::vec::Vec; use core::{ fmt::Display, sync::atomic::{AtomicU64, Ordering}, }; -use crate::once::Once; -use alloc::vec::Vec; - -use crate::mutex::Mutex; +use crate::{mutex::Mutex, once::Once}; pub struct IdCounter { counter: AtomicU64, @@ -74,8 +72,8 @@ impl IdCounter { fn release(&self, id: u64) { assert!(id > 0); self.reuse.call_once(|| Mutex::new(Vec::new())); - //TODO: we could optimize here by trying to subtract from ID_COUNTER using CAS if the thread ID - //is the current top value of the counter + //TODO: we could optimize here by trying to subtract from ID_COUNTER using CAS if the + // thread ID is the current top value of the counter let mut reuser = self.reuse.wait().lock(); reuser.push(id); } diff --git a/src/kernel/src/image.rs b/src/kernel/src/image.rs index e5f32dc6..b6319b1e 100644 --- a/src/kernel/src/image.rs +++ b/src/kernel/src/image.rs @@ -2,8 +2,7 @@ use core::alloc::Layout; use xmas_elf::program::{self}; -use crate::memory::VirtAddr; -use crate::once::Once; +use crate::{memory::VirtAddr, once::Once}; static KERNEL_IMAGE: Once<&'static [u8]> = Once::new(); pub fn init(kernel_image: &'static [u8]) { @@ -60,12 +59,12 @@ fn variant1(tls_template: TlsInfo) -> VirtAddr { let tls_region = unsafe { // allocate a region of memory initialized to zero let tcb_base = alloc::alloc::alloc_zeroed(layout); - + // copy from the kernel's ELF TLS to the allocated region of memory // the layout of this region in memory is architechture dependent. // - // Architechtures that use TLS Variant I (e.g. ARM) have the thread pointer - // point to the start of the TCB and thread-local vars are defined + // Architechtures that use TLS Variant I (e.g. ARM) have the thread pointer + // point to the start of the TCB and thread-local vars are defined // before this in higher memory addresses. So accessing a thread // local var adds some offset to the thread pointer @@ -73,14 +72,18 @@ fn variant1(tls_template: TlsInfo) -> VirtAddr { // the pointer offset by sizeof u8 bytes. let tls_base = tcb_base.add(reserved_bytes); - core::ptr::copy_nonoverlapping(tls_template.start_addr.as_ptr(), tls_base, tls_template.file_size); + core::ptr::copy_nonoverlapping( + tls_template.start_addr.as_ptr(), + tls_base, + tls_template.file_size, + ); tcb_base }; - + // the TP points to the base of the TCB which exists in lower memory. 
let tcb_base = VirtAddr::from_ptr(tls_region); - + tcb_base } @@ -119,7 +122,6 @@ fn variant2(tls_template: TlsInfo) -> VirtAddr { tcb_base } - #[cfg(test)] mod test { use twizzler_kernel_macros::kernel_test; @@ -133,8 +135,10 @@ mod test { #[kernel_test] fn tls_test() { // get the initial value of TLS var - assert_eq!(SOME_INT, TLS_TEST_MAGIC, - "TLS var not initialized correctly: {:#x}", SOME_INT + assert_eq!( + SOME_INT, TLS_TEST_MAGIC, + "TLS var not initialized correctly: {:#x}", + SOME_INT ); } } diff --git a/src/kernel/src/initrd.rs b/src/kernel/src/initrd.rs index 25d1b6bd..e1543779 100644 --- a/src/kernel/src/initrd.rs +++ b/src/kernel/src/initrd.rs @@ -1,12 +1,10 @@ -use crate::once::Once; -use alloc::borrow::ToOwned; -use alloc::sync::Arc; +use alloc::{borrow::ToOwned, collections::BTreeMap, string::String, sync::Arc}; -use crate::memory::VirtAddr; -use crate::obj::ObjectRef; -use crate::obj::{self, pages::Page}; -use alloc::collections::BTreeMap; -use alloc::string::String; +use crate::{ + memory::VirtAddr, + obj::{self, pages::Page, ObjectRef}, + once::Once, +}; pub struct BootModule { pub start: VirtAddr, pub length: usize, diff --git a/src/kernel/src/interrupt.rs b/src/kernel/src/interrupt.rs index 19f1f374..66480fa8 100644 --- a/src/kernel/src/interrupt.rs +++ b/src/kernel/src/interrupt.rs @@ -1,4 +1,5 @@ use alloc::vec::Vec; + use twizzler_abi::kso::{InterruptAllocateOptions, InterruptPriority}; use crate::{ diff --git a/src/kernel/src/log.rs b/src/kernel/src/log.rs index 09bf41de..33cf2b87 100755 --- a/src/kernel/src/log.rs +++ b/src/kernel/src/log.rs @@ -1,388 +1,388 @@ -use core::{ - fmt::Write, - sync::atomic::{AtomicU64, Ordering}, -}; - -use twizzler_abi::syscall::{ - KernelConsoleReadBufferError, KernelConsoleReadError, KernelConsoleReadFlags, -}; - -use crate::{interrupt, spinlock::Spinlock}; - -const KEC_BUFFER_LEN: usize = 4096; -const MAX_SINGLE_WRITE: usize = KEC_BUFFER_LEN / 2; -struct KernelConsoleInner { - state: AtomicU64, - buffer: core::cell::UnsafeCell<[u8; KEC_BUFFER_LEN]>, -} -unsafe impl Sync for KernelConsoleInner {} -pub trait MessageLevel {} -pub struct EmergencyMessage; -impl MessageLevel for EmergencyMessage {} -pub struct NormalMessage; -impl MessageLevel for NormalMessage {} - -pub struct ConsoleWriteError; - -const INPUT_BUFFER_SIZE: usize = 1024; -pub struct KernelConsoleReadBuffer { - buf: [u8; INPUT_BUFFER_SIZE], - pos: usize, -} - -impl KernelConsoleReadBuffer { - const fn new() -> Self { - Self { - buf: [0; INPUT_BUFFER_SIZE], - pos: 0, - } - } - pub fn push_input_byte(&mut self, byte: u8) { - if self.pos == INPUT_BUFFER_SIZE { - return; - } - self.buf[self.pos] = byte; - self.pos += 1; - } - - pub fn read_byte(&mut self) -> Option { - if self.pos == 0 { - return None; - } - let byte = self.buf[0]; - self.buf.copy_within(1.., 0); - self.pos -= 1; - Some(byte) - } -} - -pub struct KernelConsole { - inner: &'static KernelConsoleInner, - hardware: T, - lock: Spinlock<()>, - read_lock: Spinlock, - _pd: core::marker::PhantomData, -} -unsafe impl Sync for KernelConsole {} - -static KERNEL_CONSOLE_MAIN: KernelConsoleInner = KernelConsoleInner { - state: AtomicU64::new(0), - buffer: core::cell::UnsafeCell::new([0; KEC_BUFFER_LEN]), -}; - -pub trait KernelConsoleHardware { - fn write(&self, data: &[u8], flags: KernelConsoleWriteFlags); -} - -impl core::fmt::Write for KernelConsole { - fn write_str(&mut self, s: &str) -> core::fmt::Result { - let _ = self.write(s.as_bytes(), KernelConsoleWriteFlags::empty()); - Ok(()) - } -} - -impl 
core::fmt::Write for KernelConsole { - fn write_str(&mut self, s: &str) -> core::fmt::Result { - let _ = self.write(s.as_bytes(), KernelConsoleWriteFlags::empty()); - Ok(()) - } -} - -bitflags::bitflags! { - #[derive(Clone, Copy)] - pub struct KernelConsoleWriteFlags: u32 { - const DISCARD_ON_FULL = 1; - } -} - -impl From for KernelConsoleWriteFlags { - fn from(x: twizzler_abi::syscall::KernelConsoleWriteFlags) -> Self { - if x.contains(twizzler_abi::syscall::KernelConsoleWriteFlags::DISCARD_ON_FULL) { - Self::DISCARD_ON_FULL - } else { - Self::empty() - } - } -} - -fn write_head(s: u64) -> u64 { - (s >> 32) & 0xffff -} - -fn write_resv(s: u64) -> u64 { - (s >> 16) & 0xffff -} - -fn read_head(s: u64) -> u64 { - s & 0xffff -} - -fn new_state(rh: u64, wh: u64, wr: u64) -> u64 { - ((rh % KEC_BUFFER_LEN as u64) & 0xffff) - | (((wh % KEC_BUFFER_LEN as u64) & 0xffff) << 32) - | (((wr % KEC_BUFFER_LEN as u64) & 0xffff) << 16) -} - -fn did_pass(x: u64, y: u64, l: u64, n: u64) -> bool { - assert!(l < n); - let next_x = (x + l) % n; - let did_wrap = next_x < x; - if x < y { - did_wrap || next_x >= y - } else { - next_x >= y && did_wrap - } -} - -fn reserve_write(state: u64, len: usize) -> u64 { - let len = len as u64; - let wr = write_resv(state); - let mut wh = write_head(state); - let mut rh = read_head(state); - - let passed_rh = did_pass(wr, rh, len, KEC_BUFFER_LEN as u64); - let passed_wh = did_pass(wr, wh, len, KEC_BUFFER_LEN as u64); - - let wr = (wr + len) % KEC_BUFFER_LEN as u64; - - if passed_rh { - rh = wr; - } - - if passed_wh { - wh = (wr - len) % KEC_BUFFER_LEN as u64; - } - - new_state(rh, wh, wr) -} - -fn commit_write(state: u64, len: usize) -> u64 { - let wh = write_head(state); - let wr = write_resv(state); - new_state(read_head(state), wh + len as u64, wr) -} - -fn reserve_space(state: u64, len: usize, toss: bool) -> (bool, u64, u64) { - let new_state = reserve_write(state, len); - ( - read_head(state) == read_head(new_state) || !toss, - new_state, - write_head(state), - ) -} - -impl KernelConsoleInner { - fn try_commit(&self, old: u64, new: u64) -> bool { - self.state - .compare_exchange(old, new, Ordering::SeqCst, Ordering::SeqCst) - .is_ok() - } - - fn write_buffer( - &self, - data: &[u8], - flags: KernelConsoleWriteFlags, - ) -> Result<(), ConsoleWriteError> { - let data = &data[0..core::cmp::min(data.len(), MAX_SINGLE_WRITE)]; - - loop { - let state = self.state.load(Ordering::SeqCst); - let (ok, new_state, copy_offset) = reserve_space( - state, - data.len(), - flags.contains(KernelConsoleWriteFlags::DISCARD_ON_FULL), - ); - if !ok { - return Err(ConsoleWriteError {}); - } - - if !self.try_commit(state, new_state) { - continue; - } - - let (first_len, second_len) = if copy_offset + data.len() as u64 > KEC_BUFFER_LEN as u64 - { - let first_len = KEC_BUFFER_LEN as u64 - copy_offset; - (first_len, data.len() as u64 - first_len) - } else { - (data.len() as u64, 0) - }; - (&mut unsafe { *self.buffer.get() }) - [copy_offset as usize..(copy_offset + first_len) as usize] - .copy_from_slice(&data[0..first_len as usize]); - (&mut unsafe { *self.buffer.get() })[0..second_len as usize] - .copy_from_slice(&data[first_len as usize..(first_len + second_len) as usize]); - let new_committed_state = commit_write(new_state, data.len()); - if self.try_commit(new_state, new_committed_state) { - break; - } - } - Ok(()) - } -} - -impl KernelConsole { - pub fn write( - &self, - data: &[u8], - flags: KernelConsoleWriteFlags, - ) -> Result<(), ConsoleWriteError> { - self.hardware.write(data, flags); - 
self.inner.write_buffer(data, flags) - } -} - -impl KernelConsole { - pub fn write( - &self, - data: &[u8], - flags: KernelConsoleWriteFlags, - ) -> Result<(), ConsoleWriteError> { - self.hardware.write(data, flags); - self.inner.write_buffer(data, flags) - } -} - -impl KernelConsole { - fn read_buffer_bytes(&self, _slice: &mut [u8]) -> Result { - todo!() - } - - fn read_bytes( - &self, - slice: &mut [u8], - flags: KernelConsoleReadFlags, - ) -> Result { - let mut i = 0; - loop { - if i == slice.len() { - break; - } - let b = &mut slice[i]; - let read = self.read_lock.lock().read_byte(); - if let Some(x) = read { - *b = match x { - 4 => return Ok(i), - _ => x, - }; - i += 1; - } else if flags.contains(KernelConsoleReadFlags::NONBLOCKING) || i > 0 { - return Ok(i); - } else { - // TODO: sleep - crate::sched::schedule(true); - } - } - Ok(slice.len()) - } -} - -pub fn write_bytes(slice: &[u8], flags: KernelConsoleWriteFlags) -> Result<(), ConsoleWriteError> { - unsafe { NORMAL_CONSOLE.write(slice, flags) } -} - -pub fn read_bytes( - slice: &mut [u8], - flags: KernelConsoleReadFlags, -) -> Result { - unsafe { NORMAL_CONSOLE.read_bytes(slice, flags) } -} - -pub fn read_buffer_bytes(slice: &mut [u8]) -> Result { - unsafe { NORMAL_CONSOLE.read_buffer_bytes(slice) } -} - -pub fn push_input_byte(byte: u8) { - unsafe { - let byte = match byte { - 13 => 10, - 127 => 8, - x => x, - }; - NORMAL_CONSOLE.read_lock.lock().push_input_byte(byte); - if byte == 8 { - let _ = write_bytes(&[8, b' '], KernelConsoleWriteFlags::DISCARD_ON_FULL); - } - let _ = write_bytes(&[byte], KernelConsoleWriteFlags::DISCARD_ON_FULL); - } -} - -static mut EMERGENCY_CONSOLE: KernelConsole< - crate::machine::MachineConsoleHardware, - EmergencyMessage, -> = KernelConsole { - inner: &KERNEL_CONSOLE_MAIN, - hardware: crate::machine::MachineConsoleHardware::new(), - _pd: core::marker::PhantomData, - lock: Spinlock::new(()), - read_lock: Spinlock::new(KernelConsoleReadBuffer::new()), -}; - -static mut NORMAL_CONSOLE: KernelConsole = - KernelConsole { - inner: &KERNEL_CONSOLE_MAIN, - hardware: crate::machine::MachineConsoleHardware::new(), - _pd: core::marker::PhantomData, - lock: Spinlock::new(()), - read_lock: Spinlock::new(KernelConsoleReadBuffer::new()), - }; - -#[doc(hidden)] -pub fn _print_normal(args: ::core::fmt::Arguments) { - let istate = interrupt::disable(); - unsafe { - let _guard = NORMAL_CONSOLE.lock.lock(); - NORMAL_CONSOLE - .write_fmt(args) - .expect("printing to serial failed"); - } - interrupt::set(istate); -} - -pub fn _print_emergency(args: ::core::fmt::Arguments) { - unsafe { - EMERGENCY_CONSOLE - .write_fmt(args) - .expect("printing to serial failed"); - } -} - -#[macro_export] -macro_rules! log { - ($($arg:tt)*) => { - $crate::log::_print_normal(format_args!($($arg)*)) - }; -} - -#[macro_export] -macro_rules! logln { - () => { - $crate::log!("\n") - }; - ($fmt:expr) => { - $crate::log!(concat!($fmt, "\n")) - }; - ($fmt:expr, $($arg:tt)*) => { - $crate::log!(concat!($fmt, "\n"), $($arg)*) - }; -} - -#[macro_export] -macro_rules! emerglog { - ($($arg:tt)*) => { - $crate::log::_print_emergency(format_args!($($arg)*)) - }; -} - -#[macro_export] -macro_rules! 
emerglogln { - () => { - $crate::emerglog!("\n") - }; - ($fmt:expr) => { - $crate::emerglog!(concat!($fmt, "\n")) - }; - ($fmt:expr, $($arg:tt)*) => { - $crate::emerglog!(concat!($fmt, "\n"), $($arg)*) - }; -} +use core::{ + fmt::Write, + sync::atomic::{AtomicU64, Ordering}, +}; + +use twizzler_abi::syscall::{ + KernelConsoleReadBufferError, KernelConsoleReadError, KernelConsoleReadFlags, +}; + +use crate::{interrupt, spinlock::Spinlock}; + +const KEC_BUFFER_LEN: usize = 4096; +const MAX_SINGLE_WRITE: usize = KEC_BUFFER_LEN / 2; +struct KernelConsoleInner { + state: AtomicU64, + buffer: core::cell::UnsafeCell<[u8; KEC_BUFFER_LEN]>, +} +unsafe impl Sync for KernelConsoleInner {} +pub trait MessageLevel {} +pub struct EmergencyMessage; +impl MessageLevel for EmergencyMessage {} +pub struct NormalMessage; +impl MessageLevel for NormalMessage {} + +pub struct ConsoleWriteError; + +const INPUT_BUFFER_SIZE: usize = 1024; +pub struct KernelConsoleReadBuffer { + buf: [u8; INPUT_BUFFER_SIZE], + pos: usize, +} + +impl KernelConsoleReadBuffer { + const fn new() -> Self { + Self { + buf: [0; INPUT_BUFFER_SIZE], + pos: 0, + } + } + pub fn push_input_byte(&mut self, byte: u8) { + if self.pos == INPUT_BUFFER_SIZE { + return; + } + self.buf[self.pos] = byte; + self.pos += 1; + } + + pub fn read_byte(&mut self) -> Option { + if self.pos == 0 { + return None; + } + let byte = self.buf[0]; + self.buf.copy_within(1.., 0); + self.pos -= 1; + Some(byte) + } +} + +pub struct KernelConsole { + inner: &'static KernelConsoleInner, + hardware: T, + lock: Spinlock<()>, + read_lock: Spinlock, + _pd: core::marker::PhantomData, +} +unsafe impl Sync for KernelConsole {} + +static KERNEL_CONSOLE_MAIN: KernelConsoleInner = KernelConsoleInner { + state: AtomicU64::new(0), + buffer: core::cell::UnsafeCell::new([0; KEC_BUFFER_LEN]), +}; + +pub trait KernelConsoleHardware { + fn write(&self, data: &[u8], flags: KernelConsoleWriteFlags); +} + +impl core::fmt::Write for KernelConsole { + fn write_str(&mut self, s: &str) -> core::fmt::Result { + let _ = self.write(s.as_bytes(), KernelConsoleWriteFlags::empty()); + Ok(()) + } +} + +impl core::fmt::Write for KernelConsole { + fn write_str(&mut self, s: &str) -> core::fmt::Result { + let _ = self.write(s.as_bytes(), KernelConsoleWriteFlags::empty()); + Ok(()) + } +} + +bitflags::bitflags! 
{ + #[derive(Clone, Copy)] + pub struct KernelConsoleWriteFlags: u32 { + const DISCARD_ON_FULL = 1; + } +} + +impl From for KernelConsoleWriteFlags { + fn from(x: twizzler_abi::syscall::KernelConsoleWriteFlags) -> Self { + if x.contains(twizzler_abi::syscall::KernelConsoleWriteFlags::DISCARD_ON_FULL) { + Self::DISCARD_ON_FULL + } else { + Self::empty() + } + } +} + +fn write_head(s: u64) -> u64 { + (s >> 32) & 0xffff +} + +fn write_resv(s: u64) -> u64 { + (s >> 16) & 0xffff +} + +fn read_head(s: u64) -> u64 { + s & 0xffff +} + +fn new_state(rh: u64, wh: u64, wr: u64) -> u64 { + ((rh % KEC_BUFFER_LEN as u64) & 0xffff) + | (((wh % KEC_BUFFER_LEN as u64) & 0xffff) << 32) + | (((wr % KEC_BUFFER_LEN as u64) & 0xffff) << 16) +} + +fn did_pass(x: u64, y: u64, l: u64, n: u64) -> bool { + assert!(l < n); + let next_x = (x + l) % n; + let did_wrap = next_x < x; + if x < y { + did_wrap || next_x >= y + } else { + next_x >= y && did_wrap + } +} + +fn reserve_write(state: u64, len: usize) -> u64 { + let len = len as u64; + let wr = write_resv(state); + let mut wh = write_head(state); + let mut rh = read_head(state); + + let passed_rh = did_pass(wr, rh, len, KEC_BUFFER_LEN as u64); + let passed_wh = did_pass(wr, wh, len, KEC_BUFFER_LEN as u64); + + let wr = (wr + len) % KEC_BUFFER_LEN as u64; + + if passed_rh { + rh = wr; + } + + if passed_wh { + wh = (wr - len) % KEC_BUFFER_LEN as u64; + } + + new_state(rh, wh, wr) +} + +fn commit_write(state: u64, len: usize) -> u64 { + let wh = write_head(state); + let wr = write_resv(state); + new_state(read_head(state), wh + len as u64, wr) +} + +fn reserve_space(state: u64, len: usize, toss: bool) -> (bool, u64, u64) { + let new_state = reserve_write(state, len); + ( + read_head(state) == read_head(new_state) || !toss, + new_state, + write_head(state), + ) +} + +impl KernelConsoleInner { + fn try_commit(&self, old: u64, new: u64) -> bool { + self.state + .compare_exchange(old, new, Ordering::SeqCst, Ordering::SeqCst) + .is_ok() + } + + fn write_buffer( + &self, + data: &[u8], + flags: KernelConsoleWriteFlags, + ) -> Result<(), ConsoleWriteError> { + let data = &data[0..core::cmp::min(data.len(), MAX_SINGLE_WRITE)]; + + loop { + let state = self.state.load(Ordering::SeqCst); + let (ok, new_state, copy_offset) = reserve_space( + state, + data.len(), + flags.contains(KernelConsoleWriteFlags::DISCARD_ON_FULL), + ); + if !ok { + return Err(ConsoleWriteError {}); + } + + if !self.try_commit(state, new_state) { + continue; + } + + let (first_len, second_len) = if copy_offset + data.len() as u64 > KEC_BUFFER_LEN as u64 + { + let first_len = KEC_BUFFER_LEN as u64 - copy_offset; + (first_len, data.len() as u64 - first_len) + } else { + (data.len() as u64, 0) + }; + (&mut unsafe { *self.buffer.get() }) + [copy_offset as usize..(copy_offset + first_len) as usize] + .copy_from_slice(&data[0..first_len as usize]); + (&mut unsafe { *self.buffer.get() })[0..second_len as usize] + .copy_from_slice(&data[first_len as usize..(first_len + second_len) as usize]); + let new_committed_state = commit_write(new_state, data.len()); + if self.try_commit(new_state, new_committed_state) { + break; + } + } + Ok(()) + } +} + +impl KernelConsole { + pub fn write( + &self, + data: &[u8], + flags: KernelConsoleWriteFlags, + ) -> Result<(), ConsoleWriteError> { + self.hardware.write(data, flags); + self.inner.write_buffer(data, flags) + } +} + +impl KernelConsole { + pub fn write( + &self, + data: &[u8], + flags: KernelConsoleWriteFlags, + ) -> Result<(), ConsoleWriteError> { + 
self.hardware.write(data, flags); + self.inner.write_buffer(data, flags) + } +} + +impl KernelConsole { + fn read_buffer_bytes(&self, _slice: &mut [u8]) -> Result { + todo!() + } + + fn read_bytes( + &self, + slice: &mut [u8], + flags: KernelConsoleReadFlags, + ) -> Result { + let mut i = 0; + loop { + if i == slice.len() { + break; + } + let b = &mut slice[i]; + let read = self.read_lock.lock().read_byte(); + if let Some(x) = read { + *b = match x { + 4 => return Ok(i), + _ => x, + }; + i += 1; + } else if flags.contains(KernelConsoleReadFlags::NONBLOCKING) || i > 0 { + return Ok(i); + } else { + // TODO: sleep + crate::sched::schedule(true); + } + } + Ok(slice.len()) + } +} + +pub fn write_bytes(slice: &[u8], flags: KernelConsoleWriteFlags) -> Result<(), ConsoleWriteError> { + unsafe { NORMAL_CONSOLE.write(slice, flags) } +} + +pub fn read_bytes( + slice: &mut [u8], + flags: KernelConsoleReadFlags, +) -> Result { + unsafe { NORMAL_CONSOLE.read_bytes(slice, flags) } +} + +pub fn read_buffer_bytes(slice: &mut [u8]) -> Result { + unsafe { NORMAL_CONSOLE.read_buffer_bytes(slice) } +} + +pub fn push_input_byte(byte: u8) { + unsafe { + let byte = match byte { + 13 => 10, + 127 => 8, + x => x, + }; + NORMAL_CONSOLE.read_lock.lock().push_input_byte(byte); + if byte == 8 { + let _ = write_bytes(&[8, b' '], KernelConsoleWriteFlags::DISCARD_ON_FULL); + } + let _ = write_bytes(&[byte], KernelConsoleWriteFlags::DISCARD_ON_FULL); + } +} + +static mut EMERGENCY_CONSOLE: KernelConsole< + crate::machine::MachineConsoleHardware, + EmergencyMessage, +> = KernelConsole { + inner: &KERNEL_CONSOLE_MAIN, + hardware: crate::machine::MachineConsoleHardware::new(), + _pd: core::marker::PhantomData, + lock: Spinlock::new(()), + read_lock: Spinlock::new(KernelConsoleReadBuffer::new()), +}; + +static mut NORMAL_CONSOLE: KernelConsole = + KernelConsole { + inner: &KERNEL_CONSOLE_MAIN, + hardware: crate::machine::MachineConsoleHardware::new(), + _pd: core::marker::PhantomData, + lock: Spinlock::new(()), + read_lock: Spinlock::new(KernelConsoleReadBuffer::new()), + }; + +#[doc(hidden)] +pub fn _print_normal(args: ::core::fmt::Arguments) { + let istate = interrupt::disable(); + unsafe { + let _guard = NORMAL_CONSOLE.lock.lock(); + NORMAL_CONSOLE + .write_fmt(args) + .expect("printing to serial failed"); + } + interrupt::set(istate); +} + +pub fn _print_emergency(args: ::core::fmt::Arguments) { + unsafe { + EMERGENCY_CONSOLE + .write_fmt(args) + .expect("printing to serial failed"); + } +} + +#[macro_export] +macro_rules! log { + ($($arg:tt)*) => { + $crate::log::_print_normal(format_args!($($arg)*)) + }; +} + +#[macro_export] +macro_rules! logln { + () => { + $crate::log!("\n") + }; + ($fmt:expr) => { + $crate::log!(concat!($fmt, "\n")) + }; + ($fmt:expr, $($arg:tt)*) => { + $crate::log!(concat!($fmt, "\n"), $($arg)*) + }; +} + +#[macro_export] +macro_rules! emerglog { + ($($arg:tt)*) => { + $crate::log::_print_emergency(format_args!($($arg)*)) + }; +} + +#[macro_export] +macro_rules! 
emerglogln { + () => { + $crate::emerglog!("\n") + }; + ($fmt:expr) => { + $crate::emerglog!(concat!($fmt, "\n")) + }; + ($fmt:expr, $($arg:tt)*) => { + $crate::emerglog!(concat!($fmt, "\n"), $($arg)*) + }; +} diff --git a/src/kernel/src/machine/arm/common/boot/mod.rs b/src/kernel/src/machine/arm/common/boot/mod.rs index 5bfbd35f..1b9d2283 100644 --- a/src/kernel/src/machine/arm/common/boot/mod.rs +++ b/src/kernel/src/machine/arm/common/boot/mod.rs @@ -1,15 +1,13 @@ /// The method of starting a CPU on ARM devices is machine specific /// and usually implemented by the firmware. - mod psci; use core::str::FromStr; -use arm64::registers::{PAR_EL1, Readable}; - +use arm64::registers::{Readable, PAR_EL1}; use twizzler_abi::upcall::MemoryAccessKind; -use crate::memory::{VirtAddr, PhysAddr}; +use crate::memory::{PhysAddr, VirtAddr}; /// Possible boot protocols used to start a CPU. #[derive(Debug, Default, Copy, Clone, PartialEq)] @@ -38,7 +36,7 @@ impl FromStr for BootMethod { match s { "psci" => Ok(BootMethod::Psci), "spin-table" => Ok(BootMethod::SpinTable), - _ => Err(()) + _ => Err(()), } } } @@ -68,13 +66,11 @@ pub struct BootArgs { /// # Safety /// The tcb_base and kernel stack must both be valid memory regions for each thing. pub unsafe fn poke_cpu(cpu: u32, tcb_base: VirtAddr, kernel_stack: *mut u8) { - let core = unsafe { - crate::processor::get_processor_mut(cpu) - }; + let core = unsafe { crate::processor::get_processor_mut(cpu) }; match core.arch.boot { BootMethod::Psci => psci::boot_core(core, tcb_base, kernel_stack), - _ => unimplemented!("boot method: {}", core.arch.boot.as_str()) + _ => unimplemented!("boot method: {}", core.arch.boot.as_str()), } } @@ -101,21 +97,21 @@ fn translate(va: VirtAddr, access: MemoryAccessKind) -> Option { in(reg) va.raw(), options(nostack, nomem), ), - _ => unimplemented!("translation for {:?}", access) + _ => unimplemented!("translation for {:?}", access), } } // PAR_EL1 holds result of AT instruction // - FST: fault status info // - PA: output address if PAR_EL1.matches_all(PAR_EL1::F::TranslationSuccessfull) { - let pa = unsafe { + let pa = unsafe { // PAR_EL1.PA returns bits 47:12 let base_phys = PAR_EL1.read(PAR_EL1::PA) << 12; // the lower 12 bit offset resides in the VA let block_offset = va.raw() & 0xFFF; PhysAddr::new_unchecked(base_phys | block_offset) }; - return Some(pa) + return Some(pa); } None } diff --git a/src/kernel/src/machine/arm/common/boot/psci.rs b/src/kernel/src/machine/arm/common/boot/psci.rs index 90d8bb0a..c6b7c406 100644 --- a/src/kernel/src/machine/arm/common/boot/psci.rs +++ b/src/kernel/src/machine/arm/common/boot/psci.rs @@ -1,23 +1,20 @@ +use arm64::registers::Readable; /// Power State Coordination Interface (PSCI) is a standard interface for power management. 
/// /// A full explanation of interfaces for power management can be found in the /// "Arm Power State Coordination Interface Platform Design Document": /// https://developer.arm.com/documentation/den0022/f/ - -use arm64::registers::{SCTLR_EL1, MAIR_EL1, TTBR1_EL1, TTBR0_EL1, TCR_EL1, SPSR_EL1}; -use arm64::registers::Readable; +use arm64::registers::{MAIR_EL1, SCTLR_EL1, SPSR_EL1, TCR_EL1, TTBR0_EL1, TTBR1_EL1}; use smccc::psci::cpu_on; +use twizzler_abi::upcall::MemoryAccessKind; +use super::{translate, BootArgs}; use crate::{ machine::info::devicetree, - memory::{VirtAddr, PhysAddr}, + memory::{PhysAddr, VirtAddr}, processor::Processor, }; -use twizzler_abi::upcall::MemoryAccessKind; - -use super::{BootArgs, translate}; - // According to Section 6.4 the MMU and caches are disabled // and software must set the EL1h stack pointer unsafe fn psci_secondary_entry(context_id: &BootArgs) -> ! { @@ -71,7 +68,8 @@ unsafe fn psci_secondary_entry(context_id: &BootArgs) -> ! { /// should be set up so we can execute safe Rust code. fn rust_secondary_entry(args: &BootArgs) -> ! { // call the generic secondary cpu entry point - crate::processor::secondary_entry(args.cpu, + crate::processor::secondary_entry( + args.cpu, VirtAddr::new(args.tcb_base).unwrap(), args.kernel_stack as *mut u8, ); @@ -88,26 +86,24 @@ pub unsafe fn boot_core(core: &mut Processor, tcb_base: VirtAddr, kernel_stack: // TODO: ensure the right bits are 0 let cpu_id = core.arch.mpidr; // pass secondary entry point (physical address) - let entry_va = VirtAddr::new(psci_secondary_entry as u64) - .expect("invalid entry point address"); - let entry_pa = translate(entry_va, MemoryAccessKind::Read) - .expect("entry point is not mapped"); + let entry_va = VirtAddr::new(psci_secondary_entry as u64).expect("invalid entry point address"); + let entry_pa = translate(entry_va, MemoryAccessKind::Read).expect("entry point is not mapped"); // pass Context ID which in our implementation is the boot args - // needed to start the CPU core. The Context ID is gaurenteed to + // needed to start the CPU core. The Context ID is gaurenteed to // be passed as an argument to the entry point we specify. let context_id = &core.arch.args as *const _ as u64; let ctx_pa = translate(VirtAddr::new(context_id).unwrap(), MemoryAccessKind::Write) .expect("context ID is not mapped"); // Here we pass in the necessary arguments to start the CPU - + let cpacr: u64; core::arch::asm!( "mrs {}, CPACR_EL1", out(reg) cpacr, ); - // Register state needed by low level code to setup an environment + // Register state needed by low level code to setup an environment // suitable for executing Rust code in the kernel. 
core.arch.args.mair = MAIR_EL1.get(); core.arch.args.ttbr1 = TTBR1_EL1.get(); @@ -125,21 +121,19 @@ pub unsafe fn boot_core(core: &mut Processor, tcb_base: VirtAddr, kernel_stack: // get the method from the psci root node let method = { - let psci_info = devicetree() - .find_node("/psci") - .expect("no psci node"); + let psci_info = devicetree().find_node("/psci").expect("no psci node"); psci_info .property("method") .expect("no method property") .as_str() .expect("failed to convert to string") }; - + // here we assume 64 bit calling convention, in the future // we should check if this is different let boot_result = match method { "hvc" => cpu_on::(cpu_id, entry_pa.into(), ctx_pa.into()), - _ => todo!("SMCCC calling convention needed by PSCI") + _ => todo!("SMCCC calling convention needed by PSCI"), }; // Booting up the core is asynchronous and the call only returns OK if the signal was sent if boot_result.is_err() { diff --git a/src/kernel/src/machine/arm/common/gicv2/gicc.rs b/src/kernel/src/machine/arm/common/gicv2/gicc.rs index 0cdb48c2..800d8900 100644 --- a/src/kernel/src/machine/arm/common/gicv2/gicc.rs +++ b/src/kernel/src/machine/arm/common/gicv2/gicc.rs @@ -1,5 +1,5 @@ /// A Generic Interrupt Controller (GIC) v2 CPU Interface -/// +/// /// The full specification can be found here: /// https://developer.arm.com/documentation/ihi0048/b?lang=en /// @@ -8,7 +8,6 @@ /// A summary of its functionality can be found in section 10.6 /// "ARM Cortex-A Series Programmer’s Guide for ARMv8-A": /// https://developer.arm.com/documentation/den0024/a/ - use registers::{ interfaces::{Readable, Writeable}, register_bitfields, register_structs, @@ -16,7 +15,6 @@ use registers::{ }; use super::super::mmio::MmioRef; - use crate::memory::VirtAddr; // Each register in the specification is prefixed with GICC_ @@ -112,7 +110,9 @@ impl GICC { // A write to the EOIR corrsponds to the most recent valid // read of the IAR value. A return of a spurious ID from IAR // does not have to be written to EOIR. - self.registers.EOIR.write(EOIR::InterruptID.val(int_id as u32)); + self.registers + .EOIR + .write(EOIR::InterruptID.val(int_id as u32)); } /// print the configuration of the distributor diff --git a/src/kernel/src/machine/arm/common/gicv2/gicd.rs b/src/kernel/src/machine/arm/common/gicv2/gicd.rs index ef8285dd..e18a29b4 100644 --- a/src/kernel/src/machine/arm/common/gicv2/gicd.rs +++ b/src/kernel/src/machine/arm/common/gicv2/gicd.rs @@ -1,5 +1,5 @@ /// A Generic Interrupt Controller (GIC) v2 Distributor Interface -/// +/// /// The full specification can be found here: /// https://developer.arm.com/documentation/ihi0048/b?lang=en /// @@ -8,17 +8,15 @@ /// A summary of its functionality can be found in section 10.6 /// "ARM Cortex-A Series Programmer’s Guide for ARMv8-A": /// https://developer.arm.com/documentation/den0024/a/ - use core::ops::RangeInclusive; use registers::{ - interfaces::{Readable, Writeable, ReadWriteable}, + interfaces::{ReadWriteable, Readable, Writeable}, register_bitfields, register_structs, registers::{ReadOnly, ReadWrite}, }; use super::super::mmio::MmioRef; - use crate::memory::VirtAddr; // Each register in the specification is prefixed with GICD_ @@ -66,8 +64,8 @@ register_bitfields! { // Each register in the specification is prefixed with GICD_ register_structs! { - /// Distributor Register Map according - /// to Section 4.1.2, Table 4-1. + /// Distributor Register Map according + /// to Section 4.1.2, Table 4-1. /// All registers are 32-bits wide. 
#[allow(non_snake_case)] DistributorRegisters { @@ -96,14 +94,14 @@ pub struct GICD { } impl GICD { - /// According to 4.3.11 and 3.3: "GICD_IPRIORITYRs provide 8-bit - /// priority field for each interrupt," and Lower numbers have + /// According to 4.3.11 and 3.3: "GICD_IPRIORITYRs provide 8-bit + /// priority field for each interrupt," and Lower numbers have /// higher priority, with 0 being the highest. pub const HIGHEST_PRIORITY: u8 = 0; // The CPU can see interrupt IDs 0-1019. 0-31 are banked by // the distributor and uniquely seen by each processor, and - // SPIs range from 32-1019 (2.2.1). + // SPIs range from 32-1019 (2.2.1). /// Software Generated Interrupts (SGIs) range from 0-15 (See 2.2.1) const SGI_ID_RANGE: RangeInclusive = RangeInclusive::new(0, 15); @@ -151,23 +149,23 @@ impl GICD { } /// Set the enable bit for the corresponding interrupt. - pub fn enable_interrupt(&self, int_id: u32) { - // The GICD_ISENABLERns provide the set-enable bits + pub fn enable_interrupt(&self, int_id: u32) { + // The GICD_ISENABLERns provide the set-enable bits // for each interrupt shared or otherwise (3.1.2). - // + // // NOTE: The implementation of SGIs may have them // permanently enabled or they need to be manually // enabled/disabled - if Self::SGI_ID_RANGE.contains(&int_id) + if Self::SGI_ID_RANGE.contains(&int_id) || Self::PPI_ID_RANGE.contains(&int_id) - || Self::SPI_ID_RANGE.contains(&int_id) - { + || Self::SPI_ID_RANGE.contains(&int_id) + { // according to the algorithm on 4-93: // 1. GICD_ISENABLER n = int_id / 32 let iser = (int_id / 32) as usize; // 2. bit number = int_id % 32 let bit_index = int_id % 32; - + // First, we read the right GICD_ISENABLER register let mut enable = self.registers.ISENABLER[iser].get(); // set the bit in the local copy @@ -182,20 +180,20 @@ impl GICD { /// configure routing of interrupts to particular cpu cores pub fn set_interrupt_target(&self, int_id: u32, core: u32) { // We skip the banked registers since according to 2.2.1 - // those map to interrupt IDs 0-31 which are local to + // those map to interrupt IDs 0-31 which are local to // the processor. // // According to 4.3.12: // - GICD_ITARGETSR0 to GICD_ITARGETSR7 are read-only // - GICD_ITARGETSR0 to GICD_ITARGETSR7 are banked if int_id < *Self::PPI_ID_RANGE.end() { - return + return; } // Following the algorithm on page 4-107: // 1. ITARGETSR num = int_id / 4 // minus 1 since we seperate banked registers - let num = (int_id / 4) as usize - 1; + let num = (int_id / 4) as usize - 1; // 2. 
byte offset required = int_id % 4 let offset = int_id % 4; @@ -225,7 +223,7 @@ impl GICD { 1 => IPRIORITYR::PriorityOffset1.val(priority.into()), 2 => IPRIORITYR::PriorityOffset2.val(priority.into()), 3 => IPRIORITYR::PriorityOffset3.val(priority.into()), - _ => unreachable!() + _ => unreachable!(), }; self.registers.IPRIORITYR[num].modify(prio); diff --git a/src/kernel/src/machine/arm/common/gicv2/mod.rs b/src/kernel/src/machine/arm/common/gicv2/mod.rs index 119933d7..31a75af7 100644 --- a/src/kernel/src/machine/arm/common/gicv2/mod.rs +++ b/src/kernel/src/machine/arm/common/gicv2/mod.rs @@ -1,17 +1,16 @@ +mod gicc; /// A Generic Interrupt Controller (GIC) v2 driver interface -/// +/// /// The full specification can be found here: /// https://developer.arm.com/documentation/ihi0048/b?lang=en /// /// A summary of its functionality can be found in section 10.6 /// "ARM Cortex-A Series Programmer’s Guide for ARMv8-A": /// https://developer.arm.com/documentation/den0024/a/ - mod gicd; -mod gicc; -use gicd::GICD; use gicc::GICC; +use gicd::GICD; use crate::memory::VirtAddr; @@ -64,7 +63,8 @@ impl GICv2 { self.global.set_interrupt_target(int_id, core); // TODO: have the priority set to something reasonable // set the priority for the corresponding interrupt - self.global.set_interrupt_priority(int_id, GICD::HIGHEST_PRIORITY); + self.global + .set_interrupt_priority(int_id, GICD::HIGHEST_PRIORITY); // TODO: edge triggered or level sensitive??? see GICD_ICFGRn } @@ -85,4 +85,3 @@ impl GICv2 { self.local.print_config(); } } - diff --git a/src/kernel/src/machine/arm/common/mmio.rs b/src/kernel/src/machine/arm/common/mmio.rs index 1d3c497f..16c3fb46 100644 --- a/src/kernel/src/machine/arm/common/mmio.rs +++ b/src/kernel/src/machine/arm/common/mmio.rs @@ -8,9 +8,7 @@ pub struct MmioRef { impl MmioRef { pub fn new(address: *const T) -> Self { - Self { - address, - } + Self { address } } } diff --git a/src/kernel/src/machine/arm/common/uart.rs b/src/kernel/src/machine/arm/common/uart.rs index c9c28f79..92d2fba2 100644 --- a/src/kernel/src/machine/arm/common/uart.rs +++ b/src/kernel/src/machine/arm/common/uart.rs @@ -1,5 +1,5 @@ /// A PL011 UART Driver Interface -/// +/// /// Full specification found here: https://developer.arm.com/documentation/ddi0183/latest // #[derive(Copy, Clone)] @@ -26,9 +26,7 @@ enum Registers { impl PL011 { /// Create new instance of a PL011 UART at some base address pub const unsafe fn new(base: usize) -> Self { - Self { - base: base - } + Self { base } } /// Transmit a single byte of data. Will block to send other data present before. 
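To make the distributor index arithmetic above concrete, take SPI 42 as an illustrative interrupt ID; the helpers below are only a restatement of the spec algorithms cited in the comments (ID/32 and ID%32 for the set-enable registers, ID/4 and ID%4 for the per-byte priority fields):

// GICD_ISENABLERn:  n = 42 / 32 = 1,  bit    = 42 % 32 = 10
// GICD_IPRIORITYRn: n = 42 / 4  = 10, offset = 42 % 4  = 2
fn gicd_enable_index(int_id: u32) -> (usize, u32) {
    ((int_id / 32) as usize, int_id % 32)
}
fn gicd_priority_index(int_id: u32) -> (usize, u32) {
    ((int_id / 4) as usize, int_id % 4)
}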
@@ -40,8 +38,8 @@ impl PL011 { // bit is set if no data is present in holding register // or if operating in FIFO mode, FIFO is empty let tx_empty: bool = ((flag_reg >> 7) & 0b1) == 1; - if tx_empty { - break + if tx_empty { + break; } } self.write_reg(Registers::UARTDR, data as u32); @@ -55,10 +53,10 @@ impl PL011 { let flag_reg = unsafe { self.read_reg(Registers::UARTFR) }; let rx_empty = (flag_reg >> 4) & 0b1 == 1; if rx_empty { - return None + return None; } - // received data byte is read by performing reads from the UARTDR Register + // received data byte is read by performing reads from the UARTDR Register // along with the corresponding status information let data = unsafe { self.read_reg(Registers::UARTDR) }; @@ -74,7 +72,7 @@ impl PL011 { // disable UART { let cr = self.read_reg(Registers::UARTCR); - self.write_reg(Registers::UARTCR, (cr & !0b1) as u32); + self.write_reg(Registers::UARTCR, (cr & !0b1) as u32); } // wait for end of tx or rx of current char // while BUSY, !TXFE, !RXFE --> wait @@ -85,10 +83,10 @@ impl PL011 { let rx_empty = (flag_reg >> 4) & 0b1 == 1; if !tx_busy && tx_empty && rx_empty { - break + break; } } - // Flush the transmit FIFO by setting the FEN bit to 0 in the + // Flush the transmit FIFO by setting the FEN bit to 0 in the // Line Control Register, UARTLCR_H on page 3-12. let lcr = self.read_reg(Registers::UARTLCR_H); self.write_reg(Registers::UARTLCR_H, (lcr & !(0b1 << 4)) as u32); @@ -108,7 +106,7 @@ impl PL011 { let brd_scaled: u32 = 4 * clk / baud; // brd * 64 let int: u32 = brd_scaled >> 6; let frac: u32 = brd_scaled & 0x3f; - + // configure channel format: parity, word size, etc. // no parity, 1 stop bit, 8 data bits // brk 0, parity 0, eps 0, fifo enable 0, 2 stop bits 0, wlen 0b11 (8 bits), stick parity 1 @@ -118,7 +116,7 @@ impl PL011 { self.write_reg(Registers::UARTIBRD, int); self.write_reg(Registers::UARTFBRD, frac); self.write_reg(Registers::UARTLCR_H, lcr); - + // disable interrupts self.write_reg(Registers::UARTIMSC, 0u32); @@ -144,7 +142,7 @@ impl PL011 { // disable UART { let cr = self.read_reg(Registers::UARTCR); - self.write_reg(Registers::UARTCR, (cr & !0b1) as u32); + self.write_reg(Registers::UARTCR, (cr & !0b1) as u32); } // wait for end of tx or rx of current char // while BUSY, !TXFE, !RXFE --> wait @@ -155,7 +153,7 @@ impl PL011 { let rx_empty = (flag_reg >> 4) & 0b1 == 1; if !tx_busy && tx_empty && rx_empty { - break + break; } } // Flush the transmit FIFO by setting the FEN bit to 0 in the @@ -178,8 +176,8 @@ impl PL011 { } pub fn clear_rx_interrupt(&self) { - // The UARTICR Register is the interrupt clear register and is write-only. - // On a write of 1, the corresponding interrupt is cleared. + // The UARTICR Register is the interrupt clear register and is write-only. + // On a write of 1, the corresponding interrupt is cleared. // A write of 0 has no effect. Table 3-17 lists the register bit assignments. // // We must write to Receive interrupt clear (RXIC) which is bit 4. This @@ -207,14 +205,14 @@ mod error { /// The received data character must be read first from the Data Register, UARTDR /// before reading the error status associated with that data character. enum Error { - /// Data is recieved when FIFO is already full. The data in the FIFO remains valid. + /// Data is recieved when FIFO is already full. The data in the FIFO remains valid. /// The shift register is overwritten. The CPU must read the data to empty the FIFO. 
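The divisor math in init() above is easiest to sanity-check with numbers. Assuming a 24 MHz UART reference clock (the value QEMU's virt board typically advertises) and 115200 baud, the illustrative helper below reproduces the same computation:

// brd_scaled = 4 * 24_000_000 / 115_200 = 833   (= baud-rate divisor * 64)
// UARTIBRD   = 833 >> 6   = 13
// UARTFBRD   = 833 & 0x3f = 1
const fn pl011_divisors(clk: u32, baud: u32) -> (u32, u32) {
    let brd_scaled = 4 * clk / baud; // 64 * clk / (16 * baud)
    (brd_scaled >> 6, brd_scaled & 0x3f)
}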
OverrunError, /// The received data input was held LOW for longer than a full-word transmission time. /// The error is cleared after a write to UARTECR. BreakError, - /// Parity does not match the parity of EPS/SPS bits in the Line Control Register, UARTLCR_H. - /// The error is cleared after a write to UARTECR. + /// Parity does not match the parity of EPS/SPS bits in the Line Control Register, + /// UARTLCR_H. The error is cleared after a write to UARTECR. ParityError, /// The received character did not have a valid stop bit (a valid stop bit is 1). /// The error is cleared after a write to UARTECR. diff --git a/src/kernel/src/machine/arm/virt/info.rs b/src/kernel/src/machine/arm/virt/info.rs index 4e52acee..131e1acc 100644 --- a/src/kernel/src/machine/arm/virt/info.rs +++ b/src/kernel/src/machine/arm/virt/info.rs @@ -1,10 +1,7 @@ use fdt::Fdt; +use twizzler_abi::device::{CacheType, MmioInfo}; -use twizzler_abi::device::{MmioInfo, CacheType}; - -use crate::once::Once; -use crate::BootInfo; -use crate::arch::BootInfoSystemTable; +use crate::{arch::BootInfoSystemTable, once::Once, BootInfo}; // We use device tree to describe the hardware on this machine static FDT: Once> = Once::new(); @@ -20,13 +17,10 @@ pub fn init(boot_info: &B) { super::memory::DTB_ADDR.kernel_vaddr() } else { bootloader_dtb_addr - } + } }; // should not fail, but it might ... - unsafe { - Fdt::from_ptr(dtb.as_ptr()) - .expect("invalid DTB file, cannot boot") - } + unsafe { Fdt::from_ptr(dtb.as_ptr()).expect("invalid DTB file, cannot boot") } }); } @@ -68,7 +62,11 @@ pub fn get_uart_info() -> (usize, MmioInfo) { phandle }; if let Some(clock) = devicetree().find_phandle(phandle) { - clock_freq = clock.property("clock-frequency").unwrap().as_usize().unwrap(); + clock_freq = clock + .property("clock-frequency") + .unwrap() + .as_usize() + .unwrap(); } } } @@ -96,10 +94,14 @@ pub fn get_uart_interrupt_num() -> Option { // first number is the SPI flag let is_spi = converted[0] == 1; // second number is the interrupt - let int_num = if is_spi { converted[1] + 16 } else { converted[1] + 32 }; + let int_num = if is_spi { + converted[1] + 16 + } else { + converted[1] + 32 + }; // third number is the trigger level let _trigger = converted[2]; - return Some(int_num) + return Some(int_num); } } None @@ -108,15 +110,15 @@ pub fn get_uart_interrupt_num() -> Option { // return the mmio address info for the distributor and cpu interfaces // for a gicv2 interrupt controller pub fn get_gicv2_info() -> (MmioInfo, MmioInfo) { - let mut gicd_mmio = MmioInfo { + let mut gicd_mmio = MmioInfo { length: 0, cache_type: CacheType::MemoryMappedIO, - info: 0, + info: 0, }; - let mut gicc_mmio = MmioInfo { + let mut gicc_mmio = MmioInfo { length: 0, cache_type: CacheType::MemoryMappedIO, - info: 0, + info: 0, }; if let Some(gic) = devicetree().find_node("/intc") { let mut mmio_regs = gic.reg().unwrap(); diff --git a/src/kernel/src/machine/arm/virt/interrupt.rs b/src/kernel/src/machine/arm/virt/interrupt.rs index b8c29538..09ccadba 100644 --- a/src/kernel/src/machine/arm/virt/interrupt.rs +++ b/src/kernel/src/machine/arm/virt/interrupt.rs @@ -6,7 +6,7 @@ lazy_static! { /// System-wide reference to the interrupt controller pub static ref INTERRUPT_CONTROLLER: GICv2 = { use twizzler_abi::{device::CacheType, object::Protections}; - + use crate::memory::{ PhysAddr, pagetables::{ @@ -15,7 +15,7 @@ lazy_static! 
{ }, }; use crate::arch::memory::mmio::MMIO_ALLOCATOR; - + // retrive the locations of the MMIO registers let (distributor_mmio, cpu_interface_mmio) = crate::machine::info::get_gicv2_info(); // reserve regions of virtual address space for MMIO diff --git a/src/kernel/src/machine/arm/virt/memory.rs b/src/kernel/src/machine/arm/virt/memory.rs index ea431806..516f9359 100644 --- a/src/kernel/src/machine/arm/virt/memory.rs +++ b/src/kernel/src/machine/arm/virt/memory.rs @@ -1,18 +1,14 @@ use crate::memory::{MemoryRegion, MemoryRegionKind, PhysAddr}; -pub const DTB_ADDR: PhysAddr = unsafe { - PhysAddr::new_unchecked(0x4000_0000) -}; +pub const DTB_ADDR: PhysAddr = unsafe { PhysAddr::new_unchecked(0x4000_0000) }; -static RESERVED: [MemoryRegion; 1] = [ - MemoryRegion { - // physical base address in QEMU - start: DTB_ADDR, - // TODO: determine this at runtime - length: 0x100000, - kind: MemoryRegionKind::Reserved, - }, -]; +static RESERVED: [MemoryRegion; 1] = [MemoryRegion { + // physical base address in QEMU + start: DTB_ADDR, + // TODO: determine this at runtime + length: 0x100000, + kind: MemoryRegionKind::Reserved, +}]; /// A slice of physical regions of memory that are reserved /// and should be ignored by the kernel. This list is device specific diff --git a/src/kernel/src/machine/arm/virt/mod.rs b/src/kernel/src/machine/arm/virt/mod.rs index 8f0a4777..a2397fcb 100644 --- a/src/kernel/src/machine/arm/virt/mod.rs +++ b/src/kernel/src/machine/arm/virt/mod.rs @@ -7,4 +7,4 @@ pub mod serial; pub fn machine_post_init() { // initialize uart with interrupts serial::SERIAL.late_init(); -} \ No newline at end of file +} diff --git a/src/kernel/src/machine/arm/virt/processor.rs b/src/kernel/src/machine/arm/virt/processor.rs index 42c095a5..f1aa5aed 100644 --- a/src/kernel/src/machine/arm/virt/processor.rs +++ b/src/kernel/src/machine/arm/virt/processor.rs @@ -3,10 +3,9 @@ use core::str::FromStr; use arm64::registers::MPIDR_EL1; use registers::interfaces::Readable; -use crate::machine::info::devicetree; - // re-export boot module pub use super::super::common::boot::*; +use crate::machine::info::devicetree; pub fn enumerate_cpus() -> u32 { // MT bit means lowest level is logical cores (SMT) @@ -23,9 +22,7 @@ pub fn enumerate_cpus() -> u32 { crate::processor::register(cpu_id, core_id); // set the enable method to turn on the CPU core if let Some(enable) = cpu.property("enable-method") { - let core = unsafe { - crate::processor::get_processor_mut(cpu_id) - }; + let core = unsafe { crate::processor::get_processor_mut(cpu_id) }; // set the arch-sepecific boot protocol core.arch.boot = BootMethod::from_str(enable.as_str().unwrap()).unwrap(); // save the MPIDR_EL1 value found used for boot diff --git a/src/kernel/src/machine/arm/virt/serial.rs b/src/kernel/src/machine/arm/virt/serial.rs index 00c57eb4..32ce8a71 100644 --- a/src/kernel/src/machine/arm/virt/serial.rs +++ b/src/kernel/src/machine/arm/virt/serial.rs @@ -1,14 +1,15 @@ use lazy_static::lazy_static; use twizzler_abi::object::Protections; -use super::super::common::uart::PL011; - -use crate::memory::{PhysAddr, pagetables::{ - ContiguousProvider, MappingCursor, MappingSettings, Mapper, - MappingFlags, -}}; -use crate::interrupt::{Destination, TriggerMode}; -use crate::arch::memory::mmio::MMIO_ALLOCATOR; +use super::super::common::uart::PL011; +use crate::{ + arch::memory::mmio::MMIO_ALLOCATOR, + interrupt::{Destination, TriggerMode}, + memory::{ + pagetables::{ContiguousProvider, Mapper, MappingCursor, MappingFlags, MappingSettings}, + PhysAddr, + 
}, +}; lazy_static! { // TODO: add a spinlock here @@ -42,8 +43,8 @@ lazy_static! { } // create instance of the PL011 UART driver - let serial_port = unsafe { - PL011::new(uart_mmio_base.into()) + let serial_port = unsafe { + PL011::new(uart_mmio_base.into()) }; serial_port.early_init(clock_freq as u32); serial_port @@ -67,7 +68,7 @@ impl PL011 { fn early_init(&self, clock_freq: u32) { const BAUD: u32 = 115200; // configure the UART with the desired baud, given the clock rate - unsafe { + unsafe { self.init(clock_freq, BAUD); } } diff --git a/src/kernel/src/machine/mod.rs b/src/kernel/src/machine/mod.rs index 9a158c04..a19efc2c 100755 --- a/src/kernel/src/machine/mod.rs +++ b/src/kernel/src/machine/mod.rs @@ -1,31 +1,31 @@ -mod time; - -#[cfg(target_arch = "aarch64")] -mod arm; - -#[cfg(target_arch = "aarch64")] -pub use arm::*; - -#[cfg(target_arch = "x86_64")] -pub mod pc; - -#[cfg(target_arch = "x86_64")] -#[allow(unused_imports)] -pub use pc::*; -pub use time::*; - -use crate::log::KernelConsoleHardware; - -pub struct MachineConsoleHardware; - -impl KernelConsoleHardware for MachineConsoleHardware { - fn write(&self, data: &[u8], flags: crate::log::KernelConsoleWriteFlags) { - serial::write(data, flags); - } -} - -impl MachineConsoleHardware { - pub const fn new() -> Self { - Self - } -} +mod time; + +#[cfg(target_arch = "aarch64")] +mod arm; + +#[cfg(target_arch = "aarch64")] +pub use arm::*; + +#[cfg(target_arch = "x86_64")] +pub mod pc; + +#[cfg(target_arch = "x86_64")] +#[allow(unused_imports)] +pub use pc::*; +pub use time::*; + +use crate::log::KernelConsoleHardware; + +pub struct MachineConsoleHardware; + +impl KernelConsoleHardware for MachineConsoleHardware { + fn write(&self, data: &[u8], flags: crate::log::KernelConsoleWriteFlags) { + serial::write(data, flags); + } +} + +impl MachineConsoleHardware { + pub const fn new() -> Self { + Self + } +} diff --git a/src/kernel/src/machine/pc/mod.rs b/src/kernel/src/machine/pc/mod.rs index 02ecffce..442f8cde 100755 --- a/src/kernel/src/machine/pc/mod.rs +++ b/src/kernel/src/machine/pc/mod.rs @@ -1,7 +1,7 @@ -mod pcie; -pub mod serial; - -pub fn machine_post_init() { - serial::late_init(); - pcie::init(); -} +mod pcie; +pub mod serial; + +pub fn machine_post_init() { + serial::late_init(); + pcie::init(); +} diff --git a/src/kernel/src/machine/pc/pcie.rs b/src/kernel/src/machine/pc/pcie.rs index 372fa0fb..c46bcdac 100644 --- a/src/kernel/src/machine/pc/pcie.rs +++ b/src/kernel/src/machine/pc/pcie.rs @@ -1,28 +1,28 @@ -use alloc::collections::BTreeMap; -use alloc::format; -use alloc::vec::Vec; +use alloc::{collections::BTreeMap, format, vec::Vec}; + use memoffset::offset_of; -use twizzler_abi::device::bus::pcie::{ - get_bar, PcieBridgeHeader, PcieDeviceHeader, PcieDeviceInfo, PcieFunctionHeader, PcieInfo, - PcieKactionSpecific, -}; -use twizzler_abi::device::{ - CacheType, DeviceId, DeviceInterrupt, DeviceRepr, NUM_DEVICE_INTERRUPTS, -}; -use twizzler_abi::kso::unpack_kaction_int_pri_and_opts; -use twizzler_abi::object::{ObjID, NULLPAGE_SIZE}; use twizzler_abi::{ - device::BusType, - kso::{KactionError, KactionValue}, + device::{ + bus::pcie::{ + get_bar, PcieBridgeHeader, PcieDeviceHeader, PcieDeviceInfo, PcieFunctionHeader, + PcieInfo, PcieKactionSpecific, + }, + BusType, CacheType, DeviceId, DeviceInterrupt, DeviceRepr, NUM_DEVICE_INTERRUPTS, + }, + kso::{unpack_kaction_int_pri_and_opts, KactionError, KactionValue}, + object::{ObjID, NULLPAGE_SIZE}, }; use volatile::map_field; -use crate::arch::memory::phys_to_virt; -use 
crate::interrupt::{DynamicInterrupt, WakeInfo}; -use crate::memory::PhysAddr; -use crate::mutex::Mutex; -use crate::once::Once; -use crate::{arch, device::DeviceRef}; +use crate::{ + arch, + arch::memory::phys_to_virt, + device::DeviceRef, + interrupt::{DynamicInterrupt, WakeInfo}, + memory::PhysAddr, + mutex::Mutex, + once::Once, +}; struct PcieKernelInfo { seg_dev: DeviceRef, diff --git a/src/kernel/src/machine/pc/serial.rs b/src/kernel/src/machine/pc/serial.rs index 08238ab1..e8df1dc5 100755 --- a/src/kernel/src/machine/pc/serial.rs +++ b/src/kernel/src/machine/pc/serial.rs @@ -1,225 +1,226 @@ -use core::{ - cell::UnsafeCell, - fmt::Write, - sync::atomic::{AtomicBool, Ordering}, -}; -use lazy_static::lazy_static; - -use crate::interrupt::{Destination, TriggerMode}; - -pub struct SerialPort { - port: u16, -} - -bitflags::bitflags! { - /// Line status flags - struct LineStsFlags: u8 { - const INPUT_FULL = 1; - // 1 to 4 unknown - const OUTPUT_EMPTY = 1 << 5; - // 6 and 7 unknown - } -} - -impl SerialPort { - const INT_EN: u16 = 1; - const IID: u16 = 2; - const DATA: u16 = 0; - const FIFO_CTRL: u16 = 2; - const LINE_CTRL: u16 = 3; - const MODEM_CTRL: u16 = 4; - const LINE_STS: u16 = 5; - const MODEM_STS: u16 = 6; - const SCRATCH: u16 = 7; - /// Construct a new serial port. - /// # Safety - /// The supplied port must be a correct, functioning serial port on the system. - pub unsafe fn new(port: u16) -> Self { - Self { port } - } - - /// Write register. - /// # Safety - /// Must be a valid register in the serial port register space. - pub unsafe fn write_reg(&self, reg: u16, val: u8) { - x86::io::outb(self.port + reg, val); - } - - /// Read register. - /// # Safety - /// Must be a valid register in the serial port register space. - pub unsafe fn read_reg(&self, reg: u16) -> u8 { - x86::io::inb(self.port + reg) - } - - pub fn init(&mut self) { - unsafe { - for i in 0..8 { - self.read_reg(i); - } - // Disable interrupts - self.write_reg(Self::INT_EN, 0x00); - - // Enable DLAB - self.write_reg(Self::LINE_CTRL, 0x80); - - // Set maximum speed to 38400 bps by configuring DLL and DLM - self.write_reg(Self::DATA, 0x03); - self.write_reg(Self::INT_EN, 0x00); - - // Disable DLAB and set data word length to 8 bits - self.write_reg(Self::LINE_CTRL, 0x03); - - // Enable FIFO, clear TX/RX queues and - // set interrupt watermark at 14 bytes - self.write_reg(Self::FIFO_CTRL, 0xC7); - - // Mark data terminal ready, signal request to send - // and enable auxilliary output #2 (used as interrupt line for CPU) - self.write_reg(Self::MODEM_CTRL, 0x0F); - - // Enable interrupts - self.write_reg(Self::INT_EN, 0x01); - for i in 0..8 { - self.read_reg(i); - } - self.write_reg(Self::MODEM_CTRL, 0x0F); - } - } - - fn line_sts(&mut self) -> LineStsFlags { - unsafe { LineStsFlags::from_bits_truncate(self.read_reg(Self::LINE_STS)) } - } - - pub fn send(&mut self, byte: u8) { - unsafe { - while !self.line_sts().contains(LineStsFlags::OUTPUT_EMPTY) { - core::hint::spin_loop(); - } - self.write_reg(Self::DATA, byte); - } - } - - pub fn receive(&mut self) -> u8 { - unsafe { self.read_reg(Self::DATA) } - } - - pub fn has_pending(&mut self) -> bool { - let iid = unsafe { self.read_reg(Self::IID) }; - logln!("iid {:x}", iid); - iid & 1 != 0 - } - - pub fn read_modem_status(&mut self) -> u8 { - unsafe { self.read_reg(Self::MODEM_CTRL) } - } - - pub fn read_iid(&mut self) -> u8 { - unsafe { self.read_reg(Self::IID) } - } -} - -impl core::fmt::Write for SerialPort { - fn write_str(&mut self, s: &str) -> core::fmt::Result { - for 
byte in s.bytes() { - self.send(byte); - } - Ok(()) - } -} - -struct SimpleLock { - data: UnsafeCell, - state: AtomicBool, -} - -impl SimpleLock { - fn new(item: T) -> Self { - Self { - state: AtomicBool::new(false), - data: UnsafeCell::new(item), - } - } - fn lock(&self) -> SimpleGuard<'_, T> { - let int = crate::interrupt::disable(); - while self - .state - .compare_exchange_weak(false, true, Ordering::SeqCst, Ordering::SeqCst) - .is_err() - { - core::hint::spin_loop() - } - SimpleGuard { lock: self, int } - } -} - -struct SimpleGuard<'a, T> { - lock: &'a SimpleLock, - int: bool, -} - -impl<'a, T> Drop for SimpleGuard<'a, T> { - fn drop(&mut self) { - self.lock.state.store(false, Ordering::SeqCst); - crate::interrupt::set(self.int); - } -} - -impl core::ops::Deref for SimpleGuard<'_, T> { - type Target = T; - fn deref(&self) -> &Self::Target { - unsafe { &*self.lock.data.get() } - } -} - -impl core::ops::DerefMut for SimpleGuard<'_, T> { - fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *self.lock.data.get() } - } -} - -unsafe impl Send for SimpleLock where T: Send {} -unsafe impl Sync for SimpleLock where T: Send {} -unsafe impl Send for SimpleGuard<'_, T> where T: Send {} -unsafe impl Sync for SimpleGuard<'_, T> where T: Send + Sync {} - -lazy_static! { - static ref SERIAL1: SimpleLock = { - let mut serial_port = unsafe { SerialPort::new(0x3f8) }; - serial_port.init(); - SimpleLock::new(serial_port) - }; -} - -pub fn late_init() { - crate::arch::set_interrupt( - 36, - false, - TriggerMode::Edge, - crate::interrupt::PinPolarity::ActiveHigh, - Destination::Bsp, - ); -} - -pub fn interrupt_handler() { - let mut serial = SERIAL1.lock(); - let status = serial.read_iid(); - match (status >> 1) & 7 { - 0 => { - let _msr = serial.read_modem_status(); - } - _ => { - let x = serial.receive(); - drop(serial); - crate::log::push_input_byte(x); - } - } -} - -pub fn write(data: &[u8], _flags: crate::log::KernelConsoleWriteFlags) { - unsafe { - let _ = SERIAL1 - .lock() - .write_str(core::str::from_utf8_unchecked(data)); - } -} +use core::{ + cell::UnsafeCell, + fmt::Write, + sync::atomic::{AtomicBool, Ordering}, +}; + +use lazy_static::lazy_static; + +use crate::interrupt::{Destination, TriggerMode}; + +pub struct SerialPort { + port: u16, +} + +bitflags::bitflags! { + /// Line status flags + struct LineStsFlags: u8 { + const INPUT_FULL = 1; + // 1 to 4 unknown + const OUTPUT_EMPTY = 1 << 5; + // 6 and 7 unknown + } +} + +impl SerialPort { + const INT_EN: u16 = 1; + const IID: u16 = 2; + const DATA: u16 = 0; + const FIFO_CTRL: u16 = 2; + const LINE_CTRL: u16 = 3; + const MODEM_CTRL: u16 = 4; + const LINE_STS: u16 = 5; + const MODEM_STS: u16 = 6; + const SCRATCH: u16 = 7; + /// Construct a new serial port. + /// # Safety + /// The supplied port must be a correct, functioning serial port on the system. + pub unsafe fn new(port: u16) -> Self { + Self { port } + } + + /// Write register. + /// # Safety + /// Must be a valid register in the serial port register space. + pub unsafe fn write_reg(&self, reg: u16, val: u8) { + x86::io::outb(self.port + reg, val); + } + + /// Read register. + /// # Safety + /// Must be a valid register in the serial port register space. 
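The DLAB sequence in init() programs a divisor of 3 (DLL = 0x03, DLM = 0x00). With the standard 115200-based divisor relationship for 16550-style UARTs, that is exactly the 38400 bps mentioned in the comment; a hypothetical helper just to show the arithmetic:

// baud = 115_200 / divisor  =>  115_200 / 3 = 38_400
const fn com_baud(divisor: u32) -> u32 {
    115_200 / divisor
}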
+ pub unsafe fn read_reg(&self, reg: u16) -> u8 { + x86::io::inb(self.port + reg) + } + + pub fn init(&mut self) { + unsafe { + for i in 0..8 { + self.read_reg(i); + } + // Disable interrupts + self.write_reg(Self::INT_EN, 0x00); + + // Enable DLAB + self.write_reg(Self::LINE_CTRL, 0x80); + + // Set maximum speed to 38400 bps by configuring DLL and DLM + self.write_reg(Self::DATA, 0x03); + self.write_reg(Self::INT_EN, 0x00); + + // Disable DLAB and set data word length to 8 bits + self.write_reg(Self::LINE_CTRL, 0x03); + + // Enable FIFO, clear TX/RX queues and + // set interrupt watermark at 14 bytes + self.write_reg(Self::FIFO_CTRL, 0xC7); + + // Mark data terminal ready, signal request to send + // and enable auxilliary output #2 (used as interrupt line for CPU) + self.write_reg(Self::MODEM_CTRL, 0x0F); + + // Enable interrupts + self.write_reg(Self::INT_EN, 0x01); + for i in 0..8 { + self.read_reg(i); + } + self.write_reg(Self::MODEM_CTRL, 0x0F); + } + } + + fn line_sts(&mut self) -> LineStsFlags { + unsafe { LineStsFlags::from_bits_truncate(self.read_reg(Self::LINE_STS)) } + } + + pub fn send(&mut self, byte: u8) { + unsafe { + while !self.line_sts().contains(LineStsFlags::OUTPUT_EMPTY) { + core::hint::spin_loop(); + } + self.write_reg(Self::DATA, byte); + } + } + + pub fn receive(&mut self) -> u8 { + unsafe { self.read_reg(Self::DATA) } + } + + pub fn has_pending(&mut self) -> bool { + let iid = unsafe { self.read_reg(Self::IID) }; + logln!("iid {:x}", iid); + iid & 1 != 0 + } + + pub fn read_modem_status(&mut self) -> u8 { + unsafe { self.read_reg(Self::MODEM_CTRL) } + } + + pub fn read_iid(&mut self) -> u8 { + unsafe { self.read_reg(Self::IID) } + } +} + +impl core::fmt::Write for SerialPort { + fn write_str(&mut self, s: &str) -> core::fmt::Result { + for byte in s.bytes() { + self.send(byte); + } + Ok(()) + } +} + +struct SimpleLock { + data: UnsafeCell, + state: AtomicBool, +} + +impl SimpleLock { + fn new(item: T) -> Self { + Self { + state: AtomicBool::new(false), + data: UnsafeCell::new(item), + } + } + fn lock(&self) -> SimpleGuard<'_, T> { + let int = crate::interrupt::disable(); + while self + .state + .compare_exchange_weak(false, true, Ordering::SeqCst, Ordering::SeqCst) + .is_err() + { + core::hint::spin_loop() + } + SimpleGuard { lock: self, int } + } +} + +struct SimpleGuard<'a, T> { + lock: &'a SimpleLock, + int: bool, +} + +impl<'a, T> Drop for SimpleGuard<'a, T> { + fn drop(&mut self) { + self.lock.state.store(false, Ordering::SeqCst); + crate::interrupt::set(self.int); + } +} + +impl core::ops::Deref for SimpleGuard<'_, T> { + type Target = T; + fn deref(&self) -> &Self::Target { + unsafe { &*self.lock.data.get() } + } +} + +impl core::ops::DerefMut for SimpleGuard<'_, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { &mut *self.lock.data.get() } + } +} + +unsafe impl Send for SimpleLock where T: Send {} +unsafe impl Sync for SimpleLock where T: Send {} +unsafe impl Send for SimpleGuard<'_, T> where T: Send {} +unsafe impl Sync for SimpleGuard<'_, T> where T: Send + Sync {} + +lazy_static! 
{ + static ref SERIAL1: SimpleLock = { + let mut serial_port = unsafe { SerialPort::new(0x3f8) }; + serial_port.init(); + SimpleLock::new(serial_port) + }; +} + +pub fn late_init() { + crate::arch::set_interrupt( + 36, + false, + TriggerMode::Edge, + crate::interrupt::PinPolarity::ActiveHigh, + Destination::Bsp, + ); +} + +pub fn interrupt_handler() { + let mut serial = SERIAL1.lock(); + let status = serial.read_iid(); + match (status >> 1) & 7 { + 0 => { + let _msr = serial.read_modem_status(); + } + _ => { + let x = serial.receive(); + drop(serial); + crate::log::push_input_byte(x); + } + } +} + +pub fn write(data: &[u8], _flags: crate::log::KernelConsoleWriteFlags) { + unsafe { + let _ = SERIAL1 + .lock() + .write_str(core::str::from_utf8_unchecked(data)); + } +} diff --git a/src/kernel/src/machine/time.rs b/src/kernel/src/machine/time.rs index 3a8870bf..05683aa5 100644 --- a/src/kernel/src/machine/time.rs +++ b/src/kernel/src/machine/time.rs @@ -1,2 +1,2 @@ /// Enumerate clock sources as part of the board -pub fn enumerate_clocks() {} \ No newline at end of file +pub fn enumerate_clocks() {} diff --git a/src/kernel/src/memory/allocator.rs b/src/kernel/src/memory/allocator.rs index d00fadb3..292f9216 100755 --- a/src/kernel/src/memory/allocator.rs +++ b/src/kernel/src/memory/allocator.rs @@ -1,148 +1,148 @@ -use alloc::alloc::{GlobalAlloc, Layout}; -use core::{ - mem::size_of, - panic, - ptr::NonNull, - sync::atomic::{AtomicUsize, Ordering}, -}; -use slabmalloc::{AllocationError, Allocator, LargeObjectPage, ObjectPage, ZoneAllocator}; - -use crate::spinlock::Spinlock; - -use super::context::{Context, KernelMemoryContext}; - -#[alloc_error_handler] -fn alloc_error_handler(layout: Layout) -> ! { - panic!("allocation error: {:?}", layout) -} - -const EARLY_ALLOCATION_SIZE: usize = 1024 * 1024 * 2; -#[repr(align(64))] -#[derive(Copy, Clone)] -struct AlignedU8(u8); - -static mut EARLY_ALLOCATION_AREA: [AlignedU8; EARLY_ALLOCATION_SIZE] = - [AlignedU8(0); EARLY_ALLOCATION_SIZE]; -static EARLY_ALLOCATION_PTR: AtomicUsize = AtomicUsize::new(0); - -struct KernelAllocatorInner { - ctx: &'static Ctx, - zone: ZoneAllocator<'static>, -} - -struct KernelAllocator { - inner: Spinlock>>, -} - -impl KernelAllocator { - fn early_alloc(&self, layout: Layout) -> *mut u8 { - let start = EARLY_ALLOCATION_PTR.load(Ordering::SeqCst); - let start = crate::utils::align(start, layout.align()); - EARLY_ALLOCATION_PTR.store(start + layout.size(), Ordering::SeqCst); - if start + layout.size() >= EARLY_ALLOCATION_SIZE { - panic!("out of early memory"); - } - unsafe { EARLY_ALLOCATION_AREA.as_mut_ptr().add(start) as *mut u8 } - } -} - -impl KernelAllocatorInner { - fn allocate_page(&mut self) -> &'static mut ObjectPage<'static> { - let size = size_of::(); - let chunk = self - .ctx - .allocate_chunk(Layout::from_size_align(size, size).unwrap()) - .as_ptr(); - unsafe { &mut *(chunk as *mut ObjectPage<'static>) } - } - - fn allocate_large_page(&mut self) -> &'static mut LargeObjectPage<'static> { - let size = size_of::(); - let chunk = self - .ctx - .allocate_chunk(Layout::from_size_align(size, size).unwrap()) - .as_ptr(); - unsafe { &mut *(chunk as *mut LargeObjectPage<'static>) } - } -} - -unsafe impl GlobalAlloc for KernelAllocator { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - let mut inner = self.inner.lock(); - - if inner.is_none() { - return self.early_alloc(layout); - } - let inner = inner.as_mut().unwrap(); - match layout.size() { - 0..=ZoneAllocator::MAX_ALLOC_SIZE => match 
inner.zone.allocate(layout) { - Ok(nptr) => nptr.as_ptr(), - Err(AllocationError::OutOfMemory) => { - if layout.size() <= ZoneAllocator::MAX_BASE_ALLOC_SIZE { - let new_page = inner.allocate_page(); - inner - .zone - .refill(layout, new_page) - .expect("failed to refill zone allocator"); - inner - .zone - .allocate(layout) - .expect("allocation failed after refill") - .as_ptr() - } else { - let new_page = inner.allocate_large_page(); - inner - .zone - .refill_large(layout, new_page) - .expect("failed to refill zone allocator"); - inner - .zone - .allocate(layout) - .expect("allocation failed after refill") - .as_ptr() - } - } - Err(AllocationError::InvalidLayout) => { - panic!("cannot allocate this layout {:?}", layout) - } - }, - _ => inner.ctx.allocate_chunk(layout).as_ptr(), - } - } - - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - let mut inner = self.inner.lock(); - if inner.is_none() { - /* freeing memory in early init. Sadly, we just have to leak it. */ - return; - } - let inner = inner.as_mut().unwrap(); - if ptr.is_null() { - return; - } - let nn = NonNull::new(ptr).unwrap(); - match layout.size() { - 0..=ZoneAllocator::MAX_ALLOC_SIZE => { - inner - .zone - .deallocate(nn, layout) - .expect("failed to deallocate memory"); - } - _ => inner - .ctx - .deallocate_chunk(layout, NonNull::new(ptr).unwrap()), - } - } -} - -#[global_allocator] -static SLAB_ALLOCATOR: KernelAllocator = KernelAllocator { - inner: Spinlock::new(None), -}; - -pub fn init(ctx: &'static Context) { - *SLAB_ALLOCATOR.inner.lock() = Some(KernelAllocatorInner { - ctx, - zone: ZoneAllocator::new(), - }); -} +use alloc::alloc::{GlobalAlloc, Layout}; +use core::{ + mem::size_of, + panic, + ptr::NonNull, + sync::atomic::{AtomicUsize, Ordering}, +}; + +use slabmalloc::{AllocationError, Allocator, LargeObjectPage, ObjectPage, ZoneAllocator}; + +use super::context::{Context, KernelMemoryContext}; +use crate::spinlock::Spinlock; + +#[alloc_error_handler] +fn alloc_error_handler(layout: Layout) -> ! 
{ + panic!("allocation error: {:?}", layout) +} + +const EARLY_ALLOCATION_SIZE: usize = 1024 * 1024 * 2; +#[repr(align(64))] +#[derive(Copy, Clone)] +struct AlignedU8(u8); + +static mut EARLY_ALLOCATION_AREA: [AlignedU8; EARLY_ALLOCATION_SIZE] = + [AlignedU8(0); EARLY_ALLOCATION_SIZE]; +static EARLY_ALLOCATION_PTR: AtomicUsize = AtomicUsize::new(0); + +struct KernelAllocatorInner { + ctx: &'static Ctx, + zone: ZoneAllocator<'static>, +} + +struct KernelAllocator { + inner: Spinlock>>, +} + +impl KernelAllocator { + fn early_alloc(&self, layout: Layout) -> *mut u8 { + let start = EARLY_ALLOCATION_PTR.load(Ordering::SeqCst); + let start = crate::utils::align(start, layout.align()); + EARLY_ALLOCATION_PTR.store(start + layout.size(), Ordering::SeqCst); + if start + layout.size() >= EARLY_ALLOCATION_SIZE { + panic!("out of early memory"); + } + unsafe { EARLY_ALLOCATION_AREA.as_mut_ptr().add(start) as *mut u8 } + } +} + +impl KernelAllocatorInner { + fn allocate_page(&mut self) -> &'static mut ObjectPage<'static> { + let size = size_of::(); + let chunk = self + .ctx + .allocate_chunk(Layout::from_size_align(size, size).unwrap()) + .as_ptr(); + unsafe { &mut *(chunk as *mut ObjectPage<'static>) } + } + + fn allocate_large_page(&mut self) -> &'static mut LargeObjectPage<'static> { + let size = size_of::(); + let chunk = self + .ctx + .allocate_chunk(Layout::from_size_align(size, size).unwrap()) + .as_ptr(); + unsafe { &mut *(chunk as *mut LargeObjectPage<'static>) } + } +} + +unsafe impl GlobalAlloc for KernelAllocator { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + let mut inner = self.inner.lock(); + + if inner.is_none() { + return self.early_alloc(layout); + } + let inner = inner.as_mut().unwrap(); + match layout.size() { + 0..=ZoneAllocator::MAX_ALLOC_SIZE => match inner.zone.allocate(layout) { + Ok(nptr) => nptr.as_ptr(), + Err(AllocationError::OutOfMemory) => { + if layout.size() <= ZoneAllocator::MAX_BASE_ALLOC_SIZE { + let new_page = inner.allocate_page(); + inner + .zone + .refill(layout, new_page) + .expect("failed to refill zone allocator"); + inner + .zone + .allocate(layout) + .expect("allocation failed after refill") + .as_ptr() + } else { + let new_page = inner.allocate_large_page(); + inner + .zone + .refill_large(layout, new_page) + .expect("failed to refill zone allocator"); + inner + .zone + .allocate(layout) + .expect("allocation failed after refill") + .as_ptr() + } + } + Err(AllocationError::InvalidLayout) => { + panic!("cannot allocate this layout {:?}", layout) + } + }, + _ => inner.ctx.allocate_chunk(layout).as_ptr(), + } + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + let mut inner = self.inner.lock(); + if inner.is_none() { + /* freeing memory in early init. Sadly, we just have to leak it. 
*/ + return; + } + let inner = inner.as_mut().unwrap(); + if ptr.is_null() { + return; + } + let nn = NonNull::new(ptr).unwrap(); + match layout.size() { + 0..=ZoneAllocator::MAX_ALLOC_SIZE => { + inner + .zone + .deallocate(nn, layout) + .expect("failed to deallocate memory"); + } + _ => inner + .ctx + .deallocate_chunk(layout, NonNull::new(ptr).unwrap()), + } + } +} + +#[global_allocator] +static SLAB_ALLOCATOR: KernelAllocator = KernelAllocator { + inner: Spinlock::new(None), +}; + +pub fn init(ctx: &'static Context) { + *SLAB_ALLOCATOR.inner.lock() = Some(KernelAllocatorInner { + ctx, + zone: ZoneAllocator::new(), + }); +} diff --git a/src/kernel/src/memory/context.rs b/src/kernel/src/memory/context.rs index d20896dd..66f6783f 100644 --- a/src/kernel/src/memory/context.rs +++ b/src/kernel/src/memory/context.rs @@ -1,23 +1,23 @@ -//! A memory context is the primary abstraction the kernel uses for manipulating whatever memory system this machine -//! has. This includes both kernel memory management (kernel memory allocator) and management of userland resources. The -//! rest of the kernel can interact with the functions in [UserContext] to operate on userland-visible memory state -//! (e.g. objects' slots mappings in x86), and the functions in [KernelMemoryContext] to operate on kernel memory state -//! (e.g. the allocator and kernel mappings in the higher-half on x86). - -use core::alloc::Layout; -use core::ops::Range; -use core::ptr::NonNull; +//! A memory context is the primary abstraction the kernel uses for manipulating whatever memory +//! system this machine has. This includes both kernel memory management (kernel memory allocator) +//! and management of userland resources. The rest of the kernel can interact with the functions in +//! [UserContext] to operate on userland-visible memory state (e.g. objects' slots mappings in x86), +//! and the functions in [KernelMemoryContext] to operate on kernel memory state (e.g. the allocator +//! and kernel mappings in the higher-half on x86). use alloc::sync::Arc; -use twizzler_abi::object::ObjID; -use twizzler_abi::{device::CacheType, object::Protections}; - -use crate::obj::ObjectRef; -use crate::obj::{InvalidateMode, PageNumber}; +use core::{alloc::Layout, ops::Range, ptr::NonNull}; -use crate::syscall::object::ObjectHandle; +use twizzler_abi::{ + device::CacheType, + object::{ObjID, Protections}, +}; use self::virtmem::KernelObjectVirtHandle; +use crate::{ + obj::{InvalidateMode, ObjectRef, PageNumber}, + syscall::object::ObjectHandle, +}; impl ObjectHandle for ContextRef { type HandleType = Context; @@ -30,25 +30,27 @@ pub type Context = virtmem::VirtContext; /// The [Context] type wrapped in an [Arc]. pub type ContextRef = Arc; -/// A trait that defines the operations expected by higher-level object management routines. An architecture-dependent -/// type can be created that implements Context, which can then be used by the rest of the kernel to manage objects in a -/// context (e.g. an address space). +/// A trait that defines the operations expected by higher-level object management routines. An +/// architecture-dependent type can be created that implements Context, which can then be used by +/// the rest of the kernel to manage objects in a context (e.g. an address space). pub trait UserContext { - /// The type that is expected for informing the context how to map the object (e.g. a slot number). + /// The type that is expected for informing the context how to map the object (e.g. a slot + /// number). 
type MappingInfo; /// Switch to this context. fn switch_to(&self, sctx: ObjID); - /// Insert a range of an object into the context. The implementation may choose to use start and len as hints, but - /// should keep in mind that calls to `insert_object` may be generated by faults, and so should strive to resolve - /// the fault by correctly mapping the object as requested. + /// Insert a range of an object into the context. The implementation may choose to use start and + /// len as hints, but should keep in mind that calls to `insert_object` may be generated by + /// faults, and so should strive to resolve the fault by correctly mapping the object as + /// requested. fn insert_object( self: &Arc, mapping_info: Self::MappingInfo, object_info: &ObjectContextInfo, ) -> Result<(), InsertError>; - /// Lookup an object within this context. Once this function returns, no guarantees are made about if the object - /// remains mapped as is. + /// Lookup an object within this context. Once this function returns, no guarantees are made + /// about if the object remains mapped as is. fn lookup_object(&self, info: Self::MappingInfo) -> Option; /// Invalidate any mappings for a particular object. fn invalidate_object(&self, obj: ObjID, range: &Range, mode: InvalidateMode); @@ -97,23 +99,27 @@ pub enum InsertError { /// A trait for kernel-related memory context actions. pub trait KernelMemoryContext { type Handle: KernelObjectHandle; - /// Called once during initialization, after which calls to the other function in this trait may be called. + /// Called once during initialization, after which calls to the other function in this trait may + /// be called. fn init_allocator(&self); - /// Allocate a contiguous chunk of memory. This is not expected to be good for small allocations, this should be - /// used to grab large chunks of memory to then serve pieces of using an actual allocator. Returns a pointer to the - /// allocated memory and the size of the allocation (must be greater than layout's size). + /// Allocate a contiguous chunk of memory. This is not expected to be good for small + /// allocations, this should be used to grab large chunks of memory to then serve pieces of + /// using an actual allocator. Returns a pointer to the allocated memory and the size of the + /// allocation (must be greater than layout's size). fn allocate_chunk(&self, layout: Layout) -> NonNull; /// Deallocate a previously allocated chunk. /// /// # Safety - /// The call must ensure that the passed in pointer came from a call to [Self::allocate_chunk] and has the same - /// layout data as was passed to that allocation call. + /// The call must ensure that the passed in pointer came from a call to [Self::allocate_chunk] + /// and has the same layout data as was passed to that allocation call. unsafe fn deallocate_chunk(&self, layout: Layout, ptr: NonNull); - /// Called once after all secondary processors have been booted and are waiting at their main barrier. Should finish - /// any setup needed in the kernel context before all CPUs can freely use this context. + /// Called once after all secondary processors have been booted and are waiting at their main + /// barrier. Should finish any setup needed in the kernel context before all CPUs can freely + /// use this context. fn prep_smp(&self); - /// Insert object into kernel space. The context need only support a small number of kernel-memory-mapped objects. - /// The mapping is released when the returned handle is dropped. + /// Insert object into kernel space. 
The context need only support a small number of + /// kernel-memory-mapped objects. The mapping is released when the returned handle is + /// dropped. fn insert_kernel_object(&self, info: ObjectContextInfo) -> Self::Handle; } @@ -134,9 +140,10 @@ lazy_static::lazy_static! { }; } -/// Return a reference to the kernel context. The kernel context is the default context that a thread is in if it's not -/// a userland thread. It's the main context used during init and during secondary processor initialization. It may be -/// used to manipulate kernel memory mappings the same as any other context. +/// Return a reference to the kernel context. The kernel context is the default context that a +/// thread is in if it's not a userland thread. It's the main context used during init and during +/// secondary processor initialization. It may be used to manipulate kernel memory mappings the same +/// as any other context. pub fn kernel_context() -> &'static ContextRef { &KERNEL_CONTEXT } diff --git a/src/kernel/src/memory/context/virtmem.rs b/src/kernel/src/memory/context/virtmem.rs index 1003c7e1..dec0b350 100644 --- a/src/kernel/src/memory/context/virtmem.rs +++ b/src/kernel/src/memory/context/virtmem.rs @@ -1,8 +1,8 @@ //! This mod implements [UserContext] and [KernelMemoryContext] for virtual memory systems. +use alloc::{collections::BTreeMap, sync::Arc, vec::Vec}; use core::{intrinsics::size_of, marker::PhantomData, ptr::NonNull}; -use alloc::{collections::BTreeMap, sync::Arc, vec::Vec}; use twizzler_abi::{ device::CacheType, object::{ObjID, Protections, MAX_SIZE, NULLPAGE_SIZE}, @@ -30,22 +30,18 @@ use crate::{ PhysAddr, }, mutex::Mutex, - obj::{self, ObjectRef}, + obj::{self, pages::Page, ObjectRef, PageNumber}, security::KERNEL_SCTX, spinlock::Spinlock, - thread::current_thread_ref, -}; - -use crate::{ - obj::{pages::Page, PageNumber}, - thread::current_memory_context, + thread::{current_memory_context, current_thread_ref}, }; /// A type that implements [Context] for virtual memory systems. pub struct VirtContext { secctx: Mutex>, - // We keep a cache of the actual switch targets so that we don't need to take the above mutex during switch_to. - // Unfortunately, it's still kinda hairy, since this is a spinlock of a memory-allocating collection. See register_sctx for details. + // We keep a cache of the actual switch targets so that we don't need to take the above mutex + // during switch_to. Unfortunately, it's still kinda hairy, since this is a spinlock of a + // memory-allocating collection. See register_sctx for details. target_cache: Spinlock>, slots: Mutex, id: Id<'static>, @@ -196,7 +192,8 @@ impl VirtContext { } } - /// Init a context for being the kernel context, and clone the mappings from the bootstrap context. + /// Init a context for being the kernel context, and clone the mappings from the bootstrap + /// context. pub(super) fn init_kernel_context(&self) { let proto = unsafe { Mapper::current() }; let rm = proto.readmap(MappingCursor::new( @@ -214,8 +211,8 @@ impl VirtContext { self.with_arch(KERNEL_SCTX, |arch| arch.map(cursor, &mut phys, &settings)); } - // ID-map the lower memory. This is needed by some systems to boot secondary CPUs. This mapping is cleared by - // the call to prep_smp later. + // ID-map the lower memory. This is needed by some systems to boot secondary CPUs. This + // mapping is cleared by the call to prep_smp later. 
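A minimal caller-side sketch of the allocate_chunk/deallocate_chunk contract described above: grab a large, page-aligned chunk from the kernel context and hand it back with the same layout. The helper name and sizes are illustrative only; the chunk's concrete pointer type is whatever the trait declares.

use core::alloc::Layout;
use crate::memory::context::{kernel_context, KernelMemoryContext};

fn chunk_roundtrip() {
    let kctx = kernel_context();
    // Large request: this interface feeds a real allocator and is not meant
    // for small objects (per the trait docs above).
    let layout = Layout::from_size_align(2 * 1024 * 1024, 4096).unwrap();
    let chunk = kctx.allocate_chunk(layout);
    // ... carve the chunk up ...
    // Safety: same pointer and layout that came from allocate_chunk.
    unsafe { kctx.deallocate_chunk(layout, chunk) };
}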
let id_len = 0x100000000; // 4GB let cursor = MappingCursor::new( VirtAddr::new( @@ -412,7 +409,8 @@ impl GlobalPageAlloc { arch.map(cursor, &mut phys, &settings); }); self.end = self.end.offset(len).unwrap(); - // Safety: the extension is backed by memory that is directly after the previous call to extend. + // Safety: the extension is backed by memory that is directly after the previous call to + // extend. unsafe { self.alloc.extend(len); } @@ -438,8 +436,8 @@ impl GlobalPageAlloc { } } -// Safety: the internal heap contains raw pointers, which are not Send. However, the heap is globally mapped and static -// for the lifetime of the kernel. +// Safety: the internal heap contains raw pointers, which are not Send. However, the heap is +// globally mapped and static for the lifetime of the kernel. unsafe impl Send for GlobalPageAlloc {} static GLOBAL_PAGE_ALLOC: Spinlock = Spinlock::new(GlobalPageAlloc { @@ -556,8 +554,8 @@ impl Drop for KernelObjectVirtHandle { let kctx = kernel_context(); { let mut slots = kctx.slots.lock(); - // We don't need to tell the object that it's no longer mapped in the kernel context, since object - // invalidation always informs the kernel context. + // We don't need to tell the object that it's no longer mapped in the kernel context, + // since object invalidation always informs the kernel context. slots.remove(self.slot); } kctx.with_arch(KERNEL_SCTX, |arch| { @@ -779,6 +777,7 @@ pub fn page_fault(addr: VirtAddr, cause: MemoryAccessKind, flags: PageFaultFlags #[cfg(test)] mod test { use alloc::sync::Arc; + use twizzler_abi::{marker::BaseType, object::Protections}; use twizzler_kernel_macros::kernel_test; diff --git a/src/kernel/src/memory/frame.rs b/src/kernel/src/memory/frame.rs index 127a795b..b3ceab53 100755 --- a/src/kernel/src/memory/frame.rs +++ b/src/kernel/src/memory/frame.rs @@ -1,622 +1,630 @@ -//! Manage physical frames. -//! -//! On kernel initialization, the system will call into [init] in this module to pass information -//! about physical memory regions. Once that call completes, the physical frame allocator is ready -//! for use. This has to happen before any fully-bootstrapped memory manager is ready to use. Note, -//! though, that this module may have to perform memory allocation during initialization, so it'll -//! have to make use of the bootstrap memory allocator. -//! -//! Physical frames are physical pages of memory, whose size depends on the architecture compiled -//! for. A given physical frame can either be zeroed (that is, the physical memory the frame refers -//! to contains only zeros), or it can be indeterminate. This distinction is maintained because it's -//! common that we need to allocate zero pages AND pages that will be immediately overwritten. Upon -//! allocation, the caller can request a zeroed frame or an indeterminate frame. The allocator will -//! try to reserve known-zero frames for allocations that request them. -//! -//! Allocation returns a [FrameRef], which is a static-lifetime reference to a [Frame]. The [Frame] -//! is a bit of metadata associated with each physical frame in the system. One can efficiently get -//! the [FrameRef] given a physical address, and vice versa. -//! -//! Note: this code is somewhat cursed, since it needs to do a bunch of funky low-level memory -//! management without ever triggering the memory manager (can't allocate memory, since that could -//! recurse or deadlock), and we'll need the ability to store sets of pages without allocating memory -//! 
outside of this module as well, hence the intrusive linked list design. Additionally, the kernel -//! needs to be able to access frame data from possibly any CPU, so the whole type must be both Sync -//! and Send. This would be easy with the lock-around-inner trick, but this plays badly with the -//! intrusive list, and so we do some cursed manual locking to ensure write isolation. -//! -//! Note: This code uses intrusive linked lists (a type of intrusive data structure). These are standard -//! practice in C kernels, but are rarely needed these days. An intrusive list is a list that stores the -//! list's link data inside the nodes (`struct Foo {link: Link, ...}`) as opposed to storing the objects in -//! the list (`struct ListItem {item: T, link: Link}`). They are useful here because they can form -//! arbitrary containers while ensuring no memory is allocated to store the list, something that is very -//! important inside an allocator for physical pages. For more information, see: [https://docs.rs/intrusive-collections/latest/intrusive_collections/]. - -use core::{ - intrinsics::size_of, - mem::transmute, - sync::atomic::{AtomicU8, Ordering}, -}; - -use crate::{arch::memory::frame::FRAME_SIZE, once::Once}; -use alloc::vec::Vec; -use intrusive_collections::{intrusive_adapter, LinkedList, LinkedListLink}; - -use crate::arch::memory::phys_to_virt; -use crate::spinlock::Spinlock; - -use super::{MemoryRegion, MemoryRegionKind, PhysAddr}; - -pub type FrameRef = &'static Frame; -pub type FrameMutRef = &'static mut Frame; - -#[doc(hidden)] -struct AllocationRegion { - indexer: FrameIndexer, - next_for_init: PhysAddr, - pages: usize, - zeroed: LinkedList, - non_zeroed: LinkedList, -} - -// Safety: this is needed because of the raw pointer, but the raw pointer is static for the life of the kernel. -unsafe impl Send for AllocationRegion {} - -impl AllocationRegion { - fn contains(&self, pa: PhysAddr) -> bool { - self.indexer.contains(pa) - } - - fn get_frame(&self, pa: PhysAddr) -> Option { - self.indexer.get_frame(pa) - } - - /// Get a mutable frame reference. - /// - /// # Safety - /// pa must be a new frame - unsafe fn get_frame_mut(&mut self, pa: PhysAddr) -> Option { - self.indexer.get_frame_mut(pa) - } - - fn admit_one(&mut self) -> bool { - let next = self.next_for_init; - if !self.contains(next) { - return false; - } - self.next_for_init = self.next_for_init.offset(FRAME_SIZE).unwrap(); - - // Unwrap-Ok: we know this address is in this region already - // Safety: we are allocating a new, untouched frame here - let frame = unsafe { self.get_frame_mut(next) }.unwrap(); - // Safety: the frame can be reset since during admit_one we are the only ones with access to the frame data. 
- unsafe { frame.reset(next) }; - frame.set_admitted(); - frame.set_free(); - self.non_zeroed.push_back(frame); - true - } - - fn free(&mut self, frame: FrameRef) { - if !self.contains(frame.start_address()) { - return; - } - frame.set_free(); - if frame.is_zeroed() { - self.zeroed.push_back(frame); - } else { - self.non_zeroed.push_back(frame); - } - } - - fn allocate(&mut self, try_zero: bool, only_zero: bool) -> Option { - let frame = self.__do_allocate(try_zero, only_zero)?; - assert!(!frame.get_flags().contains(PhysicalFrameFlags::ALLOCATED)); - frame.set_allocated(); - Some(frame) - } - - fn __do_allocate(&mut self, try_zero: bool, only_zero: bool) -> Option { - if only_zero { - if let Some(f) = self.zeroed.pop_back() { - return Some(f); - } - return None; - } - if let Some(f) = self.non_zeroed.pop_back() { - return Some(f); - } - if try_zero { - if let Some(f) = self.zeroed.pop_back() { - return Some(f); - } - } - for i in 0..16 { - if !self.admit_one() { - if i == 0 { - return None; - } - break; - } - } - self.non_zeroed.pop_back() - } - - fn new(m: &MemoryRegion) -> Option { - let start = m.start.align_up(FRAME_SIZE as u64).unwrap(); - let length = m.length - (start.raw() - m.start.raw()) as usize; - let nr_pages = length / FRAME_SIZE; - if nr_pages <= 1 { - return None; - } - let frame_array_len = size_of::() * nr_pages; - let array_pages = ((frame_array_len - 1) / FRAME_SIZE) + 1; - if array_pages >= nr_pages { - return None; - } - - let frame_array_ptr = phys_to_virt(start).as_mut_ptr(); - - let mut this = Self { - // Safety: the pointer is to a static region of reserved memory. - indexer: unsafe { - FrameIndexer::new( - start.offset(array_pages * FRAME_SIZE).unwrap(), - (nr_pages - array_pages) * FRAME_SIZE, - frame_array_ptr, - frame_array_len, - ) - }, - next_for_init: start.offset(array_pages * FRAME_SIZE).unwrap(), - pages: nr_pages - array_pages, - zeroed: LinkedList::new(FrameAdapter::NEW), - non_zeroed: LinkedList::new(FrameAdapter::NEW), - }; - for _ in 0..16 { - this.admit_one(); - } - Some(this) - } -} - -#[doc(hidden)] -struct PhysicalFrameAllocator { - regions: Vec, - region_idx: usize, -} - -/// A physical frame. -/// -/// Contains a physical address and flags that indicate if the frame is zeroed or not. -pub struct Frame { - pa: PhysAddr, - flags: AtomicU8, - lock: AtomicU8, - link: LinkedListLink, -} -intrusive_adapter!(pub FrameAdapter = &'static Frame: Frame { link: LinkedListLink }); - -unsafe impl Send for Frame {} -unsafe impl Sync for Frame {} - -impl core::fmt::Debug for Frame { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("Frame") - .field("pa", &self.pa) - .field("flags", &self.flags.load(Ordering::SeqCst)) - .finish() - } -} - -impl Frame { - // Safety: must only be called once, during admit_one, when the frame has not been initialized yet. - unsafe fn reset(&mut self, pa: PhysAddr) { - self.lock.store(0, Ordering::SeqCst); - self.flags.store(0, Ordering::SeqCst); - let pa_ptr = &mut self.pa as *mut _; - *pa_ptr = pa; - self.link.force_unlink(); - // This store acts as a release for pa as well, which synchronizes with a load in lock (or unlock), which is always called - // at least once during allocation, so any thread that accesses a frame syncs-with this write. 
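From a caller's point of view, the ZEROED bookkeeping above boils down to the small lifecycle below (an illustrative helper inside memory::frame; how the FrameRef was obtained is elided):

fn recycle(frame: FrameRef) {
    frame.zero();                 // scrubs the backing memory and sets ZEROED
    debug_assert!(frame.is_zeroed());
    // Once someone writes to the frame, the flag must be dropped by hand:
    frame.set_not_zero();         // clears the flag, leaves the memory alone
}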
- self.unlock(); - } - - pub fn with_link(&self, f: impl FnOnce(&mut LinkedListLink) -> R) -> R { - self.lock(); - let link = unsafe { - (&self.link as *const _ as *mut LinkedListLink) - .as_mut() - .unwrap() - }; - let r = f(link); - self.unlock(); - r - } - - fn lock(&self) { - while self - .lock - .compare_exchange_weak(0, 1, Ordering::SeqCst, Ordering::SeqCst) - .is_err() - { - core::hint::spin_loop(); - } - } - - fn unlock(&self) { - self.lock.store(0, Ordering::SeqCst); - } - - /// Get the start address of the frame. - pub fn start_address(&self) -> PhysAddr { - self.pa - } - - /// Get the length of the frame in bytes. - pub fn size(&self) -> usize { - FRAME_SIZE - } - - /// Zero a frame. - /// - /// This marks a frame as being zeroed and also set the underlying physical memory to zero. - pub fn zero(&self) { - self.lock(); - let virt = phys_to_virt(self.pa); - let ptr: *mut u8 = virt.as_mut_ptr(); - let slice = unsafe { core::slice::from_raw_parts_mut(ptr, self.size()) }; - slice.fill(0); - self.flags - .fetch_or(PhysicalFrameFlags::ZEROED.bits(), Ordering::SeqCst); - self.unlock(); - } - - /// Mark this frame as not being zeroed. Does not modify the physical memory controlled by this Frame. - pub fn set_not_zero(&self) { - self.lock(); - self.flags - .fetch_and(!PhysicalFrameFlags::ZEROED.bits(), Ordering::SeqCst); - self.unlock(); - } - - /// Check if this frame is marked as zeroed. Does not look at the underlying physical memory. - pub fn is_zeroed(&self) -> bool { - self.get_flags().contains(PhysicalFrameFlags::ZEROED) - } - - fn set_admitted(&self) { - self.flags - .fetch_or(PhysicalFrameFlags::ADMITTED.bits(), Ordering::SeqCst); - } - - fn set_free(&self) { - self.flags - .fetch_and(!PhysicalFrameFlags::ALLOCATED.bits(), Ordering::SeqCst); - } - - fn set_allocated(&self) { - self.flags - .fetch_or(PhysicalFrameFlags::ALLOCATED.bits(), Ordering::SeqCst); - } - - /// Get the current flags. - pub fn get_flags(&self) -> PhysicalFrameFlags { - PhysicalFrameFlags::from_bits_truncate(self.flags.load(Ordering::SeqCst)) - } - - /// Copy contents of one frame into another. If the other frame is marked as zeroed, copying will not happen. Both - /// frames are locked first. - pub fn copy_contents_from(&self, other: &Frame) { - self.lock(); - // We don't need to lock the other frame, since if its contents aren't synchronized with this operation, it - // could have reordered to before or after. - if other.is_zeroed() { - // if both are zero, do nothing - if self.is_zeroed() { - self.unlock(); - return; - } - // if other is zero and we aren't, just zero instead of copy - let virt = phys_to_virt(self.pa); - let ptr: *mut u8 = virt.as_mut_ptr(); - let slice = unsafe { core::slice::from_raw_parts_mut(ptr, self.size()) }; - slice.fill(0); - self.flags - .fetch_or(PhysicalFrameFlags::ZEROED.bits(), Ordering::SeqCst); - self.unlock(); - return; - } - - self.flags - .fetch_and(!PhysicalFrameFlags::ZEROED.bits(), Ordering::SeqCst); - let virt = phys_to_virt(self.pa); - let ptr: *mut u8 = virt.as_mut_ptr(); - let slice = unsafe { core::slice::from_raw_parts_mut(ptr, self.size()) }; - - let othervirt = phys_to_virt(other.pa); - let otherptr: *mut u8 = othervirt.as_mut_ptr(); - let otherslice = unsafe { core::slice::from_raw_parts_mut(otherptr, self.size()) }; - - slice.copy_from_slice(otherslice); - self.unlock(); - } - - /// Copy from another physical address into this frame. 
- pub fn copy_contents_from_physaddr(&self, other: PhysAddr) { - self.lock(); - self.flags - .fetch_and(!PhysicalFrameFlags::ZEROED.bits(), Ordering::SeqCst); - let virt = phys_to_virt(self.pa); - let ptr: *mut u8 = virt.as_mut_ptr(); - let slice = unsafe { core::slice::from_raw_parts_mut(ptr, self.size()) }; - - let othervirt = phys_to_virt(other); - let otherptr: *mut u8 = othervirt.as_mut_ptr(); - let otherslice = unsafe { core::slice::from_raw_parts_mut(otherptr, self.size()) }; - - slice.copy_from_slice(otherslice); - self.unlock(); - } -} - -bitflags::bitflags! { - /// Flags to control the state of a physical frame. Also used by the alloc functions to indicate - /// what kind of physical frame is being requested. - #[derive(Clone, Copy)] - pub struct PhysicalFrameFlags: u8 { - /// The frame is zeroed (or, allocate a zeroed frame) - const ZEROED = 1; - /// The frame has been allocated by the system. - const ALLOCATED = 2; - /// (internal) The frame has been admitted into the frame tracking system. - const ADMITTED = 4; - } -} - -impl PhysicalFrameAllocator { - fn new(memory_regions: &[MemoryRegion]) -> PhysicalFrameAllocator { - Self { - region_idx: 0, - regions: memory_regions - .iter() - .filter_map(|m| { - if m.kind == MemoryRegionKind::UsableRam { - AllocationRegion::new(m) - } else { - None - } - }) - .collect(), - } - } - - fn alloc(&mut self, flags: PhysicalFrameFlags, fallback: bool) -> Option { - let frame = if fallback { - Some(self.__do_alloc_fallback()) - } else { - self.__do_alloc(flags) - }?; - if flags.contains(PhysicalFrameFlags::ZEROED) && !frame.is_zeroed() { - frame.zero(); - } - Some(frame) - } - - fn __do_alloc_fallback(&mut self) -> FrameRef { - // fallback - for reg in &mut self.regions { - let frame = reg.allocate(true, false); - if let Some(frame) = frame { - return frame; - } - } - panic!("out of memory"); - } - - fn __do_alloc(&mut self, flags: PhysicalFrameFlags) -> Option { - let needs_zero = flags.contains(PhysicalFrameFlags::ZEROED); - // try to find an exact match - for reg in &mut self.regions { - let frame = reg.allocate(false, needs_zero); - if frame.is_some() { - return frame; - } - } - None - } - - fn free(&mut self, frame: FrameRef) { - for reg in &mut self.regions { - if reg.contains(frame.start_address()) { - reg.free(frame); - return; - } - } - } -} - -#[doc(hidden)] -static PFA: Once> = Once::new(); - -#[derive(Clone)] -struct FrameIndexer { - start: PhysAddr, - len: usize, - frame_array_ptr: *const Frame, - frame_array_len: usize, -} - -impl FrameIndexer { - /// Build a new frame indexer. - /// - /// # Safety: The passed pointer and len must point to a valid section of memory reserved for the frame slice, which will last the lifetime of the kernel. 
- unsafe fn new( - start: PhysAddr, - len: usize, - frame_array_ptr: *const Frame, - frame_array_len: usize, - ) -> Self { - Self { - start, - len, - frame_array_ptr, - frame_array_len, - } - } - - fn frame_array(&self) -> &[Frame] { - unsafe { core::slice::from_raw_parts(self.frame_array_ptr, self.frame_array_len) } - } - - fn frame_array_mut(&mut self) -> &mut [Frame] { - unsafe { - core::slice::from_raw_parts_mut(self.frame_array_ptr as *mut _, self.frame_array_len) - } - } - - fn get_frame(&self, pa: PhysAddr) -> Option { - if !self.contains(pa) { - return None; - } - let index = (pa - self.start) / FRAME_SIZE; - assert!(index < self.frame_array_len); - let frame = &self.frame_array()[index as usize]; - // Safety: the frame array is static for the life of the kernel - Some(unsafe { transmute(frame) }) - } - - unsafe fn get_frame_mut(&mut self, pa: PhysAddr) -> Option { - if !self.contains(pa) { - return None; - } - let index = (pa - self.start) / FRAME_SIZE; - assert!(index < self.frame_array_len); - let frame = &mut self.frame_array_mut()[index as usize]; - // Safety: the frame array is static for the life of the kernel - Some(unsafe { transmute(frame) }) - } - - fn contains(&self, pa: PhysAddr) -> bool { - pa >= self.start && pa < (self.start.offset(self.len).unwrap()) - } -} - -// Safety: this is needed because of the raw pointer, but the raw pointer is static for the life of the kernel. -unsafe impl Send for FrameIndexer {} -unsafe impl Sync for FrameIndexer {} - -#[doc(hidden)] -static FI: Once> = Once::new(); - -/// Initialize the global physical frame allocator. -/// # Arguments -/// * `regions`: An array of memory regions passed from the boot info system. -pub fn init(regions: &[MemoryRegion]) { - let pfa = PhysicalFrameAllocator::new(regions); - FI.call_once(|| pfa.regions.iter().map(|r| r.indexer.clone()).collect()); - PFA.call_once(|| Spinlock::new(pfa)); -} - -/// Allocate a physical frame. -/// -/// The `flags` argument allows one to control if the resulting frame is -/// zeroed or not. Note that passing [PhysicalFrameFlags]::ZEROED guarantees that the returned frame -/// is zeroed, but the converse is not true. -/// -/// The returned frame will have its ZEROED flag cleared. In the future, this will probably change -/// to reflect the correct state of the frame. -/// -/// # Panic -/// Will panic if out of physical memory. For this reason, you probably want to use [try_alloc_frame]. -/// -/// # Examples -/// ``` -/// let uninitialized_frame = alloc_frame(PhysicalFrameFlags::empty()); -/// let zeroed_frame = alloc_frame(PhysicalFrameFlags::ZEROED); -/// ``` -pub fn alloc_frame(flags: PhysicalFrameFlags) -> FrameRef { - let mut frame = { PFA.wait().lock().alloc(flags, false) }; - if frame.is_none() { - frame = PFA.wait().lock().alloc(flags, true); - } - let frame = frame.expect("out of memory"); - if flags.contains(PhysicalFrameFlags::ZEROED) { - assert!(frame.is_zeroed()); - } - /* TODO: try to use the MMU to detect if a page is actually ever written to or not */ - frame.set_not_zero(); - assert!(frame.get_flags().contains(PhysicalFrameFlags::ADMITTED)); - assert!(frame.get_flags().contains(PhysicalFrameFlags::ALLOCATED)); - frame -} - -/// Try to allocate a physical frame. The flags argument is the same as in [alloc_frame]. Returns -/// None if no physical frame is available. -pub fn try_alloc_frame(flags: PhysicalFrameFlags) -> Option { - Some(alloc_frame(flags)) -} - -/// Free a physical frame. 
-/// -/// If the frame's flags indicates that it is zeroed, it will be placed on -/// the zeroed list. -pub fn free_frame(frame: FrameRef) { - assert!(frame.get_flags().contains(PhysicalFrameFlags::ADMITTED)); - assert!(frame.get_flags().contains(PhysicalFrameFlags::ALLOCATED)); - PFA.wait().lock().free(frame); -} - -/// Get a FrameRef from a physical address. -pub fn get_frame(pa: PhysAddr) -> Option { - let fi = FI.wait(); - for fi in fi { - let f = fi.get_frame(pa); - if f.is_some() { - return f; - } - } - None -} - -#[cfg(test)] -mod tests { - use alloc::vec::Vec; - use twizzler_kernel_macros::kernel_test; - - use crate::utils::quick_random; - - use super::{alloc_frame, free_frame, get_frame, PhysicalFrameFlags}; - - #[kernel_test] - fn test_get_frame() { - let frame = alloc_frame(PhysicalFrameFlags::empty()); - let addr = frame.start_address(); - let test_frame = get_frame(addr).unwrap(); - assert!(core::ptr::eq(frame as *const _, test_frame as *const _)); - } - - #[kernel_test] - fn stress_test_pmm() { - let mut stack = Vec::new(); - for _ in 0..100000 { - let x = quick_random(); - let y = quick_random(); - let z = quick_random(); - if x % 2 == 0 && stack.len() < 1000 { - let frame = if y % 3 == 0 { - alloc_frame(PhysicalFrameFlags::ZEROED) - } else { - alloc_frame(PhysicalFrameFlags::empty()) - }; - if z % 5 == 0 { - frame.zero(); - } - stack.push(frame); - } else { - if let Some(frame) = stack.pop() { - free_frame(frame); - } - } - } - } -} +//! Manage physical frames. +//! +//! On kernel initialization, the system will call into [init] in this module to pass information +//! about physical memory regions. Once that call completes, the physical frame allocator is ready +//! for use. This has to happen before any fully-bootstrapped memory manager is ready to use. Note, +//! though, that this module may have to perform memory allocation during initialization, so it'll +//! have to make use of the bootstrap memory allocator. +//! +//! Physical frames are physical pages of memory, whose size depends on the architecture compiled +//! for. A given physical frame can either be zeroed (that is, the physical memory the frame refers +//! to contains only zeros), or it can be indeterminate. This distinction is maintained because it's +//! common that we need to allocate zero pages AND pages that will be immediately overwritten. Upon +//! allocation, the caller can request a zeroed frame or an indeterminate frame. The allocator will +//! try to reserve known-zero frames for allocations that request them. +//! +//! Allocation returns a [FrameRef], which is a static-lifetime reference to a [Frame]. The [Frame] +//! is a bit of metadata associated with each physical frame in the system. One can efficiently get +//! the [FrameRef] given a physical address, and vice versa. +//! +//! Note: this code is somewhat cursed, since it needs to do a bunch of funky low-level memory +//! management without ever triggering the memory manager (can't allocate memory, since that could +//! recurse or deadlock), and we'll need the ability to store sets of pages without allocating +//! memory outside of this module as well, hence the intrusive linked list design. Additionally, the +//! kernel needs to be able to access frame data from possibly any CPU, so the whole type must be +//! both Sync and Send. This would be easy with the lock-around-inner trick, but this plays badly +//! with the intrusive list, and so we do some cursed manual locking to ensure write isolation. +//! +//! 
Note: This code uses intrusive linked lists (a type of intrusive data structure). These are +//! standard practice in C kernels, but are rarely needed these days. An intrusive list is a list +//! that stores the list's link data inside the nodes (`struct Foo {link: Link, ...}`) as opposed to +//! storing the objects in the list (`struct ListItem {item: T, link: Link}`). They are useful +//! here because they can form arbitrary containers while ensuring no memory is allocated to store +//! the list, something that is very important inside an allocator for physical pages. For more information, see: [https://docs.rs/intrusive-collections/latest/intrusive_collections/]. + +use alloc::vec::Vec; +use core::{ + intrinsics::size_of, + mem::transmute, + sync::atomic::{AtomicU8, Ordering}, +}; + +use intrusive_collections::{intrusive_adapter, LinkedList, LinkedListLink}; + +use super::{MemoryRegion, MemoryRegionKind, PhysAddr}; +use crate::{ + arch::memory::{frame::FRAME_SIZE, phys_to_virt}, + once::Once, + spinlock::Spinlock, +}; + +pub type FrameRef = &'static Frame; +pub type FrameMutRef = &'static mut Frame; + +#[doc(hidden)] +struct AllocationRegion { + indexer: FrameIndexer, + next_for_init: PhysAddr, + pages: usize, + zeroed: LinkedList, + non_zeroed: LinkedList, +} + +// Safety: this is needed because of the raw pointer, but the raw pointer is static for the life of +// the kernel. +unsafe impl Send for AllocationRegion {} + +impl AllocationRegion { + fn contains(&self, pa: PhysAddr) -> bool { + self.indexer.contains(pa) + } + + fn get_frame(&self, pa: PhysAddr) -> Option { + self.indexer.get_frame(pa) + } + + /// Get a mutable frame reference. + /// + /// # Safety + /// pa must be a new frame + unsafe fn get_frame_mut(&mut self, pa: PhysAddr) -> Option { + self.indexer.get_frame_mut(pa) + } + + fn admit_one(&mut self) -> bool { + let next = self.next_for_init; + if !self.contains(next) { + return false; + } + self.next_for_init = self.next_for_init.offset(FRAME_SIZE).unwrap(); + + // Unwrap-Ok: we know this address is in this region already + // Safety: we are allocating a new, untouched frame here + let frame = unsafe { self.get_frame_mut(next) }.unwrap(); + // Safety: the frame can be reset since during admit_one we are the only ones with access to + // the frame data. 
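// Illustrative sketch, not from the patched sources: the intrusive-list
// pattern described in the module docs above, using the same
// intrusive-collections crate and the same adapter shape as FrameAdapter.
// The node type and its values here are hypothetical.
use alloc::boxed::Box;
use intrusive_collections::{intrusive_adapter, LinkedList, LinkedListLink};

struct Node {
    link: LinkedListLink, // the link data lives inside the node itself
    value: u32,
}
intrusive_adapter!(NodeAdapter = &'static Node: Node { link: LinkedListLink });

fn sketch_intrusive_list() {
    let node: &'static Node = Box::leak(Box::new(Node {
        link: LinkedListLink::new(),
        value: 7,
    }));
    // Pushing stores only the pointer; no memory is allocated for list nodes,
    // which is why frame.rs can use this pattern inside the page allocator.
    let mut list = LinkedList::new(NodeAdapter::new());
    list.push_back(node);
    assert_eq!(list.pop_back().unwrap().value, 7);
}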
+ unsafe { frame.reset(next) }; + frame.set_admitted(); + frame.set_free(); + self.non_zeroed.push_back(frame); + true + } + + fn free(&mut self, frame: FrameRef) { + if !self.contains(frame.start_address()) { + return; + } + frame.set_free(); + if frame.is_zeroed() { + self.zeroed.push_back(frame); + } else { + self.non_zeroed.push_back(frame); + } + } + + fn allocate(&mut self, try_zero: bool, only_zero: bool) -> Option { + let frame = self.__do_allocate(try_zero, only_zero)?; + assert!(!frame.get_flags().contains(PhysicalFrameFlags::ALLOCATED)); + frame.set_allocated(); + Some(frame) + } + + fn __do_allocate(&mut self, try_zero: bool, only_zero: bool) -> Option { + if only_zero { + if let Some(f) = self.zeroed.pop_back() { + return Some(f); + } + return None; + } + if let Some(f) = self.non_zeroed.pop_back() { + return Some(f); + } + if try_zero { + if let Some(f) = self.zeroed.pop_back() { + return Some(f); + } + } + for i in 0..16 { + if !self.admit_one() { + if i == 0 { + return None; + } + break; + } + } + self.non_zeroed.pop_back() + } + + fn new(m: &MemoryRegion) -> Option { + let start = m.start.align_up(FRAME_SIZE as u64).unwrap(); + let length = m.length - (start.raw() - m.start.raw()) as usize; + let nr_pages = length / FRAME_SIZE; + if nr_pages <= 1 { + return None; + } + let frame_array_len = size_of::() * nr_pages; + let array_pages = ((frame_array_len - 1) / FRAME_SIZE) + 1; + if array_pages >= nr_pages { + return None; + } + + let frame_array_ptr = phys_to_virt(start).as_mut_ptr(); + + let mut this = Self { + // Safety: the pointer is to a static region of reserved memory. + indexer: unsafe { + FrameIndexer::new( + start.offset(array_pages * FRAME_SIZE).unwrap(), + (nr_pages - array_pages) * FRAME_SIZE, + frame_array_ptr, + frame_array_len, + ) + }, + next_for_init: start.offset(array_pages * FRAME_SIZE).unwrap(), + pages: nr_pages - array_pages, + zeroed: LinkedList::new(FrameAdapter::NEW), + non_zeroed: LinkedList::new(FrameAdapter::NEW), + }; + for _ in 0..16 { + this.admit_one(); + } + Some(this) + } +} + +#[doc(hidden)] +struct PhysicalFrameAllocator { + regions: Vec, + region_idx: usize, +} + +/// A physical frame. +/// +/// Contains a physical address and flags that indicate if the frame is zeroed or not. +pub struct Frame { + pa: PhysAddr, + flags: AtomicU8, + lock: AtomicU8, + link: LinkedListLink, +} +intrusive_adapter!(pub FrameAdapter = &'static Frame: Frame { link: LinkedListLink }); + +unsafe impl Send for Frame {} +unsafe impl Sync for Frame {} + +impl core::fmt::Debug for Frame { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Frame") + .field("pa", &self.pa) + .field("flags", &self.flags.load(Ordering::SeqCst)) + .finish() + } +} + +impl Frame { + // Safety: must only be called once, during admit_one, when the frame has not been initialized + // yet. + unsafe fn reset(&mut self, pa: PhysAddr) { + self.lock.store(0, Ordering::SeqCst); + self.flags.store(0, Ordering::SeqCst); + let pa_ptr = &mut self.pa as *mut _; + *pa_ptr = pa; + self.link.force_unlink(); + // This store acts as a release for pa as well, which synchronizes with a load in lock (or + // unlock), which is always called at least once during allocation, so any thread + // that accesses a frame syncs-with this write. 
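// Illustrative sketch, not from the patched sources: the per-region metadata
// sizing that AllocationRegion::new() performs above, redone with plain
// numbers. The frame size and size_of::<Frame>() are assumed values here.
const SKETCH_FRAME_SIZE: usize = 4096;
const SKETCH_FRAME_META: usize = 48; // assumed size_of::<Frame>()

fn sketch_array_pages(nr_pages: usize) -> usize {
    let frame_array_len = SKETCH_FRAME_META * nr_pages;
    // Same rounding as ((frame_array_len - 1) / FRAME_SIZE) + 1 above.
    (frame_array_len + SKETCH_FRAME_SIZE - 1) / SKETCH_FRAME_SIZE
}

// Example: a 64 MiB usable region holds 16384 frames; their metadata needs
// 48 * 16384 = 786432 bytes = 192 pages, leaving 16192 frames to hand out.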
+ self.unlock(); + } + + pub fn with_link(&self, f: impl FnOnce(&mut LinkedListLink) -> R) -> R { + self.lock(); + let link = unsafe { + (&self.link as *const _ as *mut LinkedListLink) + .as_mut() + .unwrap() + }; + let r = f(link); + self.unlock(); + r + } + + fn lock(&self) { + while self + .lock + .compare_exchange_weak(0, 1, Ordering::SeqCst, Ordering::SeqCst) + .is_err() + { + core::hint::spin_loop(); + } + } + + fn unlock(&self) { + self.lock.store(0, Ordering::SeqCst); + } + + /// Get the start address of the frame. + pub fn start_address(&self) -> PhysAddr { + self.pa + } + + /// Get the length of the frame in bytes. + pub fn size(&self) -> usize { + FRAME_SIZE + } + + /// Zero a frame. + /// + /// This marks a frame as being zeroed and also set the underlying physical memory to zero. + pub fn zero(&self) { + self.lock(); + let virt = phys_to_virt(self.pa); + let ptr: *mut u8 = virt.as_mut_ptr(); + let slice = unsafe { core::slice::from_raw_parts_mut(ptr, self.size()) }; + slice.fill(0); + self.flags + .fetch_or(PhysicalFrameFlags::ZEROED.bits(), Ordering::SeqCst); + self.unlock(); + } + + /// Mark this frame as not being zeroed. Does not modify the physical memory controlled by this + /// Frame. + pub fn set_not_zero(&self) { + self.lock(); + self.flags + .fetch_and(!PhysicalFrameFlags::ZEROED.bits(), Ordering::SeqCst); + self.unlock(); + } + + /// Check if this frame is marked as zeroed. Does not look at the underlying physical memory. + pub fn is_zeroed(&self) -> bool { + self.get_flags().contains(PhysicalFrameFlags::ZEROED) + } + + fn set_admitted(&self) { + self.flags + .fetch_or(PhysicalFrameFlags::ADMITTED.bits(), Ordering::SeqCst); + } + + fn set_free(&self) { + self.flags + .fetch_and(!PhysicalFrameFlags::ALLOCATED.bits(), Ordering::SeqCst); + } + + fn set_allocated(&self) { + self.flags + .fetch_or(PhysicalFrameFlags::ALLOCATED.bits(), Ordering::SeqCst); + } + + /// Get the current flags. + pub fn get_flags(&self) -> PhysicalFrameFlags { + PhysicalFrameFlags::from_bits_truncate(self.flags.load(Ordering::SeqCst)) + } + + /// Copy contents of one frame into another. If the other frame is marked as zeroed, copying + /// will not happen. Both frames are locked first. + pub fn copy_contents_from(&self, other: &Frame) { + self.lock(); + // We don't need to lock the other frame, since if its contents aren't synchronized with + // this operation, it could have reordered to before or after. + if other.is_zeroed() { + // if both are zero, do nothing + if self.is_zeroed() { + self.unlock(); + return; + } + // if other is zero and we aren't, just zero instead of copy + let virt = phys_to_virt(self.pa); + let ptr: *mut u8 = virt.as_mut_ptr(); + let slice = unsafe { core::slice::from_raw_parts_mut(ptr, self.size()) }; + slice.fill(0); + self.flags + .fetch_or(PhysicalFrameFlags::ZEROED.bits(), Ordering::SeqCst); + self.unlock(); + return; + } + + self.flags + .fetch_and(!PhysicalFrameFlags::ZEROED.bits(), Ordering::SeqCst); + let virt = phys_to_virt(self.pa); + let ptr: *mut u8 = virt.as_mut_ptr(); + let slice = unsafe { core::slice::from_raw_parts_mut(ptr, self.size()) }; + + let othervirt = phys_to_virt(other.pa); + let otherptr: *mut u8 = othervirt.as_mut_ptr(); + let otherslice = unsafe { core::slice::from_raw_parts_mut(otherptr, self.size()) }; + + slice.copy_from_slice(otherslice); + self.unlock(); + } + + /// Copy from another physical address into this frame. 
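// Illustrative sketch, not from the patched sources: the branch structure of
// copy_contents_from() above, restated over plain booleans for clarity.
fn sketch_copy_action(dest_zeroed: bool, src_zeroed: bool) -> &'static str {
    match (src_zeroed, dest_zeroed) {
        (true, true) => "both frames already zero: nothing to do",
        (true, false) => "source is zero: zero-fill the destination instead of copying",
        (false, _) => "byte-copy source into destination and clear its ZEROED flag",
    }
}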
+ pub fn copy_contents_from_physaddr(&self, other: PhysAddr) { + self.lock(); + self.flags + .fetch_and(!PhysicalFrameFlags::ZEROED.bits(), Ordering::SeqCst); + let virt = phys_to_virt(self.pa); + let ptr: *mut u8 = virt.as_mut_ptr(); + let slice = unsafe { core::slice::from_raw_parts_mut(ptr, self.size()) }; + + let othervirt = phys_to_virt(other); + let otherptr: *mut u8 = othervirt.as_mut_ptr(); + let otherslice = unsafe { core::slice::from_raw_parts_mut(otherptr, self.size()) }; + + slice.copy_from_slice(otherslice); + self.unlock(); + } +} + +bitflags::bitflags! { + /// Flags to control the state of a physical frame. Also used by the alloc functions to indicate + /// what kind of physical frame is being requested. + #[derive(Clone, Copy)] + pub struct PhysicalFrameFlags: u8 { + /// The frame is zeroed (or, allocate a zeroed frame) + const ZEROED = 1; + /// The frame has been allocated by the system. + const ALLOCATED = 2; + /// (internal) The frame has been admitted into the frame tracking system. + const ADMITTED = 4; + } +} + +impl PhysicalFrameAllocator { + fn new(memory_regions: &[MemoryRegion]) -> PhysicalFrameAllocator { + Self { + region_idx: 0, + regions: memory_regions + .iter() + .filter_map(|m| { + if m.kind == MemoryRegionKind::UsableRam { + AllocationRegion::new(m) + } else { + None + } + }) + .collect(), + } + } + + fn alloc(&mut self, flags: PhysicalFrameFlags, fallback: bool) -> Option { + let frame = if fallback { + Some(self.__do_alloc_fallback()) + } else { + self.__do_alloc(flags) + }?; + if flags.contains(PhysicalFrameFlags::ZEROED) && !frame.is_zeroed() { + frame.zero(); + } + Some(frame) + } + + fn __do_alloc_fallback(&mut self) -> FrameRef { + // fallback + for reg in &mut self.regions { + let frame = reg.allocate(true, false); + if let Some(frame) = frame { + return frame; + } + } + panic!("out of memory"); + } + + fn __do_alloc(&mut self, flags: PhysicalFrameFlags) -> Option { + let needs_zero = flags.contains(PhysicalFrameFlags::ZEROED); + // try to find an exact match + for reg in &mut self.regions { + let frame = reg.allocate(false, needs_zero); + if frame.is_some() { + return frame; + } + } + None + } + + fn free(&mut self, frame: FrameRef) { + for reg in &mut self.regions { + if reg.contains(frame.start_address()) { + reg.free(frame); + return; + } + } + } +} + +#[doc(hidden)] +static PFA: Once> = Once::new(); + +#[derive(Clone)] +struct FrameIndexer { + start: PhysAddr, + len: usize, + frame_array_ptr: *const Frame, + frame_array_len: usize, +} + +impl FrameIndexer { + /// Build a new frame indexer. + /// + /// # Safety: The passed pointer and len must point to a valid section of memory reserved for the frame slice, which will last the lifetime of the kernel. 
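// Illustrative sketch, not from the patched sources: the address <-> index
// mapping FrameIndexer implements below, with plain u64 addresses standing in
// for PhysAddr and an assumed 4 KiB frame size.
const SKETCH_FRAME: u64 = 4096;

fn sketch_frame_index(region_start: u64, pa: u64) -> Option<u64> {
    if pa < region_start {
        return None;
    }
    Some((pa - region_start) / SKETCH_FRAME)
}

fn sketch_frame_base(region_start: u64, index: u64) -> u64 {
    region_start + index * SKETCH_FRAME
}

// sketch_frame_index(0x1000_0000, 0x1000_3010) == Some(3)
// sketch_frame_base(0x1000_0000, 3)            == 0x1000_3000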
+ unsafe fn new( + start: PhysAddr, + len: usize, + frame_array_ptr: *const Frame, + frame_array_len: usize, + ) -> Self { + Self { + start, + len, + frame_array_ptr, + frame_array_len, + } + } + + fn frame_array(&self) -> &[Frame] { + unsafe { core::slice::from_raw_parts(self.frame_array_ptr, self.frame_array_len) } + } + + fn frame_array_mut(&mut self) -> &mut [Frame] { + unsafe { + core::slice::from_raw_parts_mut(self.frame_array_ptr as *mut _, self.frame_array_len) + } + } + + fn get_frame(&self, pa: PhysAddr) -> Option { + if !self.contains(pa) { + return None; + } + let index = (pa - self.start) / FRAME_SIZE; + assert!(index < self.frame_array_len); + let frame = &self.frame_array()[index as usize]; + // Safety: the frame array is static for the life of the kernel + Some(unsafe { transmute(frame) }) + } + + unsafe fn get_frame_mut(&mut self, pa: PhysAddr) -> Option { + if !self.contains(pa) { + return None; + } + let index = (pa - self.start) / FRAME_SIZE; + assert!(index < self.frame_array_len); + let frame = &mut self.frame_array_mut()[index as usize]; + // Safety: the frame array is static for the life of the kernel + Some(unsafe { transmute(frame) }) + } + + fn contains(&self, pa: PhysAddr) -> bool { + pa >= self.start && pa < (self.start.offset(self.len).unwrap()) + } +} + +// Safety: this is needed because of the raw pointer, but the raw pointer is static for the life of +// the kernel. +unsafe impl Send for FrameIndexer {} +unsafe impl Sync for FrameIndexer {} + +#[doc(hidden)] +static FI: Once> = Once::new(); + +/// Initialize the global physical frame allocator. +/// # Arguments +/// * `regions`: An array of memory regions passed from the boot info system. +pub fn init(regions: &[MemoryRegion]) { + let pfa = PhysicalFrameAllocator::new(regions); + FI.call_once(|| pfa.regions.iter().map(|r| r.indexer.clone()).collect()); + PFA.call_once(|| Spinlock::new(pfa)); +} + +/// Allocate a physical frame. +/// +/// The `flags` argument allows one to control if the resulting frame is +/// zeroed or not. Note that passing [PhysicalFrameFlags]::ZEROED guarantees that the returned frame +/// is zeroed, but the converse is not true. +/// +/// The returned frame will have its ZEROED flag cleared. In the future, this will probably change +/// to reflect the correct state of the frame. +/// +/// # Panic +/// Will panic if out of physical memory. For this reason, you probably want to use +/// [try_alloc_frame]. +/// +/// # Examples +/// ``` +/// let uninitialized_frame = alloc_frame(PhysicalFrameFlags::empty()); +/// let zeroed_frame = alloc_frame(PhysicalFrameFlags::ZEROED); +/// ``` +pub fn alloc_frame(flags: PhysicalFrameFlags) -> FrameRef { + let mut frame = { PFA.wait().lock().alloc(flags, false) }; + if frame.is_none() { + frame = PFA.wait().lock().alloc(flags, true); + } + let frame = frame.expect("out of memory"); + if flags.contains(PhysicalFrameFlags::ZEROED) { + assert!(frame.is_zeroed()); + } + /* TODO: try to use the MMU to detect if a page is actually ever written to or not */ + frame.set_not_zero(); + assert!(frame.get_flags().contains(PhysicalFrameFlags::ADMITTED)); + assert!(frame.get_flags().contains(PhysicalFrameFlags::ALLOCATED)); + frame +} + +/// Try to allocate a physical frame. The flags argument is the same as in [alloc_frame]. Returns +/// None if no physical frame is available. +pub fn try_alloc_frame(flags: PhysicalFrameFlags) -> Option { + Some(alloc_frame(flags)) +} + +/// Free a physical frame. 
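// Illustrative sketch, not from the patched sources: a round trip through the
// public API documented in this file, assuming frame::init() has already run.
fn sketch_frame_round_trip() {
    // Contents indeterminate; cheapest when the caller overwrites the frame anyway.
    let scratch = alloc_frame(PhysicalFrameFlags::empty());
    // Physical memory guaranteed zero on return (though, per the note above,
    // alloc_frame currently clears the ZEROED flag before handing the frame out).
    let zeroed = alloc_frame(PhysicalFrameFlags::ZEROED);
    // Non-panicking variant for callers that can tolerate allocation failure.
    let maybe = try_alloc_frame(PhysicalFrameFlags::empty());

    free_frame(zeroed);
    free_frame(scratch);
    if let Some(extra) = maybe {
        free_frame(extra);
    }
}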
+/// +/// If the frame's flags indicates that it is zeroed, it will be placed on +/// the zeroed list. +pub fn free_frame(frame: FrameRef) { + assert!(frame.get_flags().contains(PhysicalFrameFlags::ADMITTED)); + assert!(frame.get_flags().contains(PhysicalFrameFlags::ALLOCATED)); + PFA.wait().lock().free(frame); +} + +/// Get a FrameRef from a physical address. +pub fn get_frame(pa: PhysAddr) -> Option { + let fi = FI.wait(); + for fi in fi { + let f = fi.get_frame(pa); + if f.is_some() { + return f; + } + } + None +} + +#[cfg(test)] +mod tests { + use alloc::vec::Vec; + + use twizzler_kernel_macros::kernel_test; + + use super::{alloc_frame, free_frame, get_frame, PhysicalFrameFlags}; + use crate::utils::quick_random; + + #[kernel_test] + fn test_get_frame() { + let frame = alloc_frame(PhysicalFrameFlags::empty()); + let addr = frame.start_address(); + let test_frame = get_frame(addr).unwrap(); + assert!(core::ptr::eq(frame as *const _, test_frame as *const _)); + } + + #[kernel_test] + fn stress_test_pmm() { + let mut stack = Vec::new(); + for _ in 0..100000 { + let x = quick_random(); + let y = quick_random(); + let z = quick_random(); + if x % 2 == 0 && stack.len() < 1000 { + let frame = if y % 3 == 0 { + alloc_frame(PhysicalFrameFlags::ZEROED) + } else { + alloc_frame(PhysicalFrameFlags::empty()) + }; + if z % 5 == 0 { + frame.zero(); + } + stack.push(frame); + } else { + if let Some(frame) = stack.pop() { + free_frame(frame); + } + } + } + } +} diff --git a/src/kernel/src/memory/mod.rs b/src/kernel/src/memory/mod.rs index 5bd3739e..11b3ad22 100755 --- a/src/kernel/src/memory/mod.rs +++ b/src/kernel/src/memory/mod.rs @@ -1,47 +1,47 @@ -use core::sync::atomic::{AtomicBool, Ordering}; - -use crate::{arch, security::KERNEL_SCTX, BootInfo}; - -pub mod allocator; -pub mod context; -pub mod frame; -pub mod pagetables; - -pub use arch::{PhysAddr, VirtAddr}; - -use self::context::{KernelMemoryContext, UserContext}; - -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub enum MemoryRegionKind { - UsableRam, - Reserved, - BootloaderReserved, -} - -pub struct MemoryRegion { - pub start: PhysAddr, - pub length: usize, - pub kind: MemoryRegionKind, -} - -pub fn init(boot_info: &B) { - frame::init(boot_info.memory_regions()); - let kc = context::kernel_context(); - kc.switch_to(KERNEL_SCTX); - kc.init_allocator(); - allocator::init(kc); - // set flag to indicate that mm system is initalized - MEM_INIT.store(true, Ordering::SeqCst); -} - -static MEM_INIT: AtomicBool = AtomicBool::new(false); - -/// Indicates if memory management has been initalized by the boot core. 
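// Illustrative sketch, not from the patched sources: how a later subsystem
// might gate itself on the MEM_INIT flag via is_init(); the caller here is
// hypothetical.
fn sketch_needs_heap() {
    if !crate::memory::is_init() {
        // Too early: only the bootstrap allocator exists at this point.
        return;
    }
    // Safe to allocate normally; the kernel context and allocator are set up.
}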
-pub fn is_init() -> bool { - MEM_INIT.load(Ordering::SeqCst) -} - -pub fn prep_smp() { - let kc = context::kernel_context(); - kc.prep_smp(); -} +use core::sync::atomic::{AtomicBool, Ordering}; + +use crate::{arch, security::KERNEL_SCTX, BootInfo}; + +pub mod allocator; +pub mod context; +pub mod frame; +pub mod pagetables; + +pub use arch::{PhysAddr, VirtAddr}; + +use self::context::{KernelMemoryContext, UserContext}; + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum MemoryRegionKind { + UsableRam, + Reserved, + BootloaderReserved, +} + +pub struct MemoryRegion { + pub start: PhysAddr, + pub length: usize, + pub kind: MemoryRegionKind, +} + +pub fn init(boot_info: &B) { + frame::init(boot_info.memory_regions()); + let kc = context::kernel_context(); + kc.switch_to(KERNEL_SCTX); + kc.init_allocator(); + allocator::init(kc); + // set flag to indicate that mm system is initalized + MEM_INIT.store(true, Ordering::SeqCst); +} + +static MEM_INIT: AtomicBool = AtomicBool::new(false); + +/// Indicates if memory management has been initalized by the boot core. +pub fn is_init() -> bool { + MEM_INIT.load(Ordering::SeqCst) +} + +pub fn prep_smp() { + let kc = context::kernel_context(); + kc.prep_smp(); +} diff --git a/src/kernel/src/memory/pagetables.rs b/src/kernel/src/memory/pagetables.rs index a77fcf5c..0120a6fe 100644 --- a/src/kernel/src/memory/pagetables.rs +++ b/src/kernel/src/memory/pagetables.rs @@ -9,11 +9,11 @@ mod reader; mod settings; mod table; -pub use cursor::MappingCursor; - -pub use crate::arch::memory::pagetables::Table; pub use consistency::DeferredUnmappingOps; +pub use cursor::MappingCursor; pub use mapper::Mapper; pub use phys_provider::{ContiguousProvider, PhysAddrProvider, ZeroPageProvider}; pub use reader::{MapInfo, MapReader}; pub use settings::{MappingFlags, MappingSettings}; + +pub use crate::arch::memory::pagetables::Table; diff --git a/src/kernel/src/memory/pagetables/consistency.rs b/src/kernel/src/memory/pagetables/consistency.rs index f6ff2eaa..21199505 100644 --- a/src/kernel/src/memory/pagetables/consistency.rs +++ b/src/kernel/src/memory/pagetables/consistency.rs @@ -8,7 +8,8 @@ use crate::{ memory::frame::{free_frame, FrameAdapter, FrameRef}, }; -/// Management for consistency, wrapping any cache-line flushing and TLB coherence into a single object. +/// Management for consistency, wrapping any cache-line flushing and TLB coherence into a single +/// object. pub(super) struct Consistency { cl: ArchCacheLineMgr, tlb: ArchTlbMgr, diff --git a/src/kernel/src/memory/pagetables/cursor.rs b/src/kernel/src/memory/pagetables/cursor.rs index b3c51ee2..db63bdd0 100644 --- a/src/kernel/src/memory/pagetables/cursor.rs +++ b/src/kernel/src/memory/pagetables/cursor.rs @@ -13,7 +13,8 @@ impl MappingCursor { Self { start, len } } - /// Advance the cursor by `len`. Should the resulting address be non-canonical, `None` is returned. + /// Advance the cursor by `len`. Should the resulting address be non-canonical, `None` is + /// returned. pub fn advance(mut self, len: usize) -> Option { if self.len <= len { return None; @@ -24,7 +25,8 @@ impl MappingCursor { Some(self) } - /// Advance the cursor by up to `len`, so we end up aligned on len. Should the resulting address be non-canonical, `None` is returned. + /// Advance the cursor by up to `len`, so we end up aligned on len. Should the resulting address + /// be non-canonical, `None` is returned. 
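// Illustrative sketch, not from the patched sources: the advance() behaviour
// documented above, modelled with plain usize addresses (no canonicality
// checks) to show the iteration shape a page-table walk follows.
#[derive(Clone, Copy)]
struct SketchCursor {
    start: usize,
    len: usize,
}

fn sketch_advance(mut c: SketchCursor, step: usize) -> Option<SketchCursor> {
    if c.len <= step {
        return None; // cursor exhausted, stop walking
    }
    c.start += step;
    c.len -= step;
    Some(c)
}

// Walking a three-page cursor one 0x1000-byte page at a time visits offsets
// 0x0000, 0x1000 and 0x2000, then sketch_advance returns None.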
pub fn align_advance(mut self, len: usize) -> Option { let vaddr = self.start.align_up(len as u64).ok()?; if vaddr == self.start { diff --git a/src/kernel/src/memory/pagetables/mapper.rs b/src/kernel/src/memory/pagetables/mapper.rs index ac3aac56..cc2832e0 100644 --- a/src/kernel/src/memory/pagetables/mapper.rs +++ b/src/kernel/src/memory/pagetables/mapper.rs @@ -1,14 +1,14 @@ -use crate::arch::{ - address::PhysAddr, - memory::pagetables::{Entry, Table}, -}; - use super::{ consistency::{Consistency, DeferredUnmappingOps}, MapInfo, MappingCursor, MappingSettings, PhysAddrProvider, }; +use crate::arch::{ + address::PhysAddr, + memory::pagetables::{Entry, Table}, +}; -/// Manager for a set of page tables. This is the primary interface for manipulating a set of page tables. +/// Manager for a set of page tables. This is the primary interface for manipulating a set of page +/// tables. pub struct Mapper { root: PhysAddr, start_level: usize, @@ -40,8 +40,8 @@ impl Mapper { unsafe { &*(self.root.kernel_vaddr().as_ptr::()) } } - /// Set a top level table to a direct value. Useful for creating large regions of global memory (like the kernel's - /// vaddr memory range). Does not perform any consistency operations. + /// Set a top level table to a direct value. Useful for creating large regions of global memory + /// (like the kernel's vaddr memory range). Does not perform any consistency operations. pub fn set_top_level_table(&mut self, index: usize, entry: Entry) { let root = self.root_mut(); let was_present = root[index].is_present(); @@ -56,7 +56,8 @@ impl Mapper { } } - /// Get a top level table entry's value. Useful for cloning large regions during creation (e.g. the kernel's memory region). + /// Get a top level table entry's value. Useful for cloning large regions during creation (e.g. + /// the kernel's memory region). pub fn get_top_level_table(&self, index: usize) -> Entry { let root = self.root(); root[index] @@ -81,8 +82,8 @@ impl Mapper { } #[must_use] - /// Unmap a region from the page tables. The deferred operations must be run, and must be run AFTER unlocking any - /// page table locks. + /// Unmap a region from the page tables. The deferred operations must be run, and must be run + /// AFTER unlocking any page table locks. pub fn unmap(&mut self, cursor: MappingCursor) -> DeferredUnmappingOps { let mut consist = Consistency::new(self.root); let level = self.start_level; @@ -99,9 +100,10 @@ impl Mapper { root.change(&mut consist, cursor, level, settings); } - /// Read the map of a single address (the start of the cursor). If there is a mapping at the specified location, - /// return the mapping information. Otherwise, return Err with a length that specifies how much the cursor may - /// advance before calling this function again to check for a new mapping. + /// Read the map of a single address (the start of the cursor). If there is a mapping at the + /// specified location, return the mapping information. Otherwise, return Err with a length + /// that specifies how much the cursor may advance before calling this function again to + /// check for a new mapping. 
pub(super) fn do_read_map(&self, cursor: &MappingCursor) -> Result { let level = self.start_level; let root = self.root(); diff --git a/src/kernel/src/memory/pagetables/phys_provider.rs b/src/kernel/src/memory/pagetables/phys_provider.rs index 14792566..4c68ccc0 100644 --- a/src/kernel/src/memory/pagetables/phys_provider.rs +++ b/src/kernel/src/memory/pagetables/phys_provider.rs @@ -12,7 +12,8 @@ pub trait PhysAddrProvider { } #[derive(Default)] -/// An implementation of [PhysAddrProvider] that just allocates and returns freshly allocated and zeroed frames. +/// An implementation of [PhysAddrProvider] that just allocates and returns freshly allocated and +/// zeroed frames. pub struct ZeroPageProvider { current: Option, } diff --git a/src/kernel/src/memory/pagetables/reader.rs b/src/kernel/src/memory/pagetables/reader.rs index 370bbdf9..77d47a39 100644 --- a/src/kernel/src/memory/pagetables/reader.rs +++ b/src/kernel/src/memory/pagetables/reader.rs @@ -1,6 +1,5 @@ -use crate::arch::address::{PhysAddr, VirtAddr}; - use super::{Mapper, MappingCursor, MappingSettings}; +use crate::arch::address::{PhysAddr, VirtAddr}; /// Iterator for reading mapping information. Will not cross non-canonical address boundaries. pub struct MapReader<'a> { @@ -57,10 +56,14 @@ impl<'a> Iterator for MapCoalescer<'a> { let next = self.reader.next(); if let Some(next) = next { if let Some(last) = &mut self.last { - if let Ok(last_next) = last.vaddr().offset(last.len()) && let Ok(last_next_phys) = last.paddr().offset(last.len()) - && last_next == next.vaddr() && last.settings() == next.settings() && last_next_phys == next.paddr() { - last.psize += next.len(); - continue; + if let Ok(last_next) = last.vaddr().offset(last.len()) + && let Ok(last_next_phys) = last.paddr().offset(last.len()) + && last_next == next.vaddr() + && last.settings() == next.settings() + && last_next_phys == next.paddr() + { + last.psize += next.len(); + continue; } let ret = last.clone(); @@ -86,8 +89,9 @@ pub struct MapInfo { } impl Mapper { - /// Create a [MapReader] that can be used to iterate over the region specified by the mapping cursor. If the mapping - /// cursor includes a non-canonical region, the reader will stop early. + /// Create a [MapReader] that can be used to iterate over the region specified by the mapping + /// cursor. If the mapping cursor includes a non-canonical region, the reader will stop + /// early. pub fn readmap(&self, cursor: MappingCursor) -> MapReader<'_> { MapReader { mapper: self, @@ -116,7 +120,8 @@ impl MapInfo { self.vaddr } - /// Length of this individual mapping (corresponds to the length of physical and virtual memory covered by this mapping). + /// Length of this individual mapping (corresponds to the length of physical and virtual memory + /// covered by this mapping). 
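// Illustrative sketch, not from the patched sources: the merge test
// MapCoalescer applies above, with plain integers standing in for VirtAddr,
// PhysAddr and MappingSettings.
#[derive(Clone, Copy, PartialEq, Eq)]
struct SketchInfo {
    vaddr: usize,
    paddr: usize,
    len: usize,
    settings: u32,
}

fn sketch_can_merge(last: SketchInfo, next: SketchInfo) -> bool {
    // Contiguous in both address spaces, and identical settings.
    last.vaddr + last.len == next.vaddr
        && last.paddr + last.len == next.paddr
        && last.settings == next.settings
}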
pub fn len(&self) -> usize { self.psize } diff --git a/src/kernel/src/memory/pagetables/table.rs b/src/kernel/src/memory/pagetables/table.rs index f70963f5..c53315b8 100644 --- a/src/kernel/src/memory/pagetables/table.rs +++ b/src/kernel/src/memory/pagetables/table.rs @@ -1,3 +1,4 @@ +use super::{consistency::Consistency, MapInfo, MappingCursor, MappingSettings, PhysAddrProvider}; use crate::{ arch::{ address::{PhysAddr, VirtAddr}, @@ -9,8 +10,6 @@ use crate::{ }, }; -use super::{consistency::Consistency, MapInfo, MappingCursor, MappingSettings, PhysAddrProvider}; - impl Table { fn next_table_mut(&mut self, index: usize) -> Option<&mut Table> { let entry = self[index]; diff --git a/src/kernel/src/mutex.rs b/src/kernel/src/mutex.rs index beaf3129..fadcea2f 100644 --- a/src/kernel/src/mutex.rs +++ b/src/kernel/src/mutex.rs @@ -65,8 +65,8 @@ impl Mutex { } } - /// Get a mut reference to the contained data. Does not perform locking, but is safe because we have a mut reference - /// to the mutex itself. + /// Get a mut reference to the contained data. Does not perform locking, but is safe because we + /// have a mut reference to the mutex itself. pub fn get_mut(&mut self) -> &mut T { self.cell.get_mut() } @@ -229,11 +229,12 @@ impl Default for Mutex { } mod test { + use alloc::{sync::Arc, vec::Vec}; use core::{cmp::max, time::Duration}; - use alloc::{sync::Arc, vec::Vec}; use twizzler_kernel_macros::kernel_test; + use super::Mutex; use crate::{ processor::NR_CPUS, syscall::sync::sys_thread_sync, @@ -241,8 +242,6 @@ mod test { utils::quick_random, }; - use super::Mutex; - #[kernel_test] fn test_mutex() { const ITERS: usize = 50; diff --git a/src/kernel/src/obj/copy.rs b/src/kernel/src/obj/copy.rs index 90152a37..edf67a60 100644 --- a/src/kernel/src/obj/copy.rs +++ b/src/kernel/src/obj/copy.rs @@ -1,13 +1,13 @@ -use crate::mutex::LockGuard; - use super::{ pages::Page, range::{PageRange, PageRangeTree}, InvalidateMode, ObjectRef, PageNumber, }; +use crate::mutex::LockGuard; -// Given a page range and a subrange within it, split it into two parts, the part before the subrange, and the part after. -// Each part may be None if its length is zero (consider splitting [1,2,3,4] with the subrange [1,2] => (None, Some([3,4]))). +// Given a page range and a subrange within it, split it into two parts, the part before the +// subrange, and the part after. Each part may be None if its length is zero (consider splitting +// [1,2,3,4] with the subrange [1,2] => (None, Some([3,4]))). fn split_range( range: PageRange, out: core::ops::Range, @@ -27,8 +27,9 @@ fn split_range( (r1, r2) } -// Add a page range to the object page tree. We are given: (1) a range we want to take from, (2) a subrange within that range (specified by offset and length), -// and a point to insert this into (dest_point). +// Add a page range to the object page tree. We are given: (1) a range we want to take from, (2) a +// subrange within that range (specified by offset and length), and a point to insert this into +// (dest_point). fn copy_range_to_object_tree( dest_tree: &mut LockGuard, dest_point: PageNumber, @@ -40,11 +41,13 @@ fn copy_range_to_object_tree( let new_offset = range.offset + offset; let new_range = range.new_from(dest_point, new_offset, length); let new_range_key = new_range.start..new_range.start.offset(new_range.length); - // Now insert the new range. This will, of course, kick any ranges that overlap with the new range out of the tree, so we - // need to split those and add in pages that shouldn't have been replaced. 
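// Illustrative sketch, not from the patched sources: the split described in
// the comment above split_range(), using usize page numbers instead of
// PageRange (the helper name is hypothetical).
fn sketch_split(
    range: core::ops::Range<usize>,
    out: core::ops::Range<usize>,
) -> (Option<core::ops::Range<usize>>, Option<core::ops::Range<usize>>) {
    let before = if range.start < out.start {
        Some(range.start..out.start)
    } else {
        None
    };
    let after = if out.end < range.end {
        Some(out.end..range.end)
    } else {
        None
    };
    (before, after)
}

// sketch_split(1..5, 1..3) == (None, Some(3..5)): nothing survives before the
// subrange, the tail [3, 4] survives after it, matching the [1,2,3,4] example.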
+ // Now insert the new range. This will, of course, kick any ranges that overlap with the new + // range out of the tree, so we need to split those and add in pages that shouldn't have + // been replaced. let kicked = dest_tree.insert_replace(new_range_key.clone(), new_range); for k in kicked { - // We need to split any kicked ranges into parts that don't overlap with new_range_key, and then reinsert those splits. + // We need to split any kicked ranges into parts that don't overlap with new_range_key, and + // then reinsert those splits. let (r1, r2) = split_range(k.1, new_range_key.clone()); if let Some(mut r1) = r1 { r1.gc_pagevec(); @@ -73,7 +76,8 @@ fn copy_single( if let Some((src_page, _)) = src_page { dest_page.as_mut_slice()[offset..max].copy_from_slice(&src_page.as_slice()[offset..max]); } else { - // TODO: could skip this on freshly created page, if we can detect that. That's just an optimization, though. + // TODO: could skip this on freshly created page, if we can detect that. That's just an + // optimization, though. dest_page.as_mut_slice()[offset..max].fill(0); } } @@ -96,15 +100,16 @@ fn zero_single( /// to copy on write. In the case that a page needs to be partially copied, we'll do a manual copy /// for that page. This only happens at the start and end of the copy region. /// -/// We allow non-page-aligned offsets, and that misalignment may differ between source and dest objects, -/// but the kernel may have to resort to a bytewise copy of the object pages if the offsets aren't both -/// misaligned by the same amount (e.g., if page size is 0x1000, then (dest off, src off) of (0x1000, 0x4000), -/// (0x1100, 0x3100) will still enable COW style copying, but (0x1100, 0x1200) will require manual copy). +/// We allow non-page-aligned offsets, and that misalignment may differ between source and dest +/// objects, but the kernel may have to resort to a bytewise copy of the object pages if the offsets +/// aren't both misaligned by the same amount (e.g., if page size is 0x1000, then (dest off, src +/// off) of (0x1000, 0x4000), (0x1100, 0x3100) will still enable COW style copying, but (0x1100, +/// 0x1200) will require manual copy). /// /// We lock the page trees for each object (in a canonical order) and ensure that the regions are /// remapped appropriately for any mapping of the objects. This ensures that the source object is -/// "checkpointed" before copying, and that the destination object cannot be read in the region being -/// overwritten until the copy is done. +/// "checkpointed" before copying, and that the destination object cannot be read in the region +/// being overwritten until the copy is done. pub fn copy_ranges( src: &ObjectRef, src_off: usize, @@ -136,8 +141,9 @@ pub fn copy_ranges( // Step 1: lock the page trees for the objects, in a canonical order. let (mut src_tree, mut dest_tree) = crate::utils::lock_two(&src.range_tree, &dest.range_tree); - // Step 2: Invalidate the page ranges. In the destination, we fully unmap the object for that range. In the source, - // we only need to ensure that no one modifies pages, so we just write-protect it. + // Step 2: Invalidate the page ranges. In the destination, we fully unmap the object for that + // range. In the source, we only need to ensure that no one modifies pages, so we just + // write-protect it. src.invalidate( src_start..src_start.offset(nr_pages), InvalidateMode::WriteProtect, @@ -177,29 +183,34 @@ pub fn copy_ranges( remaining_pages -= 1; } - // Step 3b: copy full pages. 
The number of pages is how many we have left, minus if we are going to do a partial page at the end. + // Step 3b: copy full pages. The number of pages is how many we have left, minus if we are going + // to do a partial page at the end. let vec_pages = remaining_pages - if end_offset > 0 { 1 } else { 0 }; let mut remaining_vec_pages = vec_pages; if vec_pages > 0 { let ranges = src_tree.range(src_point..src_point.offset(vec_pages)); for range in ranges { - // If the source point is below the range's start, then there's a hole in the source page tree. We don't have - // to copy at all, just shift up the dest point to where it needs to be for this range (since we will be copying from it). + // If the source point is below the range's start, then there's a hole in the source + // page tree. We don't have to copy at all, just shift up the dest point to + // where it needs to be for this range (since we will be copying from it). if src_point < *range.0 { let diff = *range.0 - src_point; // If the hole is bigger than our copy region, just break. - // Note: I don't think this will ever be true, given the way we select the ranges from the tree, but I haven't proven it yet. + // Note: I don't think this will ever be true, given the way we select the ranges + // from the tree, but I haven't proven it yet. if diff > remaining_vec_pages { dest_point = dest_point.offset(remaining_vec_pages); remaining_vec_pages = 0; break; } - // TODO: we'll need to either ensure everything is present, or interface with the pager. We'll probably do the later in the future. + // TODO: we'll need to either ensure everything is present, or interface with the + // pager. We'll probably do the later in the future. dest_point = dest_point.offset(diff); remaining_vec_pages -= diff; } - // Okay, finally, we can calculate the subrange from the source range that we'll be using for our destination region. + // Okay, finally, we can calculate the subrange from the source range that we'll be + // using for our destination region. let offset = src_point.num().saturating_sub(range.0.num()); let len = core::cmp::min(range.1.value().length - offset, remaining_vec_pages); copy_range_to_object_tree(&mut dest_tree, dest_point, range.1.value(), offset, len); @@ -271,7 +282,8 @@ fn copy_bytes( this_length } else { let this_length = core::cmp::min(PageNumber::PAGE_SIZE - this_dest_offset, remaining); - // TODO: could skip this on freshly created page, if we can detect that. That's just an optimization, though. + // TODO: could skip this on freshly created page, if we can detect that. That's just an + // optimization, though. dest_page.as_mut_slice()[this_dest_offset..(this_dest_offset + this_length)].fill(0); this_length }; @@ -335,16 +347,17 @@ pub fn zero_ranges(dest: &ObjectRef, dest_off: usize, byte_length: usize) { // Okay, now we'll try to evict page tree entries that comprise the region. let vec_pages = remaining_pages - if end_offset > 0 { 1 } else { 0 }; if vec_pages > 0 { - // Our plan is to collect all the page ranges within this range of pages, and remove them. We'll have to pay special attention - // to the first and last ranges, though, as they may only partially overlap the region to be zero'd. + // Our plan is to collect all the page ranges within this range of pages, and remove them. + // We'll have to pay special attention to the first and last ranges, though, as they + // may only partially overlap the region to be zero'd. 
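// Illustrative sketch, not from the patched sources: the alignment rule from
// the copy_ranges() docs earlier in this file, as a plain predicate with an
// assumed 0x1000-byte page size.
const SKETCH_PAGE: usize = 0x1000;

fn sketch_can_share_pages(dest_off: usize, src_off: usize) -> bool {
    // COW-style sharing only works when both offsets sit at the same position
    // within their page; otherwise copy_ranges falls back to a bytewise copy.
    dest_off % SKETCH_PAGE == src_off % SKETCH_PAGE
}

// sketch_can_share_pages(0x1000, 0x4000) == true
// sketch_can_share_pages(0x1100, 0x3100) == true
// sketch_can_share_pages(0x1100, 0x1200) == false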
let ranges = dest_tree.range(dest_point..dest_point.offset(vec_pages)); let mut points = ranges .into_iter() .map(|r| r.0.clone()) .collect::>(); - // Handle the last range, keeping only the parts that are after the zeroing region. We use pop because we - // won't be needing to consider this entry later. + // Handle the last range, keeping only the parts that are after the zeroing region. We use + // pop because we won't be needing to consider this entry later. if let Some(last) = points.pop() && let Some(mut last_range) = dest_tree.remove(&last) { @@ -366,8 +379,9 @@ pub fn zero_ranges(dest: &ObjectRef, dest_off: usize, byte_length: usize) { } } - // Handle the first range, truncating it if it starts before the zeroing region. Don't bother removing it from - // the list -- we'll just skip it in the iterator (remove head of vec can be slow). + // Handle the first range, truncating it if it starts before the zeroing region. Don't + // bother removing it from the list -- we'll just skip it in the iterator (remove + // head of vec can be slow). if let Some(first) = points.first() && let Some(mut first_range) = dest_tree.remove(first) { @@ -386,7 +400,8 @@ pub fn zero_ranges(dest: &ObjectRef, dest_off: usize, byte_length: usize) { } } - // Finally we can remove the remaining ranges that are wholely contained. Skip the first one, though, we handled that above. + // Finally we can remove the remaining ranges that are wholely contained. Skip the first + // one, though, we handled that above. for point in points.iter().skip(1) { dest_tree.remove(point); } @@ -406,14 +421,13 @@ pub fn zero_ranges(dest: &ObjectRef, dest_off: usize, byte_length: usize) { mod test { use twizzler_abi::{device::CacheType, object::Protections}; + use super::copy_ranges; use crate::{ memory::context::{kernel_context, KernelMemoryContext, ObjectContextInfo}, obj::{copy::zero_ranges, pages::Page, ObjectRef, PageNumber}, userinit::create_blank_object, }; - use super::copy_ranges; - fn check_slices( src: &ObjectRef, src_off: usize, @@ -505,18 +519,21 @@ mod test { let ps = PageNumber::PAGE_SIZE; let half_ps = PageNumber::PAGE_SIZE / 2; - // This is for mis-aligning the offsets. Use about an eighth of a page for that, the exact number doesn't matter. + // This is for mis-aligning the offsets. Use about an eighth of a page for that, the exact + // number doesn't matter. let abit = ps / 8; assert!(abit > 0 && abit < ps); - // Some helper functions for finding regions of the objects to use for copy testing automatically. + // Some helper functions for finding regions of the objects to use for copy testing + // automatically. let mut src_counting_page_num = 1; let mut dest_counting_page_num = 1; let calc_off = |page_num: usize, misalign: usize| -> usize { ps * page_num + misalign * abit }; let mut do_check = |src_off_misalign, dest_off_misalign, len| { - let nr_pages = len / PageNumber::PAGE_SIZE + 2; // Just bump up, assuming there are partial pages. Slightly wasteful, but it's just a test. + let nr_pages = len / PageNumber::PAGE_SIZE + 2; // Just bump up, assuming there are partial pages. Slightly wasteful, but it's just a + // test. let src_off = calc_off(src_counting_page_num, src_off_misalign); let dest_off = calc_off(dest_counting_page_num, dest_off_misalign); src_counting_page_num += nr_pages; @@ -527,7 +544,8 @@ mod test { // Basic test do_check(0, 0, ps); - // Overwrite. These two pages in src have different contents (see loop at start of this function). + // Overwrite. 
These two pages in src have different contents (see loop at start of this + // function). let second_page = ps * 2; let third_page = ps * 3; copy_ranges_and_check(&src, second_page, &dest, second_page, ps); @@ -546,7 +564,8 @@ mod test { // Page aligned, 2 pages and a bit more, not length aligned do_check(0, 0, ps * 2 + abit); - // Test fallback to manual copy. Force that by doubling the partial page offset for dest, but not src. + // Test fallback to manual copy. Force that by doubling the partial page offset for dest, + // but not src. do_check(abit, abit * 2, ps + abit); do_check(abit, abit * 2, abit); @@ -554,8 +573,9 @@ mod test { // Test zeroing with a couple pages, not length aligned. zero_ranges_and_check(&dest, ps + abit, ps * 2 + abit); - // Test two back-to-back ranges. This first copy will copy (page(2) + abit) -> (page(2) + abit) for a len of - // a page. So the end point will be (page(3) + abit), which is where the second copy starts. + // Test two back-to-back ranges. This first copy will copy (page(2) + abit) -> (page(2) + + // abit) for a len of a page. So the end point will be (page(3) + abit), which is + // where the second copy starts. copy_ranges(&src, second_page + abit, &dest, second_page + abit, ps); copy_ranges_and_check(&src, third_page + abit, &dest, third_page + abit, ps); // Make sure we didn't overwrite the first copy. diff --git a/src/kernel/src/obj/mod.rs b/src/kernel/src/obj/mod.rs index b993f75e..aa8ce8af 100644 --- a/src/kernel/src/obj/mod.rs +++ b/src/kernel/src/obj/mod.rs @@ -1,15 +1,16 @@ -use core::{ - fmt::Display, - sync::atomic::{AtomicU32, Ordering}, -}; - use alloc::{ collections::{btree_map::Entry, BTreeMap}, sync::{Arc, Weak}, vec::Vec, }; +use core::{ + fmt::Display, + sync::atomic::{AtomicU32, Ordering}, +}; + use twizzler_abi::object::{ObjID, MAX_SIZE}; +use self::{pages::Page, thread_sync::SleepInfo}; use crate::{ arch::memory::frame::FRAME_SIZE, idcounter::{IdCounter, SimpleId, StableId}, @@ -20,8 +21,6 @@ use crate::{ mutex::{LockGuard, Mutex}, }; -use self::{pages::Page, thread_sync::SleepInfo}; - pub mod control; pub mod copy; pub mod pages; @@ -162,7 +161,8 @@ impl Object { } pub fn release_pin(&self, _pin: u32) { - // TODO: Currently we don't track pins. This will be changed in-future when we fully implement eviction. + // TODO: Currently we don't track pins. This will be changed in-future when we fully + // implement eviction. } pub fn pin(&self, start: PageNumber, len: usize) -> Option<(Vec, u32)> { diff --git a/src/kernel/src/obj/pages.rs b/src/kernel/src/obj/pages.rs index 7742ac69..1c197911 100644 --- a/src/kernel/src/obj/pages.rs +++ b/src/kernel/src/obj/pages.rs @@ -1,18 +1,19 @@ +use alloc::sync::Arc; use core::sync::atomic::{AtomicU32, AtomicU64, Ordering}; -use alloc::sync::Arc; use twizzler_abi::device::{CacheType, MMIO_OFFSET}; +use super::{Object, PageNumber}; use crate::{ arch::memory::{frame::FRAME_SIZE, phys_to_virt}, - memory::frame::{self, free_frame, FrameRef, PhysicalFrameFlags}, - memory::{PhysAddr, VirtAddr}, + memory::{ + frame::{self, free_frame, FrameRef, PhysicalFrameFlags}, + PhysAddr, VirtAddr, + }, }; -use super::{Object, PageNumber}; - -/// An object page can be either a physical frame (allocatable memory) or a static physical address (wired). This will likely be -/// overhauled soon. +/// An object page can be either a physical frame (allocatable memory) or a static physical address +/// (wired). This will likely be overhauled soon. 
#[derive(Debug)] enum FrameOrWired { Frame(FrameRef), @@ -45,7 +46,8 @@ impl Drop for Page { } impl Page { - // TODO: we should have a way of allocating non-zero pages, for pages that will be immediately overwritten. + // TODO: we should have a way of allocating non-zero pages, for pages that will be immediately + // overwritten. pub fn new() -> Self { Self { frame: FrameOrWired::Frame(frame::alloc_frame(PhysicalFrameFlags::ZEROED)), @@ -74,7 +76,8 @@ impl Page { pub unsafe fn get_mut_to_val(&self, offset: usize) -> *mut T { /* TODO: enforce alignment and size of offset */ - /* TODO: once we start optimizing frame zeroing, we need to make the frame as non-zeroed here */ + /* TODO: once we start optimizing frame zeroing, we need to make the frame as non-zeroed + * here */ let va = self.as_virtaddr(); let bytes = va.as_mut_ptr::(); bytes.add(offset) as *mut T diff --git a/src/kernel/src/obj/pagevec.rs b/src/kernel/src/obj/pagevec.rs index cc3d6b03..8392c553 100644 --- a/src/kernel/src/obj/pagevec.rs +++ b/src/kernel/src/obj/pagevec.rs @@ -1,11 +1,10 @@ use alloc::{format, string::String, sync::Arc, vec::Vec}; -use crate::mutex::Mutex; - use super::{ pages::{Page, PageRef}, range::PageRange, }; +use crate::mutex::Mutex; pub struct PageVec { pages: Vec>, diff --git a/src/kernel/src/obj/range.rs b/src/kernel/src/obj/range.rs index aed7d55a..fa18a358 100644 --- a/src/kernel/src/obj/range.rs +++ b/src/kernel/src/obj/range.rs @@ -1,15 +1,14 @@ +use alloc::{sync::Arc, vec::Vec}; use core::fmt::Display; -use alloc::{sync::Arc, vec::Vec}; use nonoverlapping_interval_tree::{IntervalValue, NonOverlappingIntervalTree}; -use crate::mutex::Mutex; - use super::{ pages::{Page, PageRef}, pagevec::{PageVec, PageVecRef}, PageNumber, }; +use crate::mutex::Mutex; pub struct PageRange { pub start: PageNumber, @@ -60,9 +59,10 @@ impl PageRange { pub fn gc_pagevec(&mut self) { if self.is_shared() { - // TODO: maybe we can do something smarter here, but it may be dangerous. In particular, we should - // study what pagevecs actually look like in a long-running system and decide what to do based on that. - // Of course, if we want to be able to do anything here, we'll either need to promote pagevecs to non-shared + // TODO: maybe we can do something smarter here, but it may be dangerous. In particular, + // we should study what pagevecs actually look like in a long-running system + // and decide what to do based on that. Of course, if we want to be able to + // do anything here, we'll either need to promote pagevecs to non-shared // or we will need to track more page info. return; } diff --git a/src/kernel/src/obj/thread_sync.rs b/src/kernel/src/obj/thread_sync.rs index 5e7f35d0..6ddbe2f5 100644 --- a/src/kernel/src/obj/thread_sync.rs +++ b/src/kernel/src/obj/thread_sync.rs @@ -1,9 +1,9 @@ use alloc::collections::BTreeMap; -use twizzler_abi::syscall::ThreadSyncOp; -use crate::thread::{current_thread_ref, ThreadRef}; +use twizzler_abi::syscall::ThreadSyncOp; use super::Object; +use crate::thread::{current_thread_ref, ThreadRef}; struct SleepEntry { threads: BTreeMap, @@ -68,7 +68,8 @@ impl SleepInfo { count += 1; p }) { - /* TODO (opt): if sync_sleep_done is also set, maybe we can just immeditately reschedule this thread. */ + /* TODO (opt): if sync_sleep_done is also set, maybe we can just immeditately + * reschedule this thread. 
*/ crate::syscall::sync::add_to_requeue(t); } } diff --git a/src/kernel/src/once.rs b/src/kernel/src/once.rs index 9cded08a..bd58b406 100644 --- a/src/kernel/src/once.rs +++ b/src/kernel/src/once.rs @@ -30,7 +30,8 @@ impl Once { } } /// Initialize the data once and only once, returning the data once it is initialized. The given - /// closure will only execute the first time this function is called, and otherwise will not be run. + /// closure will only execute the first time this function is called, and otherwise will not be + /// run. /// /// If multiple calls to call_once race, only one of them will run and initialize the data, the /// others will block. diff --git a/src/kernel/src/operations.rs b/src/kernel/src/operations.rs index 65da0ac4..79368d2b 100644 --- a/src/kernel/src/operations.rs +++ b/src/kernel/src/operations.rs @@ -1,4 +1,5 @@ use alloc::vec::Vec; + use twizzler_abi::object::Protections; use crate::{ diff --git a/src/kernel/src/panic.rs b/src/kernel/src/panic.rs index 35075f0c..6bac645c 100755 --- a/src/kernel/src/panic.rs +++ b/src/kernel/src/panic.rs @@ -1,155 +1,155 @@ -use addr2line::Context; -use object::{Object, ObjectSection}; - -use core::panic::PanicInfo; - -use crate::interrupt::disable; - -static mut DEBUG_CTX: Option< - Context>>, -> = None; - -fn load_debug_context( - file: &object::read::elf::ElfFile64, -) -> Option< - addr2line::Context>, -> { - let endian = addr2line::gimli::RunTimeEndian::Little; //TODO - fn load_section( - id: addr2line::gimli::SectionId, - file: &object::read::elf::ElfFile64, - endian: addr2line::gimli::RunTimeEndian, - ) -> Result, object::Error> - { - let data = file - .section_by_name(id.name()) - .and_then(|section| section.uncompressed_data().ok()) - .unwrap_or(alloc::borrow::Cow::Borrowed(&[])); - Ok(addr2line::gimli::EndianRcSlice::new( - alloc::rc::Rc::from(&*data), - endian, - )) - } - - let result = addr2line::gimli::Dwarf::load(|id| load_section(id, file, endian)); - match result { - Ok(dwarf) => match addr2line::Context::from_dwarf(dwarf) { - Ok(dwarf) => Some(dwarf), - Err(e) => { - logln!("loading debug information failed: {:?}", e); - None - } - }, - Err(e) => { - logln!("loading debug information failed: {:?}", e); - None - } - } -} - -pub fn init(kernel_image: &'static [u8]) { - let image = - object::read::elf::ElfFile64::parse(kernel_image).expect("failed to parse kernel image"); - let ctx = load_debug_context(&image); - unsafe { DEBUG_CTX = ctx }; -} - -const MAX_FRAMES: usize = 100; -pub fn backtrace(symbolize: bool, entry_point: Option) { - let mut frame_nr = 0; - let trace_callback = |frame: &backtracer_core::Frame| { - let ip = frame.ip(); - - if !symbolize { - emerglogln!("{:4} - {:18p}", frame_nr, ip); - } else { - // Resolve this instruction pointer to a symbol name - let _ = backtracer_core::resolve( - if let Some(ref ctx) = unsafe { &DEBUG_CTX } { - Some(ctx) - } else { - None - }, - 0, - ip, - |symbol| { - let name = symbol.name(); - if let Some(addr) = symbol.addr() { - emerglogln!( - "{:4}: {:18p} - {}", - frame_nr, - addr, - if let Some(ref name) = name { - name - } else { - "??" - } - ) - } else { - emerglogln!( - "{:4}: ?? - {}", - frame_nr, - if let Some(ref name) = name { - name - } else { - "??" 
- } - ) - } - if let Some(filename) = symbol.filename() { - if let Some(linenr) = symbol.lineno() { - emerglogln!( - " at {}:{}", - filename, - linenr - ); - } - } - }, - ); - } - frame_nr += 1; - - if frame_nr > MAX_FRAMES { - return false; - } - - true // keep going to the next frame - }; - - if let Some(entry_point) = entry_point { - backtracer_core::trace_from(entry_point, trace_callback); - } else { - backtracer_core::trace(trace_callback); - } -} - -static DID_PANIC: core::sync::atomic::AtomicBool = core::sync::atomic::AtomicBool::new(false); -#[panic_handler] -fn panic(info: &PanicInfo) -> ! { - disable(); - let second_panic = DID_PANIC.swap(true, core::sync::atomic::Ordering::SeqCst); - if second_panic { - loop {} - } - emerglogln!("[error] {}", info); - if second_panic { - emerglogln!("we've had one, yes, but what about second panic?"); - } - - emerglogln!("starting backtrace..."); - - backtrace(!second_panic, None); - - emerglogln!("unrecoverable, halting processor."); - - if crate::is_test_mode() { - emerglogln!("!!! TEST MODE PANIC -- RESETTING"); - //crate::arch::debug_shutdown(42); - } - - loop {} -} - -#[lang = "eh_personality"] -pub extern "C" fn rust_eh_personality() {} +use core::panic::PanicInfo; + +use addr2line::Context; +use object::{Object, ObjectSection}; + +use crate::interrupt::disable; + +static mut DEBUG_CTX: Option< + Context>>, +> = None; + +fn load_debug_context( + file: &object::read::elf::ElfFile64, +) -> Option< + addr2line::Context>, +> { + let endian = addr2line::gimli::RunTimeEndian::Little; //TODO + fn load_section( + id: addr2line::gimli::SectionId, + file: &object::read::elf::ElfFile64, + endian: addr2line::gimli::RunTimeEndian, + ) -> Result, object::Error> + { + let data = file + .section_by_name(id.name()) + .and_then(|section| section.uncompressed_data().ok()) + .unwrap_or(alloc::borrow::Cow::Borrowed(&[])); + Ok(addr2line::gimli::EndianRcSlice::new( + alloc::rc::Rc::from(&*data), + endian, + )) + } + + let result = addr2line::gimli::Dwarf::load(|id| load_section(id, file, endian)); + match result { + Ok(dwarf) => match addr2line::Context::from_dwarf(dwarf) { + Ok(dwarf) => Some(dwarf), + Err(e) => { + logln!("loading debug information failed: {:?}", e); + None + } + }, + Err(e) => { + logln!("loading debug information failed: {:?}", e); + None + } + } +} + +pub fn init(kernel_image: &'static [u8]) { + let image = + object::read::elf::ElfFile64::parse(kernel_image).expect("failed to parse kernel image"); + let ctx = load_debug_context(&image); + unsafe { DEBUG_CTX = ctx }; +} + +const MAX_FRAMES: usize = 100; +pub fn backtrace(symbolize: bool, entry_point: Option) { + let mut frame_nr = 0; + let trace_callback = |frame: &backtracer_core::Frame| { + let ip = frame.ip(); + + if !symbolize { + emerglogln!("{:4} - {:18p}", frame_nr, ip); + } else { + // Resolve this instruction pointer to a symbol name + let _ = backtracer_core::resolve( + if let Some(ref ctx) = unsafe { &DEBUG_CTX } { + Some(ctx) + } else { + None + }, + 0, + ip, + |symbol| { + let name = symbol.name(); + if let Some(addr) = symbol.addr() { + emerglogln!( + "{:4}: {:18p} - {}", + frame_nr, + addr, + if let Some(ref name) = name { + name + } else { + "??" + } + ) + } else { + emerglogln!( + "{:4}: ?? - {}", + frame_nr, + if let Some(ref name) = name { + name + } else { + "??" 
+ } + ) + } + if let Some(filename) = symbol.filename() { + if let Some(linenr) = symbol.lineno() { + emerglogln!( + " at {}:{}", + filename, + linenr + ); + } + } + }, + ); + } + frame_nr += 1; + + if frame_nr > MAX_FRAMES { + return false; + } + + true // keep going to the next frame + }; + + if let Some(entry_point) = entry_point { + backtracer_core::trace_from(entry_point, trace_callback); + } else { + backtracer_core::trace(trace_callback); + } +} + +static DID_PANIC: core::sync::atomic::AtomicBool = core::sync::atomic::AtomicBool::new(false); +#[panic_handler] +fn panic(info: &PanicInfo) -> ! { + disable(); + let second_panic = DID_PANIC.swap(true, core::sync::atomic::Ordering::SeqCst); + if second_panic { + loop {} + } + emerglogln!("[error] {}", info); + if second_panic { + emerglogln!("we've had one, yes, but what about second panic?"); + } + + emerglogln!("starting backtrace..."); + + backtrace(!second_panic, None); + + emerglogln!("unrecoverable, halting processor."); + + if crate::is_test_mode() { + emerglogln!("!!! TEST MODE PANIC -- RESETTING"); + //crate::arch::debug_shutdown(42); + } + + loop {} +} + +#[lang = "eh_personality"] +pub extern "C" fn rust_eh_personality() {} diff --git a/src/kernel/src/processor.rs b/src/kernel/src/processor.rs index 83581a5b..6f33e733 100644 --- a/src/kernel/src/processor.rs +++ b/src/kernel/src/processor.rs @@ -1,582 +1,581 @@ -use core::{ - alloc::Layout, - ptr::null_mut, - sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, -}; - -use crate::{ - arch::interrupt::GENERIC_IPI_VECTOR, - interrupt::{self, Destination}, - once::Once, - spinlock::{LockGuard, SpinLoop, Spinlock}, - thread::{current_thread_ref, priority::Priority}, -}; -use alloc::{boxed::Box, sync::Arc, vec::Vec}; -use intrusive_collections::{intrusive_adapter, LinkedList}; - -use crate::{ - arch::{self, processor::ArchProcessor}, - image::TlsInfo, - memory::VirtAddr, - sched::{CPUTopoNode, CPUTopoType}, - thread::{Thread, ThreadRef}, -}; - -#[thread_local] -static mut BOOT_KERNEL_STACK: *mut u8 = core::ptr::null_mut(); - -#[thread_local] -static mut CPU_ID: u32 = 0; - -#[thread_local] -static mut CURRENT_PROCESSOR: *const Processor = null_mut(); - -#[derive(Debug, Default)] -pub struct ProcessorStats { - pub preempts: AtomicU64, - pub wakeups: AtomicU64, - pub steals: AtomicU64, - pub idle: AtomicU64, - pub non_idle: AtomicU64, - pub hardticks: AtomicU64, - pub switches: AtomicU64, -} - -struct IpiTask { - outstanding: AtomicU64, - func: Box, -} - -pub struct Processor { - pub arch: ArchProcessor, - sched: Spinlock, - running: AtomicBool, - topology_path: Once>, - pub id: u32, - bsp_id: u32, - pub idle_thread: Once, - pub load: AtomicU64, - pub stats: ProcessorStats, - ipi_tasks: Spinlock>>, - exited: Spinlock>, -} - -const NR_QUEUES: usize = 32; -#[derive(Default)] -pub struct SchedulingQueues { - pub queues: [LinkedList; NR_QUEUES], - pub last_chosen_priority: Option, -} - -intrusive_adapter!(pub SchedLinkAdapter = ThreadRef: Thread { sched_link: intrusive_collections::linked_list::AtomicLink }); - -pub struct SchedLockGuard<'a> { - queues: LockGuard<'a, SchedulingQueues, SpinLoop>, -} - -impl core::ops::Deref for SchedLockGuard<'_> { - type Target = SchedulingQueues; - fn deref(&self) -> &Self::Target { - &*self.queues - } -} - -impl core::ops::DerefMut for SchedLockGuard<'_> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut *self.queues - } -} - -impl Drop for SchedLockGuard<'_> { - fn drop(&mut self) { - current_thread_ref().map(|c| c.exit_critical()); - } 
-} - -impl SchedulingQueues { - pub fn reinsert_thread(&mut self, thread: ThreadRef) -> bool { - let queue_number = thread.queue_number::(); - let needs_preempt = if let Some(ref last) = self.last_chosen_priority { - last < &thread.effective_priority() - } else { - false - }; - self.queues[queue_number].push_back(thread); - needs_preempt - } - - pub fn check_priority_change(&mut self, thread: &Thread) -> bool { - for i in 0..NR_QUEUES { - let queue = &mut self.queues[i]; - - let mut cursor = queue.front_mut(); - while let Some(item) = cursor.get() { - if item.id() == thread.id() { - let item = cursor.remove().unwrap(); - drop(cursor); - return self.reinsert_thread(item); - } - cursor.move_next(); - } - } - false - } - - pub fn get_min_non_empty(&self) -> usize { - for i in 0..NR_QUEUES { - if !self.queues[i].is_empty() { - return i; - } - } - NR_QUEUES - } - - pub fn has_work(&self) -> bool { - self.get_min_non_empty() != NR_QUEUES || self.last_chosen_priority.is_some() - } - - pub fn should_preempt(&self, pri: &Priority, eq: bool) -> bool { - let q = pri.queue_number::(); - let m = self.get_min_non_empty(); - let c = self - .last_chosen_priority - .as_ref() - .map_or(NR_QUEUES, |p| p.queue_number::()); - if eq { - q <= m || q <= c - } else { - q < m || q < c - } - } - - pub fn has_higher_priority(&self, pri: Option<&Priority>) -> bool { - let q = self.get_min_non_empty(); - if let Some(pri) = pri { - let highest = Priority::from_queue_number::(q); - &highest > pri - || self - .last_chosen_priority - .as_ref() - .map_or(false, |last| last > pri) - } else { - q < NR_QUEUES || self.last_chosen_priority.is_some() - } - } - - pub fn choose_next(&mut self, for_self: bool) -> Option { - for queue in &mut self.queues { - if !queue.is_empty() { - let choice = queue.pop_front(); - if for_self { - self.last_chosen_priority = choice.as_ref().map(|c| c.effective_priority()); - } - return choice; - } - } - if for_self { - self.last_chosen_priority = None; - } - None - } -} - -impl Processor { - pub fn new(id: u32, bsp_id: u32) -> Self { - Self { - arch: ArchProcessor::default(), - sched: Spinlock::new(Default::default()), - running: AtomicBool::new(false), - topology_path: Once::new(), - id, - bsp_id, - idle_thread: Once::new(), - load: AtomicU64::new(1), - stats: ProcessorStats::default(), - ipi_tasks: Spinlock::new(Vec::new()), - exited: Spinlock::new(Vec::new()), - } - } - - pub fn is_bsp(&self) -> bool { - self.id == self.bsp_id - } - - pub fn bsp_id(&self) -> u32 { - self.bsp_id - } - - pub fn schedlock(&self) -> SchedLockGuard { - current_thread_ref().map(|c| c.enter_critical_unguarded()); - let queues = self.sched.lock(); - SchedLockGuard { queues } - } - - pub fn current_priority(&self) -> Priority { - /* TODO: optimize this by just keeping track of it outside the sched? 
*/ - let sched = self.schedlock(); - let queue_pri = Priority::from_queue_number::(sched.get_min_non_empty()); - if let Some(ref pri) = sched.last_chosen_priority { - core::cmp::max(queue_pri, pri.clone()) - } else { - queue_pri - } - } - - pub fn current_load(&self) -> u64 { - self.load.load(Ordering::SeqCst) - } - - fn set_topology(&self, topo_path: Vec<(usize, bool)>) { - self.topology_path.call_once(|| topo_path); - } - - fn set_running(&self) { - self.running - .store(true, core::sync::atomic::Ordering::SeqCst); - } - - fn is_running(&self) -> bool { - self.running.load(Ordering::SeqCst) - } - - pub fn set_idle_thread(&self, idle: ThreadRef) { - self.idle_thread.call_once(|| idle); - } - - fn enqueue_ipi_task(&self, task: Arc) { - task.outstanding.fetch_add(1, Ordering::SeqCst); - self.ipi_tasks.lock().push(task); - } - - fn run_ipi_tasks(&self) { - let mut tasks = self.ipi_tasks.lock(); - for task in tasks.drain(..) { - (task.func)(); - task.outstanding.fetch_sub(1, Ordering::Release); - } - } - - pub fn push_exited(&self, th: ThreadRef) { - self.exited.lock().push(th); - } - - pub fn cleanup_exited(&self) { - let item = self.exited.lock().pop(); - drop(item); - } -} - -const MAX_CPU_ID: usize = 1024; - -pub fn current_processor() -> &'static Processor { - if !tls_ready() { - panic!("tried to read a thread-local value with no FS base set"); - } - unsafe { CURRENT_PROCESSOR.as_ref() }.unwrap() -} - -const INIT: Option> = None; -static mut ALL_PROCESSORS: [Option>; MAX_CPU_ID + 1] = [INIT; MAX_CPU_ID + 1]; - -pub fn get_processor(id: u32) -> &'static Processor { - unsafe { ALL_PROCESSORS[id as usize].as_ref().unwrap() } -} - -/// Obtain a mutable reference to a processor object. This should not be called unless -/// you know what you are doing. Generally during the boostrap process. -pub unsafe fn get_processor_mut(id: u32) -> &'static mut Processor { - ALL_PROCESSORS[id as usize].as_mut().unwrap() -} - -pub fn with_each_active_processor(mut f: impl FnMut(&'static Processor)) { - for p in unsafe { &ALL_PROCESSORS } { - if let Some(p) = p { - if p.is_running() { - f(p) - } - } - } -} - -#[inline] -pub fn tls_ready() -> bool { - crate::arch::processor::tls_ready() -} - -pub const KERNEL_STACK_SIZE: usize = 81920; - -pub fn init_cpu(tls_template: TlsInfo, bsp_id: u32) { - let tcb_base = crate::arch::image::init_tls(tls_template); - crate::arch::processor::init(tcb_base); - unsafe { - BOOT_KERNEL_STACK = 0xfffffff000001000u64 as *mut u8; //TODO: get this from bootloader config? - CPU_ID = bsp_id; - CURRENT_PROCESSOR = &**ALL_PROCESSORS[CPU_ID as usize].as_ref().unwrap(); - } - let topo_path = arch::processor::get_topology(); - current_processor().set_topology(topo_path); -} - -pub static NR_CPUS: AtomicUsize = AtomicUsize::new(1); - -static CPU_MAIN_BARRIER: AtomicBool = AtomicBool::new(false); -pub fn secondary_entry(id: u32, tcb_base: VirtAddr, kernel_stack_base: *mut u8) -> ! 
{ - crate::arch::processor::init(tcb_base); - unsafe { - BOOT_KERNEL_STACK = kernel_stack_base; - CPU_ID = id; - CURRENT_PROCESSOR = &**ALL_PROCESSORS[id as usize].as_ref().unwrap(); - } - arch::init_secondary(); - let topo_path = arch::processor::get_topology(); - current_processor().set_topology(topo_path); - current_processor() - .running - .store(true, core::sync::atomic::Ordering::SeqCst); - NR_CPUS.fetch_add(1, Ordering::SeqCst); - while !CPU_MAIN_BARRIER.load(core::sync::atomic::Ordering::SeqCst) {} - crate::init_threading(); -} - -fn start_secondary_cpu(cpu: u32, tls_template: TlsInfo) { - if cpu == 0 { - panic!("TODO: we currently assume the bootstrap processor gets ID 0"); - } - let tcb_base = crate::arch::image::init_tls(tls_template); - /* TODO: dedicated kernel stack allocator, with guard page support */ - let kernel_stack = unsafe { - let layout = Layout::from_size_align(KERNEL_STACK_SIZE, 16).unwrap(); - alloc::alloc::alloc_zeroed(layout) - }; - - //logln!("poking cpu {} {:?} {:?}", cpu, tcb_base, kernel_stack); - unsafe { - crate::arch::poke_cpu(cpu, tcb_base, kernel_stack); - } -} - -pub fn boot_all_secondaries(tls_template: TlsInfo) { - for p in unsafe { &ALL_PROCESSORS }.iter().flatten() { - if !p.running.load(core::sync::atomic::Ordering::SeqCst) { - start_secondary_cpu(p.id, tls_template); - } - while !p.running.load(core::sync::atomic::Ordering::SeqCst) { - // We can safely spin-loop here because we are in kernel initialization. - core::hint::spin_loop(); - } - } - - let mut cpu_topo_root = CPUTopoNode::new(CPUTopoType::System); - for p in unsafe { &ALL_PROCESSORS }.iter().flatten() { - let topo_path = p.topology_path.wait(); - cpu_topo_root.set_cpu(p.id); - let mut level = &mut cpu_topo_root; - for (path, is_thread) in topo_path { - let mut child = level.child_mut(*path); - if child.is_none() { - let ty = if *is_thread { - CPUTopoType::Thread - } else { - CPUTopoType::Cache - }; - level.add_child(*path, CPUTopoNode::new(ty)); - child = level.child_mut(*path); - } - - let child = child.unwrap(); - - child.set_cpu(p.id); - - let next = level.child_mut(*path); - level = next.unwrap(); - } - } - crate::sched::set_cpu_topology(cpu_topo_root); - CPU_MAIN_BARRIER.store(true, core::sync::atomic::Ordering::SeqCst); - crate::memory::prep_smp(); -} - -pub fn register(id: u32, bsp_id: u32) { - if id as usize >= unsafe { &ALL_PROCESSORS }.len() { - unimplemented!("processor ID too large"); - } - - unsafe { - ALL_PROCESSORS[id as usize] = Some(Box::new(Processor::new(id, bsp_id))); - if id == bsp_id { - ALL_PROCESSORS[id as usize].as_ref().unwrap().set_running(); - } - } -} - -fn enqueue_ipi_task_many(incl_self: bool, task: &Arc) { - let current = current_processor(); - for p in unsafe { &ALL_PROCESSORS }.iter().flatten() { - if p.id != current.id || incl_self { - p.enqueue_ipi_task(task.clone()); - } - } -} - -/// Run a closure on some set of CPUs, waiting for all invocations to complete. -pub fn ipi_exec(target: Destination, f: Box) { - if current_thread_ref().is_none() { - return; - } - let task = Arc::new(IpiTask { - outstanding: AtomicU64::new(0), - func: f, - }); - - // We need to disable interrupts to prevent our current CPU from changing until we've submitted the IPIs. - let int_state = interrupt::disable(); - let current = current_processor(); - match target { - // Lowest priority doesn't really make sense in IPIs, so we just pretend it goes to BSP. 
- Destination::Bsp | Destination::LowestPriority => { - get_processor(current.bsp_id()).enqueue_ipi_task(task.clone()); - } - Destination::Single(id) => { - let proc = get_processor(id); - if !proc.is_running() { - logln!("tried to send IPI to non-running CPU"); - interrupt::set(int_state); - return; - } - if proc.id == current.id { - // We are the only recipients, so just run the closure. - (task.func)(); - interrupt::set(int_state); - return; - } - proc.enqueue_ipi_task(task.clone()); - } - Destination::AllButSelf => enqueue_ipi_task_many(false, &task), - Destination::All => enqueue_ipi_task_many(true, &task), - } - - // No point using the IPI hardware to send ourselves a message, so just run it manually if current CPU is included. - let (target, target_self) = match target { - Destination::All => (Destination::AllButSelf, true), - x => (x, false), - }; - arch::send_ipi(target, GENERIC_IPI_VECTOR); - - if target_self { - current.run_ipi_tasks(); - } - - // We can take interrupts while we wait for other CPUs to execute. - interrupt::set(int_state); - - spin_wait_until( - || { - if task.outstanding.load(Ordering::SeqCst) != 0 { - None - } else { - Some(()) - } - }, - || { - if !int_state { - current.run_ipi_tasks(); - } - }, - ); - - core::sync::atomic::fence(Ordering::SeqCst); -} - -pub fn generic_ipi_handler() { - let current = current_processor(); - current.run_ipi_tasks(); - core::sync::atomic::fence(Ordering::SeqCst); -} - -/// Spin waits while a condition (cond) is true, regularly running architecture-dependent spin-wait code along with the provided -/// pause function. The cond function should not mutate state, and it should be fast (ideally reading a single, perhaps atomic, -/// memory value + a comparison). The pause function, on the other hand, can be heavier-weight, and may do arbitrary work (within -/// the context of the caller). The cond function will be called some multiple of times between calls to pause, and if cond returns -/// false, then this function immediately returns. The [core::hint::spin_loop] function is called between calls to cond. 
-pub fn spin_wait_until(until: impl Fn() -> Option, mut pause: impl FnMut()) -> R { - const NR_SPIN_LOOPS: usize = 100; - loop { - for _ in 0..NR_SPIN_LOOPS { - if let Some(ret) = until() { - return ret; - } - core::hint::spin_loop(); - } - arch::processor::spin_wait_iteration(); - pause(); - } -} - -#[cfg(test)] -mod test { - use core::sync::atomic::{AtomicUsize, Ordering}; - - use alloc::{boxed::Box, sync::Arc}; - use twizzler_kernel_macros::kernel_test; - - use crate::interrupt::Destination; - - use super::ALL_PROCESSORS; - - const NR_IPI_TEST_ITERS: usize = 1000; - #[kernel_test] - fn ipi_test() { - for _ in 0..NR_IPI_TEST_ITERS { - let nr_cpus = unsafe { &ALL_PROCESSORS }.iter().flatten().count(); - let counter = Arc::new(AtomicUsize::new(0)); - let counter2 = counter.clone(); - super::ipi_exec( - Destination::All, - Box::new(move || { - counter2.fetch_add(1, Ordering::SeqCst); - }), - ); - assert_eq!(nr_cpus, counter.load(Ordering::SeqCst)); - - let counter = Arc::new(AtomicUsize::new(0)); - let counter2 = counter.clone(); - super::ipi_exec( - Destination::AllButSelf, - Box::new(move || { - counter2.fetch_add(1, Ordering::SeqCst); - }), - ); - assert_eq!(nr_cpus, counter.load(Ordering::SeqCst) + 1); - - let counter = Arc::new(AtomicUsize::new(0)); - let counter2 = counter.clone(); - super::ipi_exec( - Destination::Bsp, - Box::new(move || { - counter2.fetch_add(1, Ordering::SeqCst); - }), - ); - assert_eq!(1, counter.load(Ordering::SeqCst)); - - let counter = Arc::new(AtomicUsize::new(0)); - let counter2 = counter.clone(); - super::ipi_exec( - Destination::Single(0), - Box::new(move || { - counter2.fetch_add(1, Ordering::SeqCst); - }), - ); - assert_eq!(1, counter.load(Ordering::SeqCst)); - - let counter = Arc::new(AtomicUsize::new(0)); - let counter2 = counter.clone(); - super::ipi_exec( - Destination::LowestPriority, - Box::new(move || { - counter2.fetch_add(1, Ordering::SeqCst); - }), - ); - assert_eq!(1, counter.load(Ordering::SeqCst)); - } - } -} +use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use core::{ + alloc::Layout, + ptr::null_mut, + sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, +}; + +use intrusive_collections::{intrusive_adapter, LinkedList}; + +use crate::{ + arch::{self, interrupt::GENERIC_IPI_VECTOR, processor::ArchProcessor}, + image::TlsInfo, + interrupt::{self, Destination}, + memory::VirtAddr, + once::Once, + sched::{CPUTopoNode, CPUTopoType}, + spinlock::{LockGuard, SpinLoop, Spinlock}, + thread::{current_thread_ref, priority::Priority, Thread, ThreadRef}, +}; + +#[thread_local] +static mut BOOT_KERNEL_STACK: *mut u8 = core::ptr::null_mut(); + +#[thread_local] +static mut CPU_ID: u32 = 0; + +#[thread_local] +static mut CURRENT_PROCESSOR: *const Processor = null_mut(); + +#[derive(Debug, Default)] +pub struct ProcessorStats { + pub preempts: AtomicU64, + pub wakeups: AtomicU64, + pub steals: AtomicU64, + pub idle: AtomicU64, + pub non_idle: AtomicU64, + pub hardticks: AtomicU64, + pub switches: AtomicU64, +} + +struct IpiTask { + outstanding: AtomicU64, + func: Box, +} + +pub struct Processor { + pub arch: ArchProcessor, + sched: Spinlock, + running: AtomicBool, + topology_path: Once>, + pub id: u32, + bsp_id: u32, + pub idle_thread: Once, + pub load: AtomicU64, + pub stats: ProcessorStats, + ipi_tasks: Spinlock>>, + exited: Spinlock>, +} + +const NR_QUEUES: usize = 32; +#[derive(Default)] +pub struct SchedulingQueues { + pub queues: [LinkedList; NR_QUEUES], + pub last_chosen_priority: Option, +} + +intrusive_adapter!(pub SchedLinkAdapter = 
ThreadRef: Thread { sched_link: intrusive_collections::linked_list::AtomicLink }); + +pub struct SchedLockGuard<'a> { + queues: LockGuard<'a, SchedulingQueues, SpinLoop>, +} + +impl core::ops::Deref for SchedLockGuard<'_> { + type Target = SchedulingQueues; + fn deref(&self) -> &Self::Target { + &*self.queues + } +} + +impl core::ops::DerefMut for SchedLockGuard<'_> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut *self.queues + } +} + +impl Drop for SchedLockGuard<'_> { + fn drop(&mut self) { + current_thread_ref().map(|c| c.exit_critical()); + } +} + +impl SchedulingQueues { + pub fn reinsert_thread(&mut self, thread: ThreadRef) -> bool { + let queue_number = thread.queue_number::(); + let needs_preempt = if let Some(ref last) = self.last_chosen_priority { + last < &thread.effective_priority() + } else { + false + }; + self.queues[queue_number].push_back(thread); + needs_preempt + } + + pub fn check_priority_change(&mut self, thread: &Thread) -> bool { + for i in 0..NR_QUEUES { + let queue = &mut self.queues[i]; + + let mut cursor = queue.front_mut(); + while let Some(item) = cursor.get() { + if item.id() == thread.id() { + let item = cursor.remove().unwrap(); + drop(cursor); + return self.reinsert_thread(item); + } + cursor.move_next(); + } + } + false + } + + pub fn get_min_non_empty(&self) -> usize { + for i in 0..NR_QUEUES { + if !self.queues[i].is_empty() { + return i; + } + } + NR_QUEUES + } + + pub fn has_work(&self) -> bool { + self.get_min_non_empty() != NR_QUEUES || self.last_chosen_priority.is_some() + } + + pub fn should_preempt(&self, pri: &Priority, eq: bool) -> bool { + let q = pri.queue_number::(); + let m = self.get_min_non_empty(); + let c = self + .last_chosen_priority + .as_ref() + .map_or(NR_QUEUES, |p| p.queue_number::()); + if eq { + q <= m || q <= c + } else { + q < m || q < c + } + } + + pub fn has_higher_priority(&self, pri: Option<&Priority>) -> bool { + let q = self.get_min_non_empty(); + if let Some(pri) = pri { + let highest = Priority::from_queue_number::(q); + &highest > pri + || self + .last_chosen_priority + .as_ref() + .map_or(false, |last| last > pri) + } else { + q < NR_QUEUES || self.last_chosen_priority.is_some() + } + } + + pub fn choose_next(&mut self, for_self: bool) -> Option { + for queue in &mut self.queues { + if !queue.is_empty() { + let choice = queue.pop_front(); + if for_self { + self.last_chosen_priority = choice.as_ref().map(|c| c.effective_priority()); + } + return choice; + } + } + if for_self { + self.last_chosen_priority = None; + } + None + } +} + +impl Processor { + pub fn new(id: u32, bsp_id: u32) -> Self { + Self { + arch: ArchProcessor::default(), + sched: Spinlock::new(Default::default()), + running: AtomicBool::new(false), + topology_path: Once::new(), + id, + bsp_id, + idle_thread: Once::new(), + load: AtomicU64::new(1), + stats: ProcessorStats::default(), + ipi_tasks: Spinlock::new(Vec::new()), + exited: Spinlock::new(Vec::new()), + } + } + + pub fn is_bsp(&self) -> bool { + self.id == self.bsp_id + } + + pub fn bsp_id(&self) -> u32 { + self.bsp_id + } + + pub fn schedlock(&self) -> SchedLockGuard { + current_thread_ref().map(|c| c.enter_critical_unguarded()); + let queues = self.sched.lock(); + SchedLockGuard { queues } + } + + pub fn current_priority(&self) -> Priority { + /* TODO: optimize this by just keeping track of it outside the sched? 
*/ + let sched = self.schedlock(); + let queue_pri = Priority::from_queue_number::(sched.get_min_non_empty()); + if let Some(ref pri) = sched.last_chosen_priority { + core::cmp::max(queue_pri, pri.clone()) + } else { + queue_pri + } + } + + pub fn current_load(&self) -> u64 { + self.load.load(Ordering::SeqCst) + } + + fn set_topology(&self, topo_path: Vec<(usize, bool)>) { + self.topology_path.call_once(|| topo_path); + } + + fn set_running(&self) { + self.running + .store(true, core::sync::atomic::Ordering::SeqCst); + } + + fn is_running(&self) -> bool { + self.running.load(Ordering::SeqCst) + } + + pub fn set_idle_thread(&self, idle: ThreadRef) { + self.idle_thread.call_once(|| idle); + } + + fn enqueue_ipi_task(&self, task: Arc) { + task.outstanding.fetch_add(1, Ordering::SeqCst); + self.ipi_tasks.lock().push(task); + } + + fn run_ipi_tasks(&self) { + let mut tasks = self.ipi_tasks.lock(); + for task in tasks.drain(..) { + (task.func)(); + task.outstanding.fetch_sub(1, Ordering::Release); + } + } + + pub fn push_exited(&self, th: ThreadRef) { + self.exited.lock().push(th); + } + + pub fn cleanup_exited(&self) { + let item = self.exited.lock().pop(); + drop(item); + } +} + +const MAX_CPU_ID: usize = 1024; + +pub fn current_processor() -> &'static Processor { + if !tls_ready() { + panic!("tried to read a thread-local value with no FS base set"); + } + unsafe { CURRENT_PROCESSOR.as_ref() }.unwrap() +} + +const INIT: Option> = None; +static mut ALL_PROCESSORS: [Option>; MAX_CPU_ID + 1] = [INIT; MAX_CPU_ID + 1]; + +pub fn get_processor(id: u32) -> &'static Processor { + unsafe { ALL_PROCESSORS[id as usize].as_ref().unwrap() } +} + +/// Obtain a mutable reference to a processor object. This should not be called unless +/// you know what you are doing. Generally during the boostrap process. +pub unsafe fn get_processor_mut(id: u32) -> &'static mut Processor { + ALL_PROCESSORS[id as usize].as_mut().unwrap() +} + +pub fn with_each_active_processor(mut f: impl FnMut(&'static Processor)) { + for p in unsafe { &ALL_PROCESSORS } { + if let Some(p) = p { + if p.is_running() { + f(p) + } + } + } +} + +#[inline] +pub fn tls_ready() -> bool { + crate::arch::processor::tls_ready() +} + +pub const KERNEL_STACK_SIZE: usize = 81920; + +pub fn init_cpu(tls_template: TlsInfo, bsp_id: u32) { + let tcb_base = crate::arch::image::init_tls(tls_template); + crate::arch::processor::init(tcb_base); + unsafe { + BOOT_KERNEL_STACK = 0xfffffff000001000u64 as *mut u8; //TODO: get this from bootloader config? + CPU_ID = bsp_id; + CURRENT_PROCESSOR = &**ALL_PROCESSORS[CPU_ID as usize].as_ref().unwrap(); + } + let topo_path = arch::processor::get_topology(); + current_processor().set_topology(topo_path); +} + +pub static NR_CPUS: AtomicUsize = AtomicUsize::new(1); + +static CPU_MAIN_BARRIER: AtomicBool = AtomicBool::new(false); +pub fn secondary_entry(id: u32, tcb_base: VirtAddr, kernel_stack_base: *mut u8) -> ! 
{ + crate::arch::processor::init(tcb_base); + unsafe { + BOOT_KERNEL_STACK = kernel_stack_base; + CPU_ID = id; + CURRENT_PROCESSOR = &**ALL_PROCESSORS[id as usize].as_ref().unwrap(); + } + arch::init_secondary(); + let topo_path = arch::processor::get_topology(); + current_processor().set_topology(topo_path); + current_processor() + .running + .store(true, core::sync::atomic::Ordering::SeqCst); + NR_CPUS.fetch_add(1, Ordering::SeqCst); + while !CPU_MAIN_BARRIER.load(core::sync::atomic::Ordering::SeqCst) {} + crate::init_threading(); +} + +fn start_secondary_cpu(cpu: u32, tls_template: TlsInfo) { + if cpu == 0 { + panic!("TODO: we currently assume the bootstrap processor gets ID 0"); + } + let tcb_base = crate::arch::image::init_tls(tls_template); + /* TODO: dedicated kernel stack allocator, with guard page support */ + let kernel_stack = unsafe { + let layout = Layout::from_size_align(KERNEL_STACK_SIZE, 16).unwrap(); + alloc::alloc::alloc_zeroed(layout) + }; + + //logln!("poking cpu {} {:?} {:?}", cpu, tcb_base, kernel_stack); + unsafe { + crate::arch::poke_cpu(cpu, tcb_base, kernel_stack); + } +} + +pub fn boot_all_secondaries(tls_template: TlsInfo) { + for p in unsafe { &ALL_PROCESSORS }.iter().flatten() { + if !p.running.load(core::sync::atomic::Ordering::SeqCst) { + start_secondary_cpu(p.id, tls_template); + } + while !p.running.load(core::sync::atomic::Ordering::SeqCst) { + // We can safely spin-loop here because we are in kernel initialization. + core::hint::spin_loop(); + } + } + + let mut cpu_topo_root = CPUTopoNode::new(CPUTopoType::System); + for p in unsafe { &ALL_PROCESSORS }.iter().flatten() { + let topo_path = p.topology_path.wait(); + cpu_topo_root.set_cpu(p.id); + let mut level = &mut cpu_topo_root; + for (path, is_thread) in topo_path { + let mut child = level.child_mut(*path); + if child.is_none() { + let ty = if *is_thread { + CPUTopoType::Thread + } else { + CPUTopoType::Cache + }; + level.add_child(*path, CPUTopoNode::new(ty)); + child = level.child_mut(*path); + } + + let child = child.unwrap(); + + child.set_cpu(p.id); + + let next = level.child_mut(*path); + level = next.unwrap(); + } + } + crate::sched::set_cpu_topology(cpu_topo_root); + CPU_MAIN_BARRIER.store(true, core::sync::atomic::Ordering::SeqCst); + crate::memory::prep_smp(); +} + +pub fn register(id: u32, bsp_id: u32) { + if id as usize >= unsafe { &ALL_PROCESSORS }.len() { + unimplemented!("processor ID too large"); + } + + unsafe { + ALL_PROCESSORS[id as usize] = Some(Box::new(Processor::new(id, bsp_id))); + if id == bsp_id { + ALL_PROCESSORS[id as usize].as_ref().unwrap().set_running(); + } + } +} + +fn enqueue_ipi_task_many(incl_self: bool, task: &Arc) { + let current = current_processor(); + for p in unsafe { &ALL_PROCESSORS }.iter().flatten() { + if p.id != current.id || incl_self { + p.enqueue_ipi_task(task.clone()); + } + } +} + +/// Run a closure on some set of CPUs, waiting for all invocations to complete. +pub fn ipi_exec(target: Destination, f: Box) { + if current_thread_ref().is_none() { + return; + } + let task = Arc::new(IpiTask { + outstanding: AtomicU64::new(0), + func: f, + }); + + // We need to disable interrupts to prevent our current CPU from changing until we've submitted + // the IPIs. + let int_state = interrupt::disable(); + let current = current_processor(); + match target { + // Lowest priority doesn't really make sense in IPIs, so we just pretend it goes to BSP. 
+ Destination::Bsp | Destination::LowestPriority => { + get_processor(current.bsp_id()).enqueue_ipi_task(task.clone()); + } + Destination::Single(id) => { + let proc = get_processor(id); + if !proc.is_running() { + logln!("tried to send IPI to non-running CPU"); + interrupt::set(int_state); + return; + } + if proc.id == current.id { + // We are the only recipients, so just run the closure. + (task.func)(); + interrupt::set(int_state); + return; + } + proc.enqueue_ipi_task(task.clone()); + } + Destination::AllButSelf => enqueue_ipi_task_many(false, &task), + Destination::All => enqueue_ipi_task_many(true, &task), + } + + // No point using the IPI hardware to send ourselves a message, so just run it manually if + // current CPU is included. + let (target, target_self) = match target { + Destination::All => (Destination::AllButSelf, true), + x => (x, false), + }; + arch::send_ipi(target, GENERIC_IPI_VECTOR); + + if target_self { + current.run_ipi_tasks(); + } + + // We can take interrupts while we wait for other CPUs to execute. + interrupt::set(int_state); + + spin_wait_until( + || { + if task.outstanding.load(Ordering::SeqCst) != 0 { + None + } else { + Some(()) + } + }, + || { + if !int_state { + current.run_ipi_tasks(); + } + }, + ); + + core::sync::atomic::fence(Ordering::SeqCst); +} + +pub fn generic_ipi_handler() { + let current = current_processor(); + current.run_ipi_tasks(); + core::sync::atomic::fence(Ordering::SeqCst); +} + +/// Spin waits while a condition (cond) is true, regularly running architecture-dependent spin-wait +/// code along with the provided pause function. The cond function should not mutate state, and it +/// should be fast (ideally reading a single, perhaps atomic, memory value + a comparison). The +/// pause function, on the other hand, can be heavier-weight, and may do arbitrary work (within +/// the context of the caller). The cond function will be called some multiple of times between +/// calls to pause, and if cond returns false, then this function immediately returns. The +/// [core::hint::spin_loop] function is called between calls to cond. 
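A minimal usage sketch of the spin-wait contract documented above, written against the spin_wait_until function defined just below and assumed to sit in the same module; the READY flag and wait_for_ready are hypothetical, for illustration only.

use core::sync::atomic::{AtomicBool, Ordering};

static READY: AtomicBool = AtomicBool::new(false);

// Spins until another CPU stores `true` into READY.
fn wait_for_ready() {
    spin_wait_until(
        // cond: cheap and side-effect free -- a single atomic load plus a comparison.
        || READY.load(Ordering::Acquire).then_some(()),
        // pause: may do heavier work between batches of cond checks; a no-op here,
        // but it could e.g. drain per-CPU IPI work as ipi_exec does above.
        || {},
    );
}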
+pub fn spin_wait_until(until: impl Fn() -> Option, mut pause: impl FnMut()) -> R { + const NR_SPIN_LOOPS: usize = 100; + loop { + for _ in 0..NR_SPIN_LOOPS { + if let Some(ret) = until() { + return ret; + } + core::hint::spin_loop(); + } + arch::processor::spin_wait_iteration(); + pause(); + } +} + +#[cfg(test)] +mod test { + use alloc::{boxed::Box, sync::Arc}; + use core::sync::atomic::{AtomicUsize, Ordering}; + + use twizzler_kernel_macros::kernel_test; + + use super::ALL_PROCESSORS; + use crate::interrupt::Destination; + + const NR_IPI_TEST_ITERS: usize = 1000; + #[kernel_test] + fn ipi_test() { + for _ in 0..NR_IPI_TEST_ITERS { + let nr_cpus = unsafe { &ALL_PROCESSORS }.iter().flatten().count(); + let counter = Arc::new(AtomicUsize::new(0)); + let counter2 = counter.clone(); + super::ipi_exec( + Destination::All, + Box::new(move || { + counter2.fetch_add(1, Ordering::SeqCst); + }), + ); + assert_eq!(nr_cpus, counter.load(Ordering::SeqCst)); + + let counter = Arc::new(AtomicUsize::new(0)); + let counter2 = counter.clone(); + super::ipi_exec( + Destination::AllButSelf, + Box::new(move || { + counter2.fetch_add(1, Ordering::SeqCst); + }), + ); + assert_eq!(nr_cpus, counter.load(Ordering::SeqCst) + 1); + + let counter = Arc::new(AtomicUsize::new(0)); + let counter2 = counter.clone(); + super::ipi_exec( + Destination::Bsp, + Box::new(move || { + counter2.fetch_add(1, Ordering::SeqCst); + }), + ); + assert_eq!(1, counter.load(Ordering::SeqCst)); + + let counter = Arc::new(AtomicUsize::new(0)); + let counter2 = counter.clone(); + super::ipi_exec( + Destination::Single(0), + Box::new(move || { + counter2.fetch_add(1, Ordering::SeqCst); + }), + ); + assert_eq!(1, counter.load(Ordering::SeqCst)); + + let counter = Arc::new(AtomicUsize::new(0)); + let counter2 = counter.clone(); + super::ipi_exec( + Destination::LowestPriority, + Box::new(move || { + counter2.fetch_add(1, Ordering::SeqCst); + }), + ); + assert_eq!(1, counter.load(Ordering::SeqCst)); + } + } +} diff --git a/src/kernel/src/queue.rs b/src/kernel/src/queue.rs index 2c930de2..c797443b 100644 --- a/src/kernel/src/queue.rs +++ b/src/kernel/src/queue.rs @@ -1,6 +1,6 @@ +use alloc::{collections::BTreeMap, sync::Arc, vec::Vec}; use core::sync::atomic::{AtomicBool, Ordering}; -use alloc::{collections::BTreeMap, sync::Arc, vec::Vec}; use twizzler_abi::{ device::CacheType, object::Protections, diff --git a/src/kernel/src/sched.rs b/src/kernel/src/sched.rs index 55856a13..891cbd5d 100644 --- a/src/kernel/src/sched.rs +++ b/src/kernel/src/sched.rs @@ -1,6 +1,6 @@ +use alloc::{collections::BTreeMap, sync::Arc, vec::Vec}; use core::sync::atomic::{AtomicBool, AtomicPtr, AtomicU64, Ordering}; -use alloc::{collections::BTreeMap, sync::Arc, vec::Vec}; use fixedbitset::FixedBitSet; use twizzler_abi::thread::ExecutionState; @@ -10,12 +10,10 @@ use crate::{ once::Once, processor::{current_processor, get_processor, Processor}, spinlock::Spinlock, - thread::{current_thread_ref, set_current_thread, Thread, ThreadRef}, + thread::{current_thread_ref, priority::Priority, set_current_thread, Thread, ThreadRef}, utils::quick_random, }; -use crate::thread::priority::Priority; - #[derive(Clone, Debug, Copy)] pub enum CPUTopoType { System, diff --git a/src/kernel/src/security.rs b/src/kernel/src/security.rs index 446ae3db..998db58b 100644 --- a/src/kernel/src/security.rs +++ b/src/kernel/src/security.rs @@ -1,4 +1,5 @@ use alloc::{collections::BTreeMap, sync::Arc}; + use lazy_static::lazy_static; use twizzler_abi::{ object::{ObjID, Protections}, @@ -88,7 +89,8 @@ 
impl SecCtxMgr { self.inner.lock().active.clone() } - /// Get the active ID. This is faster than active().id() and doesn't allocate memory (and only uses a spinlock). + /// Get the active ID. This is faster than active().id() and doesn't allocate memory (and only + /// uses a spinlock). pub fn active_id(&self) -> ObjID { *self.active_id.lock() } @@ -203,8 +205,9 @@ impl Drop for SecCtxMgr { fn drop(&mut self) { let mut global = GLOBAL_SECCTX_MGR.contexts.lock(); let inner = self.inner.lock(); - // Check the contexts we have a reference to. If the value is 2, then it's only us and the global mgr that have a ref. - // Since we hold the global mgr lock, this will not get incremented if no one else holds a ref. + // Check the contexts we have a reference to. If the value is 2, then it's only us and the + // global mgr that have a ref. Since we hold the global mgr lock, this will not get + // incremented if no one else holds a ref. for ctx in inner.inactive.values() { if ctx.id() != KERNEL_SCTX && Arc::strong_count(ctx) == 2 { global.remove(&ctx.id()); diff --git a/src/kernel/src/syscall/mod.rs b/src/kernel/src/syscall/mod.rs index 26d2cf49..a061a10d 100644 --- a/src/kernel/src/syscall/mod.rs +++ b/src/kernel/src/syscall/mod.rs @@ -11,17 +11,16 @@ use twizzler_abi::{ }, }; -use crate::memory::VirtAddr; -use crate::time::TICK_SOURCES; -use crate::{ - clock::{fill_with_every_first, fill_with_first_kind, fill_with_kind}, - thread::current_thread_ref, -}; - use self::{ object::{sys_new_handle, sys_sctx_attach, sys_unbind_handle}, thread::thread_ctrl, }; +use crate::{ + clock::{fill_with_every_first, fill_with_first_kind, fill_with_kind}, + memory::VirtAddr, + thread::current_thread_ref, + time::TICK_SOURCES, +}; // TODO: move the handle stuff into its own file and make this private. 
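The reference-count check in SecCtxMgr's Drop impl above (a strong count of exactly 2 means only this manager and the global registry still hold the context) is a general Arc pattern. A standalone sketch of it, using std and illustrative types rather than the kernel's security-context types:

use std::{collections::BTreeMap, sync::Arc};

struct Ctx { id: u64 }

/// Drop registry entries that nobody outside `local` and `global` can still reach.
/// Only sound while the caller holds the lock protecting `global`, so no new
/// references can be handed out concurrently.
fn reap_unused(global: &mut BTreeMap<u64, Arc<Ctx>>, local: &BTreeMap<u64, Arc<Ctx>>) {
    for ctx in local.values() {
        // One count held by `local`, one by `global`: the context is otherwise unreachable.
        if Arc::strong_count(ctx) == 2 {
            global.remove(&ctx.id);
        }
    }
}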
pub mod object; diff --git a/src/kernel/src/syscall/object.rs b/src/kernel/src/syscall/object.rs index fc6eb57c..0f7bd795 100644 --- a/src/kernel/src/syscall/object.rs +++ b/src/kernel/src/syscall/object.rs @@ -2,6 +2,7 @@ use alloc::{ collections::{BTreeMap, BTreeSet}, sync::Arc, }; + use twizzler_abi::{ object::{ObjID, Protections}, syscall::{ diff --git a/src/kernel/src/syscall/sync.rs b/src/kernel/src/syscall/sync.rs index 3fe617fe..a20130f0 100644 --- a/src/kernel/src/syscall/sync.rs +++ b/src/kernel/src/syscall/sync.rs @@ -1,6 +1,6 @@ +use alloc::{collections::BTreeMap, vec::Vec}; use core::time::Duration; -use alloc::{collections::BTreeMap, vec::Vec}; use twizzler_abi::{ syscall::{ThreadSync, ThreadSyncError, ThreadSyncReference, ThreadSyncSleep, ThreadSyncWake}, thread::ExecutionState, diff --git a/src/kernel/src/thread.rs b/src/kernel/src/thread.rs index c7cdcc81..97640829 100644 --- a/src/kernel/src/thread.rs +++ b/src/kernel/src/thread.rs @@ -1,10 +1,10 @@ +use alloc::{boxed::Box, sync::Arc}; use core::{ alloc::Layout, cell::RefCell, sync::atomic::{AtomicI32, AtomicU32, AtomicU64, Ordering}, }; -use alloc::{boxed::Box, sync::Arc}; use intrusive_collections::{linked_list::AtomicLink, offset_of, RBTreeAtomicLink}; use twizzler_abi::{ object::{ObjID, NULLPAGE_SIZE}, @@ -13,6 +13,10 @@ use twizzler_abi::{ upcall::{UpcallFlags, UpcallInfo, UpcallMode, UpcallTarget, UPCALL_EXIT_CODE}, }; +use self::{ + flags::{THREAD_IN_KERNEL, THREAD_PROC_IDLE}, + priority::{Priority, PriorityClass}, +}; use crate::{ idcounter::{Id, IdCounter}, interrupt, @@ -23,11 +27,6 @@ use crate::{ spinlock::Spinlock, }; -use self::{ - flags::{THREAD_IN_KERNEL, THREAD_PROC_IDLE}, - priority::{Priority, PriorityClass}, -}; - pub mod entry; mod flags; pub mod priority; diff --git a/src/kernel/src/thread/entry.rs b/src/kernel/src/thread/entry.rs index f3e605fc..d7deedef 100644 --- a/src/kernel/src/thread/entry.rs +++ b/src/kernel/src/thread/entry.rs @@ -1,11 +1,12 @@ +use alloc::{boxed::Box, sync::Arc}; use core::mem::MaybeUninit; -use alloc::{boxed::Box, sync::Arc}; use twizzler_abi::{ object::ObjID, syscall::{ThreadSpawnArgs, ThreadSpawnError, ThreadSpawnFlags, UpcallTargetSpawnOption}, }; +use super::{current_memory_context, current_thread_ref, priority::Priority, Thread, ThreadRef}; use crate::{ condvar::CondVar, memory::{context::Context, VirtAddr}, @@ -16,8 +17,6 @@ use crate::{ userinit::user_init, }; -use super::{current_memory_context, current_thread_ref, priority::Priority, Thread, ThreadRef}; - extern "C" fn user_new_start() { let (entry, stack_base, stack_size, arg) = { /* we need this scope to drop the current thread ref before jumping to user */ @@ -122,8 +121,8 @@ struct KthreadArg { arg: usize, } -/// Run a closure on a new thread. Returns both the handle to the thread and also a handle that allows -/// the caller to wait for the result. +/// Run a closure on a new thread. Returns both the handle to the thread and also a handle that +/// allows the caller to wait for the result. pub fn run_closure_in_new_thread( pri: Priority, f: F, @@ -132,8 +131,8 @@ where F: (FnOnce() -> R) + Send, { let main = move |arg: usize| { - // Safety: this pointer is generated below by a call to Arc::into_raw, and is guaranteed to have a valid count by the - // code that generates this pointer. + // Safety: this pointer is generated below by a call to Arc::into_raw, and is guaranteed to + // have a valid count by the code that generates this pointer. 
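// For reference, the whole handoff in this file, simplified (the real code below wraps the
// raw pointer in a KthreadArg box before handing it to the new thread):
//     let raw = Arc::into_raw(info);               // count = 1, now held as a raw pointer
//     unsafe { Arc::increment_strong_count(raw) }; // count = 2: one for us, one for the new thread
//     /* spawn the thread, passing `raw` along */  // the new thread calls Arc::from_raw(arg) here
//     let info = unsafe { Arc::from_raw(raw) };    // the caller reclaims its own count
// Each Arc::from_raw consumes exactly one of those counts, so neither side leaks or double-frees.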
let info = unsafe { Arc::from_raw(arg as *const KthreadClosure) }; // Take this out, but don't hold the lock when we run the closure. let closure = { info.closure.lock().take().unwrap() }; @@ -158,8 +157,8 @@ where signal: CondVar::new(), }); let raw = Arc::into_raw(info); - // Safety: manually increment the strong count so we can pass the raw pointer to the new thread. That - // thread will call Arc::from_raw, gaining a valid Arc. + // Safety: manually increment the strong count so we can pass the raw pointer to the new thread. + // That thread will call Arc::from_raw, gaining a valid Arc. unsafe { Arc::increment_strong_count(raw); } @@ -168,17 +167,18 @@ where arg: raw as usize, }); let thr = start_new_kernel(pri, trampoline, Box::into_raw(arg) as usize); - // Safety: this is our own Arc, from earlier, after we manually incremented the count on behalf of - // the receiving thread. + // Safety: this is our own Arc, from earlier, after we manually incremented the count on behalf + // of the receiving thread. let info = unsafe { Arc::from_raw(raw) }; (thr, info) } #[cfg(test)] mod test { - use crate::thread::Priority; use twizzler_kernel_macros::kernel_test; + use crate::thread::Priority; + #[kernel_test] fn test_closure() { let x = super::run_closure_in_new_thread(Priority::default_user(), || 42) diff --git a/src/kernel/src/thread/suspend.rs b/src/kernel/src/thread/suspend.rs index c340ba5b..05d8f87b 100644 --- a/src/kernel/src/thread/suspend.rs +++ b/src/kernel/src/thread/suspend.rs @@ -1,10 +1,14 @@ +use alloc::boxed::Box; use core::sync::atomic::Ordering; -use alloc::boxed::Box; use intrusive_collections::{intrusive_adapter, KeyAdapter, RBTree}; use lazy_static::lazy_static; use twizzler_abi::{object::ObjID, thread::ExecutionState}; +use super::{ + flags::{THREAD_IS_SUSPENDED, THREAD_MUST_SUSPEND}, + Thread, ThreadRef, +}; use crate::{ interrupt::Destination, processor::ipi_exec, @@ -13,11 +17,6 @@ use crate::{ thread::current_thread_ref, }; -use super::{ - flags::{THREAD_IS_SUSPENDED, THREAD_MUST_SUSPEND}, - Thread, ThreadRef, -}; - lazy_static! { static ref SUSPENDED_THREADS: Spinlock> = Spinlock::new(RBTree::new(SuspendNodeAdapter::new())); @@ -32,10 +31,11 @@ impl<'a> KeyAdapter<'a> for SuspendNodeAdapter { } impl Thread { - /// Tell a thread to suspend. If that thread is the caller, then suspend immediately unless in a critical section. - /// Otherwise, call out to other CPUs to - /// force the thread to suspend. Note that if the target is the calling thread, then it will have to be unsuspended before - /// it returns, and so will NOT be suspended upon completion of this call. + /// Tell a thread to suspend. If that thread is the caller, then suspend immediately unless in a + /// critical section. Otherwise, call out to other CPUs to + /// force the thread to suspend. Note that if the target is the calling thread, then it will + /// have to be unsuspended before it returns, and so will NOT be suspended upon completion + /// of this call. pub fn suspend(self: &ThreadRef) { self.flags.fetch_or(THREAD_MUST_SUSPEND, Ordering::SeqCst); if self == ¤t_thread_ref().unwrap() { @@ -61,11 +61,13 @@ impl Thread { return; } if self.flags.fetch_or(THREAD_IS_SUSPENDED, Ordering::SeqCst) & THREAD_IS_SUSPENDED != 0 { - // The only time we'll see this flag set is if we are coming out of a suspend. So, just return. + // The only time we'll see this flag set is if we are coming out of a suspend. So, just + // return. 
return; } { - // Do this before inserting the thread, to ensure no one writes Running here before we suspend. + // Do this before inserting the thread, to ensure no one writes Running here before we + // suspend. self.set_state(ExecutionState::Suspended); let mut suspended_threads = SUSPENDED_THREADS.lock(); assert!(suspended_threads.find(&self.objid()).is_null()); @@ -75,14 +77,16 @@ impl Thread { // goodnight! schedule(false); - // goodmorning! Clear the flags. This is one operation, so we'll never observe THREAD_IS_SUSPENDED without THREAD_MUST_SUSPEND. + // goodmorning! Clear the flags. This is one operation, so we'll never observe + // THREAD_IS_SUSPENDED without THREAD_MUST_SUSPEND. self.flags.fetch_and( !(THREAD_IS_SUSPENDED | THREAD_MUST_SUSPEND), Ordering::SeqCst, ); } - /// If a thread is suspended, then wake it up. Returns false if that thread was not on the suspend list. + /// If a thread is suspended, then wake it up. Returns false if that thread was not on the + /// suspend list. pub fn unsuspend_thread(self: &ThreadRef) -> bool { let mut suspended_threads = SUSPENDED_THREADS.lock(); if suspended_threads.find_mut(&self.objid()).remove().is_some() { @@ -96,12 +100,12 @@ impl Thread { } mod test { + use alloc::sync::Arc; use core::{ sync::atomic::{AtomicBool, Ordering}, time::Duration, }; - use alloc::sync::Arc; use twizzler_kernel_macros::kernel_test; use crate::{ diff --git a/src/kernel/src/userinit.rs b/src/kernel/src/userinit.rs index 42474eeb..e494d1e3 100644 --- a/src/kernel/src/userinit.rs +++ b/src/kernel/src/userinit.rs @@ -1,4 +1,5 @@ use alloc::sync::Arc; + use twizzler_abi::{ aux::{KernelInitInfo, KernelInitName}, object::Protections, diff --git a/src/kernel/src/utils.rs b/src/kernel/src/utils.rs index b557b7db..8f70b61e 100755 --- a/src/kernel/src/utils.rs +++ b/src/kernel/src/utils.rs @@ -1,79 +1,79 @@ -use crate::{ - mutex::{LockGuard, Mutex}, - processor::current_processor, - spinlock::{self, GenericSpinlock, RelaxStrategy}, -}; - -pub fn align + Into>(val: T, align: usize) -> T { - let val = val.into(); - if val == 0 { - return val.into(); - } - let res: usize = ((val - 1) & !(align - 1)) + align; - res.into() -} - -/// Lock two mutexes in a stable order such that no deadlock cycles are created. -/// -/// This is VITAL if you want to lock multiple mutexes for objects where you cannot -/// statically ensure ordering to avoid deadlock. It ensures that any two given mutexes -/// will be locked in the same order even if you permute the arguments to this function. -/// It does so by inspecting the addresses of the mutexes themselves to project a total -/// order onto the locks. -pub fn lock_two<'a, 'b, A, B>( - a: &'a Mutex, - b: &'b Mutex, -) -> (LockGuard<'a, A>, LockGuard<'b, B>) { - let a_val = a as *const Mutex as usize; - let b_val = b as *const Mutex as usize; - assert_ne!(a_val, b_val); - if a_val > b_val { - let lg_b = b.lock(); - let lg_a = a.lock(); - (lg_a, lg_b) - } else { - let lg_a = a.lock(); - let lg_b = b.lock(); - (lg_a, lg_b) - } -} -/// Lock two spinlocks in a stable order such that no deadlock cycles are created. -/// -/// This is VITAL if you want to lock multiple mutexes for objects where you cannot -/// statically ensure ordering to avoid deadlock. It ensures that any two given spinlocks -/// will be locked in the same order even if you permute the arguments to this function. -/// It does so by inspecting the addresses of the spinlocks themselves to project a total -/// order onto the locks. 
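A usage sketch for the address-ordered locking helper documented above, assuming the kernel's Mutex and lock_two as shown in this file and assuming LockGuard dereferences mutably like a standard mutex guard; the transfer function and its counters are hypothetical.

use crate::{mutex::Mutex, utils::lock_two};

// Move `n` units between two counters protected by separate mutexes. Because lock_two
// orders acquisition by address, transfer(a, b, n) and transfer(b, a, m) running
// concurrently cannot deadlock against each other.
fn transfer(src: &Mutex<usize>, dst: &Mutex<usize>, n: usize) {
    let (mut s, mut d) = lock_two(src, dst);
    *s -= n;
    *d += n;
}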
-pub fn spinlock_two<'a, 'b, A, B, R: RelaxStrategy>( - a: &'a GenericSpinlock, - b: &'b GenericSpinlock, -) -> (spinlock::LockGuard<'a, A, R>, spinlock::LockGuard<'b, B, R>) { - let a_val = a as *const GenericSpinlock as usize; - let b_val = b as *const GenericSpinlock as usize; - assert_ne!(a_val, b_val); - if a_val > b_val { - let lg_b = b.lock(); - let lg_a = a.lock(); - (lg_a, lg_b) - } else { - let lg_a = a.lock(); - let lg_b = b.lock(); - (lg_a, lg_b) - } -} - -#[thread_local] -static mut RAND_STATE: u32 = 0; - -/// A quick, but poor, NON CRYPTOGRAPHIC random number generator. -pub fn quick_random() -> u32 { - let mut state = unsafe { RAND_STATE }; - if state == 0 { - state = current_processor().id; - } - let newstate = state.wrapping_mul(69069).wrapping_add(5); - unsafe { - RAND_STATE = newstate; - } - newstate >> 16 -} +use crate::{ + mutex::{LockGuard, Mutex}, + processor::current_processor, + spinlock::{self, GenericSpinlock, RelaxStrategy}, +}; + +pub fn align + Into>(val: T, align: usize) -> T { + let val = val.into(); + if val == 0 { + return val.into(); + } + let res: usize = ((val - 1) & !(align - 1)) + align; + res.into() +} + +/// Lock two mutexes in a stable order such that no deadlock cycles are created. +/// +/// This is VITAL if you want to lock multiple mutexes for objects where you cannot +/// statically ensure ordering to avoid deadlock. It ensures that any two given mutexes +/// will be locked in the same order even if you permute the arguments to this function. +/// It does so by inspecting the addresses of the mutexes themselves to project a total +/// order onto the locks. +pub fn lock_two<'a, 'b, A, B>( + a: &'a Mutex, + b: &'b Mutex, +) -> (LockGuard<'a, A>, LockGuard<'b, B>) { + let a_val = a as *const Mutex as usize; + let b_val = b as *const Mutex as usize; + assert_ne!(a_val, b_val); + if a_val > b_val { + let lg_b = b.lock(); + let lg_a = a.lock(); + (lg_a, lg_b) + } else { + let lg_a = a.lock(); + let lg_b = b.lock(); + (lg_a, lg_b) + } +} +/// Lock two spinlocks in a stable order such that no deadlock cycles are created. +/// +/// This is VITAL if you want to lock multiple mutexes for objects where you cannot +/// statically ensure ordering to avoid deadlock. It ensures that any two given spinlocks +/// will be locked in the same order even if you permute the arguments to this function. +/// It does so by inspecting the addresses of the spinlocks themselves to project a total +/// order onto the locks. +pub fn spinlock_two<'a, 'b, A, B, R: RelaxStrategy>( + a: &'a GenericSpinlock, + b: &'b GenericSpinlock, +) -> (spinlock::LockGuard<'a, A, R>, spinlock::LockGuard<'b, B, R>) { + let a_val = a as *const GenericSpinlock as usize; + let b_val = b as *const GenericSpinlock as usize; + assert_ne!(a_val, b_val); + if a_val > b_val { + let lg_b = b.lock(); + let lg_a = a.lock(); + (lg_a, lg_b) + } else { + let lg_a = a.lock(); + let lg_b = b.lock(); + (lg_a, lg_b) + } +} + +#[thread_local] +static mut RAND_STATE: u32 = 0; + +/// A quick, but poor, NON CRYPTOGRAPHIC random number generator. +pub fn quick_random() -> u32 { + let mut state = unsafe { RAND_STATE }; + if state == 0 { + state = current_processor().id; + } + let newstate = state.wrapping_mul(69069).wrapping_add(5); + unsafe { + RAND_STATE = newstate; + } + newstate >> 16 +} diff --git a/src/lib/twizzler-abi/src/aux.rs b/src/lib/twizzler-abi/src/aux.rs index 5b3fb546..563ce811 100644 --- a/src/lib/twizzler-abi/src/aux.rs +++ b/src/lib/twizzler-abi/src/aux.rs @@ -1,6 +1,7 @@ //! 
When running a new program (and thus, initializing a new runtime), the new program expects to //! receive some information about how it was started, including arguments, env vars, etc. These are -//! passed to the new program through the _start function as an array of AuxEntries as its only argument. +//! passed to the new program through the _start function as an array of AuxEntries as its only +//! argument. //! //! This array of entries is an unspecified length and is terminated by the Null entry at the end of //! the array. diff --git a/src/lib/twizzler-abi/src/device/mod.rs b/src/lib/twizzler-abi/src/device/mod.rs index f39d27e1..d3451f00 100644 --- a/src/lib/twizzler-abi/src/device/mod.rs +++ b/src/lib/twizzler-abi/src/device/mod.rs @@ -48,7 +48,8 @@ pub enum BusType { #[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Debug)] #[repr(u32)] pub enum SubObjectType { - /// An info sub-object, which is comprised of a device-specific (or bus-specific) information structure. + /// An info sub-object, which is comprised of a device-specific (or bus-specific) information + /// structure. Info = 0, /// A mapping of the MMIO registers for this device into an object. Mmio = 1, @@ -106,7 +107,8 @@ impl crate::marker::BaseType for MmioInfo { todo!() } } -/// An mmio object has, at its base, a [MmioInfo] struct. At this offset, the mmio mapping actually starts. +/// An mmio object has, at its base, a [MmioInfo] struct. At this offset, the mmio mapping actually +/// starts. pub const MMIO_OFFSET: usize = 0x2000; bitflags::bitflags! { diff --git a/src/lib/twizzler-abi/src/marker.rs b/src/lib/twizzler-abi/src/marker.rs index 454d711a..3a18a2c6 100644 --- a/src/lib/twizzler-abi/src/marker.rs +++ b/src/lib/twizzler-abi/src/marker.rs @@ -1,4 +1,5 @@ -//! Marker traits used to indicate safety for storing data in objects and using a struct as a base type. +//! Marker traits used to indicate safety for storing data in objects and using a struct as a base +//! type. use core::{ cell::UnsafeCell, diff --git a/src/lib/twizzler-abi/src/meta.rs b/src/lib/twizzler-abi/src/meta.rs index 01b719c9..1af52aa1 100644 --- a/src/lib/twizzler-abi/src/meta.rs +++ b/src/lib/twizzler-abi/src/meta.rs @@ -1,8 +1,9 @@ //! Types that make up object metadata. -use crate::object::ObjID; - -use crate::marker::{BaseTag, BaseVersion}; +use crate::{ + marker::{BaseTag, BaseVersion}, + object::ObjID, +}; /// Flags for objects. #[derive(Clone, Copy, Debug)] diff --git a/src/lib/twizzler-abi/src/object.rs b/src/lib/twizzler-abi/src/object.rs index b04e9a61..63e34d52 100644 --- a/src/lib/twizzler-abi/src/object.rs +++ b/src/lib/twizzler-abi/src/object.rs @@ -1,4 +1,5 @@ -//! Low-level object APIs, mostly around IDs and basic things like protection definitions and metadata. +//! Low-level object APIs, mostly around IDs and basic things like protection definitions and +//! metadata. /// The maximum size of an object, including null page and meta page(s). pub const MAX_SIZE: usize = 1024 * 1024 * 1024; diff --git a/src/lib/twizzler-abi/src/runtime.rs b/src/lib/twizzler-abi/src/runtime.rs index aaa0d4d4..9af07254 100644 --- a/src/lib/twizzler-abi/src/runtime.rs +++ b/src/lib/twizzler-abi/src/runtime.rs @@ -1,9 +1,10 @@ -//! This mod implements the [twizzler_runtime_api::Runtime] trait as the "minruntime", or minimal runtime implementation. -//! The word minimal is pretty subjective, but here we're roughly saying "it's the runtime that you can use to interact with -//! the kernel and twizzler-abi directly, with no additional support". +//! 
This mod implements the [twizzler_runtime_api::Runtime] trait as the "minruntime", or minimal +//! runtime implementation. The word minimal is pretty subjective, but here we're roughly saying +//! "it's the runtime that you can use to interact with the kernel and twizzler-abi directly, with +//! no additional support". //! -//! Additionally, we provide a mechanism for linking our runtime only if no other runtime is linked, via the "extern_weak" linkage -//! attribute on __twz_get_runtime. +//! Additionally, we provide a mechanism for linking our runtime only if no other runtime is linked, +//! via the "extern_weak" linkage attribute on __twz_get_runtime. use twizzler_runtime_api::Runtime; @@ -42,4 +43,4 @@ static OUR_RUNTIME: MinimalRuntime = MinimalRuntime {}; #[used] static USE_MARKER: fn() -> &'static (dyn Runtime + Sync) = __twz_get_runtime; -pub use object::slot::get_kernel_init_info; \ No newline at end of file +pub use object::slot::get_kernel_init_info; diff --git a/src/lib/twizzler-abi/src/runtime/alloc.rs b/src/lib/twizzler-abi/src/runtime/alloc.rs index 37e6a1af..2ba810bd 100644 --- a/src/lib/twizzler-abi/src/runtime/alloc.rs +++ b/src/lib/twizzler-abi/src/runtime/alloc.rs @@ -1,10 +1,11 @@ -//! Implements the allocation part of the core runtime trait. We use talc as our allocator, creating new objects for it to -//! claim when it runs out of memory. +//! Implements the allocation part of the core runtime trait. We use talc as our allocator, creating +//! new objects for it to claim when it runs out of memory. use core::{alloc::GlobalAlloc, ptr::NonNull}; use talc::{OomHandler, Span}; +use super::object::slot::global_allocate; use crate::{ object::{Protections, MAX_SIZE, NULLPAGE_SIZE}, runtime::simple_mutex::Mutex, @@ -14,8 +15,6 @@ use crate::{ }, }; -use super::object::slot::global_allocate; - pub struct MinimalAllocator { imp: Mutex>, } diff --git a/src/lib/twizzler-abi/src/runtime/core.rs b/src/lib/twizzler-abi/src/runtime/core.rs index a106ac8c..14d682e1 100644 --- a/src/lib/twizzler-abi/src/runtime/core.rs +++ b/src/lib/twizzler-abi/src/runtime/core.rs @@ -4,17 +4,16 @@ use core::{alloc::GlobalAlloc, ptr}; use twizzler_runtime_api::{AuxEntry, BasicAux, BasicReturn, CoreRuntime}; -use crate::{ - object::ObjID, - upcall::{UpcallFlags, UpcallInfo, UpcallMode, UpcallOptions, UpcallTarget}, -}; - use super::{ alloc::MinimalAllocator, phdrs::{process_phdrs, Phdr}, tls::init_tls, MinimalRuntime, }; +use crate::{ + object::ObjID, + upcall::{UpcallFlags, UpcallInfo, UpcallMode, UpcallOptions, UpcallTarget}, +}; // Just keep a single, simple global allocator. static GLOBAL_ALLOCATOR: MinimalAllocator = MinimalAllocator::new(); diff --git a/src/lib/twizzler-abi/src/runtime/debug.rs b/src/lib/twizzler-abi/src/runtime/debug.rs index 575fa081..4e2bce29 100644 --- a/src/lib/twizzler-abi/src/runtime/debug.rs +++ b/src/lib/twizzler-abi/src/runtime/debug.rs @@ -2,12 +2,11 @@ use twizzler_runtime_api::{AddrRange, DebugRuntime, Library, LibraryId, MapFlags}; -use crate::object::{InternalObject, ObjID, Protections, MAX_SIZE, NULLPAGE_SIZE}; - use super::{ MinimalRuntime, __twz_get_runtime, load_elf::{ElfObject, PhdrType}, }; +use crate::object::{InternalObject, ObjID, Protections, MAX_SIZE, NULLPAGE_SIZE}; static mut EXEC_ID: ObjID = ObjID::new(0); @@ -67,7 +66,8 @@ impl DebugRuntime for MinimalRuntime { Some(lib.mapping.clone()) } - // The minimal runtime doesn't provide this, since we can get segment information in a simpler way for static binaries. 
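The allocation half of the runtime described above (an arena allocator that claims fresh objects when it runs out of memory) follows a common shape. The sketch below shows only that shape, not the actual `MinimalAllocator` or its talc integration; `arena_alloc` and `claim_new_region` are stand-ins that always report failure.

```rust
use core::alloc::{GlobalAlloc, Layout};

struct GrowingAllocator;

impl GrowingAllocator {
    /// Stand-in for the inner arena; pretend it is currently full.
    fn arena_alloc(&self, _layout: Layout) -> *mut u8 {
        core::ptr::null_mut()
    }
    /// Stand-in for claiming a new backing object and handing its span to the arena.
    fn claim_new_region(&self) -> bool {
        false
    }
}

unsafe impl GlobalAlloc for GrowingAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let p = self.arena_alloc(layout);
        if !p.is_null() {
            return p;
        }
        // Out of memory: grow the arena with new backing memory, then retry once.
        if self.claim_new_region() {
            self.arena_alloc(layout)
        } else {
            core::ptr::null_mut()
        }
    }

    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        // The real allocator returns memory to the arena; omitted in this sketch.
    }
}
```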
+ // The minimal runtime doesn't provide this, since we can get segment information in a simpler + // way for static binaries. fn iterate_phdr( &self, _f: &mut dyn FnMut(twizzler_runtime_api::DlPhdrInfo) -> core::ffi::c_int, diff --git a/src/lib/twizzler-abi/src/runtime/idcounter.rs b/src/lib/twizzler-abi/src/runtime/idcounter.rs index 15c2f826..1c4b3bfd 100644 --- a/src/lib/twizzler-abi/src/runtime/idcounter.rs +++ b/src/lib/twizzler-abi/src/runtime/idcounter.rs @@ -68,7 +68,8 @@ impl IdCounter { next } - /// Release an ID to that it may be reused in the future. Note: it may not be immediately reused. + /// Release an ID to that it may be reused in the future. Note: it may not be immediately + /// reused. pub fn release(&self, id: u32) { // First see if we can just subtract the next counter. if self @@ -82,7 +83,8 @@ impl IdCounter { let mut stack = self.stack.lock(); stack.push(id); if stack.len() > MAX_BEFORE_UH_OH { - // We hit the high watermark, so make future calls to fresh() try harder to get the lock. + // We hit the high watermark, so make future calls to fresh() try harder to get the + // lock. self.uh_oh.store(true, Ordering::SeqCst); } self.stack_non_empty.store(true, Ordering::SeqCst); diff --git a/src/lib/twizzler-abi/src/runtime/load_elf.rs b/src/lib/twizzler-abi/src/runtime/load_elf.rs index 6cc1ba6e..f1c8aa62 100644 --- a/src/lib/twizzler-abi/src/runtime/load_elf.rs +++ b/src/lib/twizzler-abi/src/runtime/load_elf.rs @@ -2,21 +2,18 @@ use core::{intrinsics::copy_nonoverlapping, mem::size_of}; -use crate::object::InternalObject; +use twizzler_runtime_api::AuxEntry; -use crate::syscall::UpcallTargetSpawnOption; use crate::{ - object::{ObjID, Protections, MAX_SIZE, NULLPAGE_SIZE}, + object::{InternalObject, ObjID, Protections, MAX_SIZE, NULLPAGE_SIZE}, slot::{RESERVED_DATA, RESERVED_STACK, RESERVED_TEXT}, syscall::{ sys_unbind_handle, BackingType, HandleType, LifetimeType, MapFlags, NewHandleFlags, ObjectCreate, ObjectCreateFlags, ObjectSource, ThreadSpawnArgs, ThreadSpawnFlags, - UnbindHandleFlags, + UnbindHandleFlags, UpcallTargetSpawnOption, }, }; -use twizzler_runtime_api::AuxEntry; - #[derive(Debug)] #[repr(C)] pub(crate) struct ElfHeader { diff --git a/src/lib/twizzler-abi/src/runtime/object.rs b/src/lib/twizzler-abi/src/runtime/object.rs index 17bdc867..e62dd9bd 100644 --- a/src/lib/twizzler-abi/src/runtime/object.rs +++ b/src/lib/twizzler-abi/src/runtime/object.rs @@ -2,17 +2,16 @@ use core::ptr::NonNull; -use crate::{rustc_alloc::boxed::Box, syscall::UnmapFlags}; use twizzler_runtime_api::{InternalHandleRefs, MapError, ObjectHandle, ObjectRuntime}; +use super::MinimalRuntime; use crate::{ object::{ObjID, Protections, MAX_SIZE, NULLPAGE_SIZE}, runtime::object::slot::global_allocate, - syscall::{sys_object_map, ObjectMapError}, + rustc_alloc::boxed::Box, + syscall::{sys_object_map, ObjectMapError, UnmapFlags}, }; -use super::MinimalRuntime; - mod handle; #[allow(unused_imports)] diff --git a/src/lib/twizzler-abi/src/runtime/object/handle.rs b/src/lib/twizzler-abi/src/runtime/object/handle.rs index 285a259b..4aba9273 100644 --- a/src/lib/twizzler-abi/src/runtime/object/handle.rs +++ b/src/lib/twizzler-abi/src/runtime/object/handle.rs @@ -2,12 +2,12 @@ use core::{marker::PhantomData, ptr::NonNull}; -use crate::rustc_alloc::boxed::Box; use twizzler_runtime_api::{InternalHandleRefs, MapFlags, ObjectHandle}; use crate::{ object::{ObjID, Protections, MAX_SIZE, NULLPAGE_SIZE}, runtime::object::slot::global_allocate, + rustc_alloc::boxed::Box, syscall::{ sys_object_create, 
sys_object_map, BackingType, LifetimeType, ObjectCreate, ObjectCreateFlags, diff --git a/src/lib/twizzler-abi/src/runtime/object/slot.rs b/src/lib/twizzler-abi/src/runtime/object/slot.rs index 8310674d..efd9cbc7 100644 --- a/src/lib/twizzler-abi/src/runtime/object/slot.rs +++ b/src/lib/twizzler-abi/src/runtime/object/slot.rs @@ -11,9 +11,15 @@ pub fn global_release(slot: usize) { SLOT_TRACKER.lock().dealloc(slot) } -use crate::{arch::SLOTS, runtime::simple_mutex::Mutex, object::{MAX_SIZE, NULLPAGE_SIZE}, aux::KernelInitInfo}; use bitset_core::BitSet; +use crate::{ + arch::SLOTS, + aux::KernelInitInfo, + object::{MAX_SIZE, NULLPAGE_SIZE}, + runtime::simple_mutex::Mutex, +}; + struct SlotTracker { bitmap: [u32; SLOTS / 32], } @@ -50,5 +56,9 @@ pub(crate) fn slot_to_start_and_meta(slot: usize) -> (usize, usize) { /// Get the initial kernel info for init. Only works for init. pub fn get_kernel_init_info() -> &'static KernelInitInfo { let (start, _) = slot_to_start_and_meta(crate::slot::RESERVED_KERNEL_INIT); - unsafe { ((start + NULLPAGE_SIZE) as *const KernelInitInfo).as_ref().unwrap() } + unsafe { + ((start + NULLPAGE_SIZE) as *const KernelInitInfo) + .as_ref() + .unwrap() + } } diff --git a/src/lib/twizzler-abi/src/runtime/simple_mutex.rs b/src/lib/twizzler-abi/src/runtime/simple_mutex.rs index 497d9b54..e5114fe5 100644 --- a/src/lib/twizzler-abi/src/runtime/simple_mutex.rs +++ b/src/lib/twizzler-abi/src/runtime/simple_mutex.rs @@ -36,9 +36,11 @@ impl MutexImp { /// Lock a mutex, which can be unlocked by calling [Mutex::unlock]. /// # Safety /// The caller must ensure that they are not recursively locking, that they unlock the - /// mutex correctly, and that any data protected by the mutex is only accessed with the mutex locked. + /// mutex correctly, and that any data protected by the mutex is only accessed with the mutex + /// locked. /// - /// Note, this is why you should use the standard library mutex, which enforces all of these things. + /// Note, this is why you should use the standard library mutex, which enforces all of these + /// things. #[allow(dead_code)] pub unsafe fn lock(&self) { for _ in 0..100 { @@ -96,8 +98,8 @@ impl MutexImp { } #[inline] - /// Similar to [Mutex::lock], but if we can't immediately grab the lock, don't and return false. Return - /// true if we got the lock. + /// Similar to [Mutex::lock], but if we can't immediately grab the lock, don't and return false. + /// Return true if we got the lock. /// # Safety /// Same safety concerns as [Mutex::lock], but now you have to check to see if the lock happened /// or not. 
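The lock protocol described above for `MutexImp` (spin for a bounded number of iterations, then fall back to sleeping on the lock word) can be sketched with plain atomics. This illustrates the word protocol only, not the runtime's actual implementation, and the sleep fallback is replaced with further spinning.

```rust
use core::sync::atomic::{AtomicU32, Ordering};

/// 0 = unlocked, 1 = locked.
struct RawLock {
    word: AtomicU32,
}

impl RawLock {
    const fn new() -> Self {
        Self { word: AtomicU32::new(0) }
    }

    /// Try to grab the lock without blocking; returns true on success.
    fn try_lock(&self) -> bool {
        self.word
            .compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    /// Spin a bounded number of times, then (in the real code) sleep on the word.
    fn lock(&self) {
        for _ in 0..100 {
            if self.try_lock() {
                return;
            }
            core::hint::spin_loop();
        }
        // Fallback: the real implementation issues a thread-sync sleep on the lock
        // word and retries after being woken; this sketch just keeps spinning.
        while !self.try_lock() {
            core::hint::spin_loop();
        }
    }

    fn unlock(&self) {
        self.word.store(0, Ordering::Release);
    }
}
```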
diff --git a/src/lib/twizzler-abi/src/runtime/stdio.rs b/src/lib/twizzler-abi/src/runtime/stdio.rs index 5f94daf0..4d04c266 100644 --- a/src/lib/twizzler-abi/src/runtime/stdio.rs +++ b/src/lib/twizzler-abi/src/runtime/stdio.rs @@ -4,9 +4,8 @@ use core::fmt::Debug; use twizzler_runtime_api::{IoRead, IoWrite, ReadError, RustStdioRuntime, WriteError}; -use crate::syscall::KernelConsoleReadError; - use super::MinimalRuntime; +use crate::syscall::KernelConsoleReadError; impl RustStdioRuntime for MinimalRuntime { fn with_panic_output(&self, cb: twizzler_runtime_api::IoWritePanicDynCallback<'_, ()>) { diff --git a/src/lib/twizzler-abi/src/runtime/thread.rs b/src/lib/twizzler-abi/src/runtime/thread.rs index f33df0e0..ec5a65b7 100644 --- a/src/lib/twizzler-abi/src/runtime/thread.rs +++ b/src/lib/twizzler-abi/src/runtime/thread.rs @@ -2,24 +2,20 @@ use core::alloc::Layout; -use crate::{ - object::Protections, runtime::idcounter::IdCounter, rustc_alloc::collections::BTreeMap, - thread::ExecutionState, -}; - use twizzler_runtime_api::{CoreRuntime, JoinError, SpawnError, ThreadRuntime}; +use super::{object::InternalObject, MinimalRuntime}; use crate::{ - runtime::simple_mutex::Mutex, + object::Protections, + runtime::{idcounter::IdCounter, simple_mutex::Mutex}, + rustc_alloc::collections::BTreeMap, syscall::{ ThreadSpawnError, ThreadSpawnFlags, ThreadSync, ThreadSyncError, ThreadSyncFlags, ThreadSyncReference, ThreadSyncSleep, ThreadSyncWake, }, - thread::ThreadRepr, + thread::{ExecutionState, ThreadRepr}, }; -use super::{object::InternalObject, MinimalRuntime}; - struct InternalThread { repr: InternalObject, #[allow(dead_code)] diff --git a/src/lib/twizzler-abi/src/runtime/time.rs b/src/lib/twizzler-abi/src/runtime/time.rs index 169a1e3e..3f19ef39 100644 --- a/src/lib/twizzler-abi/src/runtime/time.rs +++ b/src/lib/twizzler-abi/src/runtime/time.rs @@ -4,9 +4,8 @@ use core::time::Duration; use twizzler_runtime_api::{Monotonicity, RustTimeRuntime}; -use crate::syscall::{sys_read_clock_info, ClockSource, ReadClockFlags}; - use super::MinimalRuntime; +use crate::syscall::{sys_read_clock_info, ClockSource, ReadClockFlags}; impl RustTimeRuntime for MinimalRuntime { fn get_monotonic(&self) -> Duration { diff --git a/src/lib/twizzler-abi/src/syscall/console.rs b/src/lib/twizzler-abi/src/syscall/console.rs index 82d91bd6..11319f94 100644 --- a/src/lib/twizzler-abi/src/syscall/console.rs +++ b/src/lib/twizzler-abi/src/syscall/console.rs @@ -1,9 +1,8 @@ use bitflags::bitflags; use num_enum::{FromPrimitive, IntoPrimitive}; -use crate::arch::syscall::raw_syscall; - use super::{convert_codes_to_result, Syscall}; +use crate::arch::syscall::raw_syscall; #[derive( Debug, diff --git a/src/lib/twizzler-abi/src/syscall/create.rs b/src/lib/twizzler-abi/src/syscall/create.rs index 58afbc6a..4a1ab1fe 100644 --- a/src/lib/twizzler-abi/src/syscall/create.rs +++ b/src/lib/twizzler-abi/src/syscall/create.rs @@ -1,8 +1,8 @@ -use crate::{arch::syscall::raw_syscall, object::ObjID}; use bitflags::bitflags; use num_enum::{FromPrimitive, IntoPrimitive}; use super::{convert_codes_to_result, Syscall}; +use crate::{arch::syscall::raw_syscall, object::ObjID}; #[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq, Default)] #[repr(C)] @@ -12,7 +12,8 @@ use super::{convert_codes_to_result, Syscall}; pub struct ObjectSource { /// The ID of the source object, or zero for filling destination with zero. pub id: ObjID, - /// The offset into the source object to start the copy. If id is zero, this field is reserved for future use. 
+ /// The offset into the source object to start the copy. If id is zero, this field is reserved + /// for future use. pub src_start: u64, /// The offset into the dest object to start the copy or zero. pub dest_start: u64, diff --git a/src/lib/twizzler-abi/src/syscall/handle.rs b/src/lib/twizzler-abi/src/syscall/handle.rs index 1e770562..ec199ae6 100644 --- a/src/lib/twizzler-abi/src/syscall/handle.rs +++ b/src/lib/twizzler-abi/src/syscall/handle.rs @@ -1,9 +1,8 @@ use bitflags::bitflags; use num_enum::{FromPrimitive, IntoPrimitive}; -use crate::{arch::syscall::raw_syscall, object::ObjID}; - use super::{convert_codes_to_result, justval, Syscall}; +use crate::{arch::syscall::raw_syscall, object::ObjID}; #[derive( Debug, Copy, diff --git a/src/lib/twizzler-abi/src/syscall/info.rs b/src/lib/twizzler-abi/src/syscall/info.rs index 8ba22919..66fbdaae 100644 --- a/src/lib/twizzler-abi/src/syscall/info.rs +++ b/src/lib/twizzler-abi/src/syscall/info.rs @@ -1,8 +1,7 @@ use core::num::NonZeroUsize; -use crate::arch::syscall::raw_syscall; - use super::Syscall; +use crate::arch::syscall::raw_syscall; #[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq)] #[repr(C)] /// Information about the system. diff --git a/src/lib/twizzler-abi/src/syscall/kaction.rs b/src/lib/twizzler-abi/src/syscall/kaction.rs index 69ef652e..34b41583 100644 --- a/src/lib/twizzler-abi/src/syscall/kaction.rs +++ b/src/lib/twizzler-abi/src/syscall/kaction.rs @@ -1,11 +1,10 @@ +use super::{convert_codes_to_result, Syscall}; use crate::{ arch::syscall::raw_syscall, kso::{KactionCmd, KactionError, KactionFlags, KactionValue}, object::ObjID, }; -use super::{convert_codes_to_result, Syscall}; - /// Execute a kaction on an object. pub fn sys_kaction( cmd: KactionCmd, diff --git a/src/lib/twizzler-abi/src/syscall/map.rs b/src/lib/twizzler-abi/src/syscall/map.rs index 93a27457..ee58062f 100644 --- a/src/lib/twizzler-abi/src/syscall/map.rs +++ b/src/lib/twizzler-abi/src/syscall/map.rs @@ -3,13 +3,12 @@ use core::mem::MaybeUninit; use bitflags::bitflags; use num_enum::{FromPrimitive, IntoPrimitive}; +use super::{convert_codes_to_result, justval, Syscall}; use crate::{ arch::syscall::raw_syscall, object::{ObjID, Protections}, }; -use super::{convert_codes_to_result, justval, Syscall}; - #[derive( Debug, Copy, diff --git a/src/lib/twizzler-abi/src/syscall/mod.rs b/src/lib/twizzler-abi/src/syscall/mod.rs index 980404f0..fc4d41cf 100644 --- a/src/lib/twizzler-abi/src/syscall/mod.rs +++ b/src/lib/twizzler-abi/src/syscall/mod.rs @@ -1,4 +1,5 @@ -//! Wrapper functions around for raw_syscall, providing a typed and safer way to interact with the kernel. +//! Wrapper functions around for raw_syscall, providing a typed and safer way to interact with the +//! kernel. 
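The typed wrappers in this module all follow the same pattern: issue `raw_syscall`, then translate the raw return words into a `Result`. Below is a generic sketch of that translation step only; the real helper is `convert_codes_to_result`, whose exact signature is not shown here, and the split into a status word and a value word is an assumption for illustration.

```rust
/// Map a raw (status, value) pair from the kernel into a typed Result, using
/// caller-supplied predicates and conversions.
fn codes_to_result<T, E>(
    status: u64,
    value: u64,
    is_err: impl Fn(u64) -> bool,
    ok: impl FnOnce(u64) -> T,
    err: impl FnOnce(u64) -> E,
) -> Result<T, E> {
    if is_err(status) {
        Err(err(value))
    } else {
        Ok(ok(value))
    }
}

// A wrapper would then look roughly like (names hypothetical):
//   let (status, value) = unsafe { raw_syscall(Syscall::SomeCall, &[arg0, arg1]) };
//   codes_to_result(status, value, |s| s != 0, |v| v as usize, SomeError::from)
```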
mod console; mod create; diff --git a/src/lib/twizzler-abi/src/syscall/object_control.rs b/src/lib/twizzler-abi/src/syscall/object_control.rs index 49aeab95..a8b2979f 100644 --- a/src/lib/twizzler-abi/src/syscall/object_control.rs +++ b/src/lib/twizzler-abi/src/syscall/object_control.rs @@ -1,8 +1,7 @@ use num_enum::{FromPrimitive, IntoPrimitive}; -use crate::{arch::syscall::raw_syscall, object::ObjID}; - use super::{convert_codes_to_result, justval, Syscall}; +use crate::{arch::syscall::raw_syscall, object::ObjID}; #[derive( Debug, diff --git a/src/lib/twizzler-abi/src/syscall/object_stat.rs b/src/lib/twizzler-abi/src/syscall/object_stat.rs index 67701963..20459358 100644 --- a/src/lib/twizzler-abi/src/syscall/object_stat.rs +++ b/src/lib/twizzler-abi/src/syscall/object_stat.rs @@ -2,9 +2,8 @@ use core::mem::MaybeUninit; use num_enum::{FromPrimitive, IntoPrimitive}; -use crate::{arch::syscall::raw_syscall, object::ObjID}; - use super::{convert_codes_to_result, justval, BackingType, LifetimeType, Syscall}; +use crate::{arch::syscall::raw_syscall, object::ObjID}; #[derive( Debug, diff --git a/src/lib/twizzler-abi/src/syscall/security.rs b/src/lib/twizzler-abi/src/syscall/security.rs index 1b2eb5b7..5c360c92 100644 --- a/src/lib/twizzler-abi/src/syscall/security.rs +++ b/src/lib/twizzler-abi/src/syscall/security.rs @@ -1,8 +1,7 @@ use num_enum::{FromPrimitive, IntoPrimitive}; -use crate::{arch::syscall::raw_syscall, object::ObjID}; - use super::{convert_codes_to_result, Syscall}; +use crate::{arch::syscall::raw_syscall, object::ObjID}; #[derive( Debug, diff --git a/src/lib/twizzler-abi/src/syscall/spawn.rs b/src/lib/twizzler-abi/src/syscall/spawn.rs index 4794367d..c330808d 100644 --- a/src/lib/twizzler-abi/src/syscall/spawn.rs +++ b/src/lib/twizzler-abi/src/syscall/spawn.rs @@ -1,9 +1,8 @@ use bitflags::bitflags; use num_enum::{FromPrimitive, IntoPrimitive}; -use crate::{arch::syscall::raw_syscall, object::ObjID, upcall::UpcallTarget}; - use super::{convert_codes_to_result, Syscall}; +use crate::{arch::syscall::raw_syscall, object::ObjID, upcall::UpcallTarget}; bitflags! { /// Flags to pass to [sys_spawn]. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] @@ -14,12 +13,14 @@ bitflags! { #[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq)] #[repr(C)] pub enum UpcallTargetSpawnOption { - /// Set all sync event handlers to abort by default. Entry addresses will be zero, and upcalls will not be issued. + /// Set all sync event handlers to abort by default. Entry addresses will be zero, and upcalls + /// will not be issued. DefaultAbort, /// Inherit the upcall target entry address. All supervisor fields are cleared. Inherit, /// Set the upcall target directly. The following conditions must be met: - /// 1. The super_ctx field holds the ID of the current thread's active context (prevents priv escalation). + /// 1. The super_ctx field holds the ID of the current thread's active context (prevents priv + /// escalation). /// 2. The super_entry_address is at most r-x, and at least --x in the super_ctx. /// 3. The super_thread_pointer is exactly rw- in the super_ctx. /// 4. The super_stack_pointer is exactly rw- in the super_ctx. @@ -41,8 +42,9 @@ pub struct ThreadSpawnArgs { } impl ThreadSpawnArgs { - /// Construct a new ThreadSpawnArgs. If vm_context_handle is Some(handle), then spawn the thread in the - /// VM context defined by handle. Otherwise spawn it in the same VM context as the spawner. + /// Construct a new ThreadSpawnArgs. 
If vm_context_handle is Some(handle), then spawn the thread + /// in the VM context defined by handle. Otherwise spawn it in the same VM context as the + /// spawner. #[warn(clippy::too_many_arguments)] pub fn new( entry: usize, diff --git a/src/lib/twizzler-abi/src/syscall/thread_control.rs b/src/lib/twizzler-abi/src/syscall/thread_control.rs index f9d6ab07..0d680426 100644 --- a/src/lib/twizzler-abi/src/syscall/thread_control.rs +++ b/src/lib/twizzler-abi/src/syscall/thread_control.rs @@ -1,21 +1,21 @@ use num_enum::{FromPrimitive, IntoPrimitive}; +use super::Syscall; use crate::{ arch::syscall::raw_syscall, object::ObjID, upcall::{UpcallFrame, UpcallTarget}, }; -use super::Syscall; - #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, FromPrimitive, IntoPrimitive)] #[repr(u64)] /// Possible Thread Control operations pub enum ThreadControl { #[default] - /// Exit the thread. arg1 and arg2 should be code and location respectively, where code contains - /// a 64-bit value to write into *location, followed by the kernel performing a thread-wake - /// event on the memory word at location. If location is null, the write and thread-wake do not occur. + /// Exit the thread. arg1 and arg2 should be code and location respectively, where code + /// contains a 64-bit value to write into *location, followed by the kernel performing a + /// thread-wake event on the memory word at location. If location is null, the write and + /// thread-wake do not occur. Exit = 0, /// Yield the thread's CPU time now. The actual effect of this is unspecified, but it acts as a /// hint to the kernel that this thread does not need to run right now. The kernel, of course, @@ -25,7 +25,8 @@ pub enum ThreadControl { SetTls = 2, /// Get the thread's TLS pointer. GetTls = 3, - /// Set the thread's upcall pointer (child threads in the same virtual address space will inherit). + /// Set the thread's upcall pointer (child threads in the same virtual address space will + /// inherit). SetUpcall = 4, /// Get the upcall pointer. GetUpcall = 5, @@ -58,8 +59,8 @@ pub enum ThreadControl { GetSelfId = 17, } -/// Exit the thread. The code will be written to the [crate::thread::ThreadRepr] for the current thread as part -/// of updating the status and code to indicate thread has exited. +/// Exit the thread. The code will be written to the [crate::thread::ThreadRepr] for the current +/// thread as part of updating the status and code to indicate thread has exited. pub fn sys_thread_exit(code: u64) -> ! { unsafe { raw_syscall(Syscall::ThreadCtrl, &[ThreadControl::Exit as u64, code]); diff --git a/src/lib/twizzler-abi/src/syscall/thread_sync.rs b/src/lib/twizzler-abi/src/syscall/thread_sync.rs index 3b376ffa..cb4f6172 100644 --- a/src/lib/twizzler-abi/src/syscall/thread_sync.rs +++ b/src/lib/twizzler-abi/src/syscall/thread_sync.rs @@ -7,9 +7,8 @@ use core::{ use bitflags::bitflags; use num_enum::{FromPrimitive, IntoPrimitive}; -use crate::{arch::syscall::raw_syscall, object::ObjID}; - use super::{convert_codes_to_result, Syscall}; +use crate::{arch::syscall::raw_syscall, object::ObjID}; #[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq, Hash)] #[repr(u32)] /// Possible operations the kernel can perform when looking at the supplies reference and the @@ -41,7 +40,8 @@ bitflags! { #[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq, Hash)] #[repr(C)] -/// A reference to a piece of data. May either be a non-realized persistent reference or a virtual address. +/// A reference to a piece of data. 
May either be a non-realized persistent reference or a virtual +/// address. pub enum ThreadSyncReference { ObjectRef(ObjID, usize), Virtual(*const AtomicU64), @@ -121,7 +121,8 @@ impl ThreadSyncSleep { impl ThreadSyncWake { /// Construct a new thread wake request. The reference works the same was as in /// [ThreadSyncSleep]. The kernel will wake up `count` threads that are sleeping on this - /// particular word of object memory. If you want to wake up all threads, you can supply `usize::MAX`. + /// particular word of object memory. If you want to wake up all threads, you can supply + /// `usize::MAX`. pub fn new(reference: ThreadSyncReference, count: usize) -> Self { Self { reference, count } } @@ -165,7 +166,8 @@ pub type ThreadSyncResult = Result; #[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq)] #[repr(C)] -/// Either a sleep or wake request. The syscall comprises of a number of either sleep or wake requests. +/// Either a sleep or wake request. The syscall comprises of a number of either sleep or wake +/// requests. pub enum ThreadSync { Sleep(ThreadSyncSleep, ThreadSyncResult), Wake(ThreadSyncWake, ThreadSyncResult), @@ -207,11 +209,12 @@ impl ThreadSync { /// or slightly more due to scheduling uncertainty). If no operations are specified, the thread will /// sleep until the timeout expires. /// -/// Returns either Ok(ready_count), indicating how many operations were immediately ready, or Err([ThreadSyncError]), -/// indicating failure. After return, the kernel may have modified the ThreadSync entries to -/// indicate additional information about each request, with Err to indicate error and Ok(n) to -/// indicate success. For sleep requests, n is 0 if the operation went to sleep or 1 otherwise. For -/// wakeup requests, n indicates the number of threads woken up by this operation. +/// Returns either Ok(ready_count), indicating how many operations were immediately ready, or +/// Err([ThreadSyncError]), indicating failure. After return, the kernel may have modified the +/// ThreadSync entries to indicate additional information about each request, with Err to indicate +/// error and Ok(n) to indicate success. For sleep requests, n is 0 if the operation went to sleep +/// or 1 otherwise. For wakeup requests, n indicates the number of threads woken up by this +/// operation. /// /// Note that spurious wakeups are possible, and that even if a timeout occurs the function may /// return Ok(0). diff --git a/src/lib/twizzler-abi/src/syscall/time/clock.rs b/src/lib/twizzler-abi/src/syscall/time/clock.rs index e89ff679..ab078eab 100644 --- a/src/lib/twizzler-abi/src/syscall/time/clock.rs +++ b/src/lib/twizzler-abi/src/syscall/time/clock.rs @@ -1,6 +1,6 @@ use bitflags::bitflags; -use super::{ClockSource, ReadClockFlags, ReadClockListFlags, TimeSpan, FemtoSeconds}; +use super::{ClockSource, FemtoSeconds, ReadClockFlags, ReadClockListFlags, TimeSpan}; bitflags! { /// Flags about a given clock or clock read. @@ -25,10 +25,11 @@ impl ClockInfo { TimeSpan::ZERO, FemtoSeconds(0), FemtoSeconds(0), - ClockFlags::MONOTONIC + ClockFlags::MONOTONIC, ); - /// Construct a new ClockInfo. You probably want to be getting these from [sys_read_clock_info], though. + /// Construct a new ClockInfo. You probably want to be getting these from [sys_read_clock_info], + /// though. pub const fn new( current: TimeSpan, precision: FemtoSeconds, @@ -64,7 +65,6 @@ impl ClockInfo { } } - /// Different kinds of clocks exposed by the kernel. 
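Putting the thread-sync pieces from the hunks above together: a wake request names a word of memory (here via the `Virtual` reference variant) and a maximum number of sleepers to wake. The sketch below uses only constructors shown above; wrapping the request in a `ThreadSync::Wake` entry and submitting it through `sys_thread_sync` is left as a comment, since that call's full signature is not reproduced here.

```rust
use core::sync::atomic::AtomicU64;

use twizzler_abi::syscall::{ThreadSyncReference, ThreadSyncWake};

/// Build a request that wakes every thread currently sleeping on `word`.
fn wake_all_request(word: &AtomicU64) -> ThreadSyncWake {
    ThreadSyncWake::new(
        ThreadSyncReference::Virtual(word as *const AtomicU64),
        usize::MAX, // usize::MAX means "wake all sleepers on this word"
    )
}

// The request is then wrapped in a ThreadSync::Wake entry and passed, possibly
// alongside sleep requests, to sys_thread_sync.
```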
#[derive(Clone, Copy, Debug)] #[repr(C)] @@ -79,7 +79,7 @@ impl From for u64 { match clock { ClockKind::Monotonic => 0, ClockKind::RealTime => 1, - ClockKind::Unknown => 2 + ClockKind::Unknown => 2, } } } @@ -89,7 +89,7 @@ impl From for ClockKind { match x { 0 => ClockKind::Monotonic, 1 => ClockKind::RealTime, - _ => ClockKind::Unknown + _ => ClockKind::Unknown, } } } @@ -105,27 +105,27 @@ pub struct ClockID(pub u64); pub struct Clock { pub info: ClockInfo, id: ClockID, - kind: ClockKind + kind: ClockKind, } impl Clock { pub const ZERO: Clock = Clock { info: ClockInfo::ZERO, id: ClockID(0), - kind: ClockKind::Unknown + kind: ClockKind::Unknown, }; pub fn new(info: ClockInfo, id: ClockID, kind: ClockKind) -> Clock { - Self {info, id, kind} + Self { info, id, kind } } pub fn read(&self) -> TimeSpan { match super::sys_read_clock_info(ClockSource::ID(self.id), ReadClockFlags::empty()) { Ok(ci) => ci.current_value(), - _ => TimeSpan::ZERO + _ => TimeSpan::ZERO, } } - + pub fn info(&self) -> ClockInfo { self.info } @@ -133,9 +133,11 @@ impl Clock { /// Returns a new instance of a Clock from the specified ClockKind pub fn get(kind: ClockKind) -> Clock { let mut clk = [Clock::ZERO]; - if let Ok(filled) = super::sys_read_clock_list(kind, &mut clk, 0, ReadClockListFlags::FIRST_KIND) { + if let Ok(filled) = + super::sys_read_clock_list(kind, &mut clk, 0, ReadClockListFlags::FIRST_KIND) + { if filled > 0 { - return clk[0] + return clk[0]; } } Clock::ZERO diff --git a/src/lib/twizzler-abi/src/syscall/time/mod.rs b/src/lib/twizzler-abi/src/syscall/time/mod.rs index 958446e7..7204e25b 100644 --- a/src/lib/twizzler-abi/src/syscall/time/mod.rs +++ b/src/lib/twizzler-abi/src/syscall/time/mod.rs @@ -2,17 +2,16 @@ mod clock; mod timedefs; mod units; +use core::mem::MaybeUninit; + +use bitflags::bitflags; pub use clock::*; use num_enum::{FromPrimitive, IntoPrimitive}; pub use timedefs::*; pub use units::*; -use bitflags::bitflags; -use core::mem::MaybeUninit; - -use crate::arch::syscall::raw_syscall; - use super::{convert_codes_to_result, Syscall}; +use crate::arch::syscall::raw_syscall; #[derive( Debug, @@ -159,7 +158,12 @@ pub fn sys_read_clock_info( /// /// ```no_run /// let mut clocks = [Clock::ZERO; 4]; -/// let result = sys_read_clock_list(ClockKind::Monotonic, &mut clocks, 0, ReadClockListFlags::FIRST_KIND); +/// let result = sys_read_clock_list( +/// ClockKind::Monotonic, +/// &mut clocks, +/// 0, +/// ReadClockListFlags::FIRST_KIND, +/// ); /// if let Some(filled) = result { /// if filled > 0 { /// println!("time now: {}", clock[0].read().as_nanos()); diff --git a/src/lib/twizzler-abi/src/thread.rs b/src/lib/twizzler-abi/src/thread.rs index 4b29b8cc..d257f2d4 100644 --- a/src/lib/twizzler-abi/src/thread.rs +++ b/src/lib/twizzler-abi/src/thread.rs @@ -1,15 +1,12 @@ //! Functions for manipulating threads. use core::sync::atomic::{AtomicU64, Ordering}; - #[cfg(not(feature = "kernel"))] use core::time::Duration; use crate::marker::BaseType; - #[cfg(not(feature = "kernel"))] use crate::syscall::*; - #[allow(unused_imports)] use crate::{ object::{ObjID, Protections}, @@ -129,8 +126,8 @@ impl ThreadRepr { } #[cfg(not(feature = "kernel"))] - /// Wait for a thread's status to change, optionally timing out. Return value is None if timeout occurs, or - /// Some((ExecutionState, code)) otherwise. + /// Wait for a thread's status to change, optionally timing out. Return value is None if timeout + /// occurs, or Some((ExecutionState, code)) otherwise. 
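For reading time, the `Clock` wrapper in the clock hunks above is the convenient path: `Clock::get` finds the first clock of a given kind (falling back to `Clock::ZERO` if none is reported) and `read` returns the current `TimeSpan`. A small usage sketch follows; the import path is assumed, and `as_nanos()` is taken from the crate's own doc example.

```rust
use twizzler_abi::syscall::{Clock, ClockKind};

fn print_monotonic_time() {
    // Falls back to Clock::ZERO if no monotonic clock is available, in which
    // case read() simply returns TimeSpan::ZERO.
    let clock = Clock::get(ClockKind::Monotonic);
    println!("monotonic now: {} ns", clock.read().as_nanos());
}
```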
pub fn wait(&self, timeout: Option) -> Option<(ExecutionState, u64)> { let mut status = self.get_state(); loop { diff --git a/src/lib/twizzler-async/src/async_source.rs b/src/lib/twizzler-async/src/async_source.rs index 0e2f8a54..912bc597 100644 --- a/src/lib/twizzler-async/src/async_source.rs +++ b/src/lib/twizzler-async/src/async_source.rs @@ -46,7 +46,8 @@ impl Async { } /// Asynchronously run an operation that will sleep if not ready. The closure to run must return - /// `Result<_, T::Error>`, and should return `Err(T::WOULD_BLOCK)` if the operation is not ready. + /// `Result<_, T::Error>`, and should return `Err(T::WOULD_BLOCK)` if the operation is not + /// ready. pub async fn run_with( &self, op: impl FnMut(&T) -> Result, @@ -123,8 +124,9 @@ impl AsyncDuplex { self.handle.as_ref().unwrap() } - /// Asynchronously run a read-like operation that will sleep if not ready. The closure to run must return - /// `Result<_, T::ReadError>`, and should return `Err(T::READ_WOULD_BLOCK)` if the operation is not ready. + /// Asynchronously run a read-like operation that will sleep if not ready. The closure to run + /// must return `Result<_, T::ReadError>`, and should return `Err(T::READ_WOULD_BLOCK)` if + /// the operation is not ready. pub async fn read_with( &self, op: impl FnMut(&T) -> Result, @@ -140,8 +142,9 @@ impl AsyncDuplex { } } - /// Asynchronously run a write-like operation that will sleep if not ready. The closure to run must return - /// `Result<_, T::WriteError>`, and should return `Err(T::WRITE_WOULD_BLOCK)` if the operation is not ready. + /// Asynchronously run a write-like operation that will sleep if not ready. The closure to run + /// must return `Result<_, T::WriteError>`, and should return `Err(T::WRITE_WOULD_BLOCK)` if + /// the operation is not ready. pub async fn write_with( &self, op: impl FnMut(&T) -> Result, diff --git a/src/lib/twizzler-async/src/block_on.rs b/src/lib/twizzler-async/src/block_on.rs index 37dfea56..767f3d6b 100644 --- a/src/lib/twizzler-async/src/block_on.rs +++ b/src/lib/twizzler-async/src/block_on.rs @@ -1,15 +1,13 @@ use std::{ + cell::RefCell, + future::Future, sync::{ atomic::{AtomicUsize, Ordering}, Arc, Condvar, Mutex, }, - task::{Context, Poll}, + task::{Context, Poll, Waker}, time::Duration, }; - -use std::cell::RefCell; -use std::future::Future; -use std::task::Waker; struct Parker { unparker: Unparker, } diff --git a/src/lib/twizzler-async/src/future.rs b/src/lib/twizzler-async/src/future.rs index 308792c5..db3a7bef 100644 --- a/src/lib/twizzler-async/src/future.rs +++ b/src/lib/twizzler-async/src/future.rs @@ -20,7 +20,8 @@ impl Unpin for WaitForFirst {} /// A future that waits on two sub-futures until the first one completes. If the second one /// completes first, this future will continue awaiting on the first future. If the first one -/// completes first, this future returns immediately without continuing to wait on the second future. +/// completes first, this future returns immediately without continuing to wait on the second +/// future. pub fn wait_for_first( one: FutOne, two: FutTwo, @@ -70,7 +71,8 @@ pub struct FlagBlockInner { #[derive(Default)] /// A basic condition variable for async tasks. If you call wait() you get back a future that you -/// can await on, which will complete once another tasks calls signal_all(). But there's a gotcha here. +/// can await on, which will complete once another tasks calls signal_all(). But there's a gotcha +/// here. /// /// Okay so you know the rule with mutexes and condition variables? 
Like, you have some predicate /// that tells you "ready" or not, and this is tested with the mutex held, followed by waiting on @@ -191,7 +193,8 @@ pub async fn timeout_after(f: F, dur: Duration) -> Option } /// Await a future until a timeout occurs (or that future completes). If the timeout happens, return -/// None, otherwise return Some of the result of the future. This timeout expires at an instant in time. +/// None, otherwise return Some of the result of the future. This timeout expires at an instant in +/// time. pub async fn timeout_at(f: F, at: Instant) -> Option { Timeout::at(Box::pin(f), at).await } diff --git a/src/lib/twizzler-async/src/lib.rs b/src/lib/twizzler-async/src/lib.rs index d5dbfb53..f6f055e3 100644 --- a/src/lib/twizzler-async/src/lib.rs +++ b/src/lib/twizzler-async/src/lib.rs @@ -22,7 +22,9 @@ //! ``` //! Now, this does assume that there is a thread that has called [mod@run()], eg: //! ``` -//! let result = run(async { Task::spawn(async { /* some async code */ }).await }); +//! let result = run(async { +//! Task::spawn(async { /* some async code */ }).await +//! }); //! ``` //! //! Generally, though, if you want a thread pool, you can spawn a thread into a pool like this: @@ -31,10 +33,12 @@ //! ``` //! //! Then, later on, you can spawn a Task and await it. You can also detach a Task with .detach(), -//! which just places the thread on the runqueues and runs it without you having to await the result. +//! which just places the thread on the runqueues and runs it without you having to await the +//! result. //! //! # AsyncSetup, and Async -//! Traits and types for asynchronous operations on objects that have generic wait and signal events. +//! Traits and types for asynchronous operations on objects that have generic wait and signal +//! events. //! //! For example, a queue might have the following interface presented to the user: //! 1. `async fn send(T)` @@ -44,7 +48,8 @@ //! something happens -- say we send and want to wait if the queue is full, or recv and want to wait //! if the queue is empty, and of course we don't want to busy-wait. The queue can implement //! [AsyncDuplexSetup] so that we can wrap the queue in a [AsyncDuplex] and then use its functions -//! to access the queue's underlying structures in a non-blocking way, automatically sleeping when necessary. +//! to access the queue's underlying structures in a non-blocking way, automatically sleeping when +//! necessary. mod async_source; mod block_on; @@ -58,9 +63,10 @@ mod thread_local; mod throttle; mod timer; -pub use self::block_on::block_on; pub use async_source::{Async, AsyncDuplex, AsyncDuplexSetup, AsyncSetup}; pub use future::{timeout_after, timeout_at, wait_for_first, FlagBlock}; pub use run::run; pub use task::Task; pub use timer::Timer; + +pub use self::block_on::block_on; diff --git a/src/lib/twizzler-async/src/run.rs b/src/lib/twizzler-async/src/run.rs index 6c7a4d04..7e205abd 100644 --- a/src/lib/twizzler-async/src/run.rs +++ b/src/lib/twizzler-async/src/run.rs @@ -15,9 +15,9 @@ pub(crate) fn enter(f: impl FnOnce() -> T) -> T { /// Runs executors. /// /// We run both the thread-local executor and the global executor, and also check for timer events. -/// If we cannot make progress, we call the reactor, which handles waiting and waking up on [crate::Async] -/// and [crate::AsyncDuplex] objects for use in externally signaled events that control non-blocking closures' -/// readiness. 
+/// If we cannot make progress, we call the reactor, which handles waiting and waking up on +/// [crate::Async] and [crate::AsyncDuplex] objects for use in externally signaled events that +/// control non-blocking closures' readiness. /// /// # Examples /// ```no_run @@ -39,10 +39,10 @@ pub(crate) fn enter(f: impl FnOnce() -> T) -> T { /// twizzler_async::block_on(async { /// twizzler_async::Task::spawn(async { /// println!("Hello from executor thread!"); -/// }).await; +/// }) +/// .await; /// }); /// ``` -/// pub fn run(future: impl Future) -> T { let local = ThreadLocalExecutor::new(); let exec = Executor::get(); diff --git a/src/lib/twizzler-async/src/task.rs b/src/lib/twizzler-async/src/task.rs index 8135f33a..563f80a6 100644 --- a/src/lib/twizzler-async/src/task.rs +++ b/src/lib/twizzler-async/src/task.rs @@ -14,8 +14,9 @@ pub(crate) type Runnable = async_task::Task; /// cancel a task explicitly with the [`cancel()`][Task::cancel()] method. /// /// Tasks that panic are immediately canceled, and awaiting a canceled task causes a panic. If the -/// future panics, the panic will be unwound into the [`run()`][crate::run()] invocation that polled it, but this -/// doesn't apply to the blocking executor, which will simply ignore panics and continue running. +/// future panics, the panic will be unwound into the [`run()`][crate::run()] invocation that polled +/// it, but this doesn't apply to the blocking executor, which will simply ignore panics and +/// continue running. #[must_use = "futures do nothing unless you `.await` or poll them; tasks, specifically, get canceled if you drop them, use `.detach()` to run them in the background"] pub struct Task(pub(crate) Option>); @@ -31,8 +32,8 @@ impl Task { impl Task { /// Spawns a future onto the global executor. /// - /// This future may be stolen and polled by any thread calling [`run()`][crate::run()], and thus the future - /// (and its output) must be Send. + /// This future may be stolen and polled by any thread calling [`run()`][crate::run()], and thus + /// the future (and its output) must be Send. pub fn spawn(future: impl Future + Send + 'static) -> Task { crate::exec::Executor::get().spawn(future) } @@ -61,9 +62,10 @@ impl Task<()> { /// # Examples /// /// ```no_run - /// use twizzler_async::{Task, Timer}; /// use std::time::Duration; /// + /// use twizzler_async::{Task, Timer}; + /// /// # twizzler_async::run(async { /// Task::spawn(async { /// loop { @@ -83,7 +85,8 @@ impl Task { /// Cancels the task and waits for it to stop running. If the task completed before canceling, /// return the task's output, or `None` if it wasn't complete. The advantage of calling /// `cancel()` explicitly over jus dropping the task is that it, one, waits for the task to stop - /// running before returning, and two, it returns the result if the task _did_ successfully complete. + /// running before returning, and two, it returns the result if the task _did_ successfully + /// complete. 
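The cancel-versus-drop distinction described above is easiest to see in a small example. This follows the crate's own `run`/`Task::spawn` doc examples; whether any output comes back depends on whether the task finished before the cancellation.

```rust
use twizzler_async::Task;

fn spawn_then_cancel() {
    let result = twizzler_async::run(async {
        let task = Task::spawn(async {
            // some async work
            42u32
        });
        // Unlike dropping the task, cancel() waits for it to stop running and
        // returns Some(output) if it had already completed.
        task.cancel().await
    });
    println!("{:?}", result); // None unless the task finished before cancel()
}
```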
pub async fn cancel(self) -> Option { let handle = { self }.0.take().unwrap(); handle.cancel(); diff --git a/src/lib/twizzler-async/src/thread_local.rs b/src/lib/twizzler-async/src/thread_local.rs index e73cbb7e..857ce2dc 100644 --- a/src/lib/twizzler-async/src/thread_local.rs +++ b/src/lib/twizzler-async/src/thread_local.rs @@ -1,13 +1,12 @@ use std::{ cell::RefCell, collections::VecDeque, + future::Future, sync::{Arc, Mutex}, + thread::{self, ThreadId}, }; -use std::thread::{self, ThreadId}; - use scoped_tls_hkt::scoped_thread_local; -use std::future::Future; use crate::{ event::FlagEvent, diff --git a/src/lib/twizzler-async/src/throttle.rs b/src/lib/twizzler-async/src/throttle.rs index 77acb81e..5451e9a1 100644 --- a/src/lib/twizzler-async/src/throttle.rs +++ b/src/lib/twizzler-async/src/throttle.rs @@ -1,9 +1,10 @@ -use scoped_tls_hkt::scoped_thread_local; use std::{ cell::Cell, task::{Context, Poll}, }; +use scoped_tls_hkt::scoped_thread_local; + scoped_thread_local! { static BUDGET: Cell } diff --git a/src/lib/twizzler-driver/src/device/mod.rs b/src/lib/twizzler-driver/src/device/mod.rs index 2f7bf64c..cac9a49b 100644 --- a/src/lib/twizzler-driver/src/device/mod.rs +++ b/src/lib/twizzler-driver/src/device/mod.rs @@ -2,14 +2,9 @@ use std::fmt::Display; -pub use twizzler_abi::device::BusType; -pub use twizzler_abi::device::DeviceRepr; -pub use twizzler_abi::device::DeviceType; -use twizzler_abi::kso::KactionError; -use twizzler_abi::kso::KactionValue; -use twizzler_abi::kso::{KactionCmd, KactionFlags, KactionGenericCmd}; -use twizzler_object::Object; -use twizzler_object::{ObjID, ObjectInitError, ObjectInitFlags, Protections}; +pub use twizzler_abi::device::{BusType, DeviceRepr, DeviceType}; +use twizzler_abi::kso::{KactionCmd, KactionError, KactionFlags, KactionGenericCmd, KactionValue}; +use twizzler_object::{ObjID, Object, ObjectInitError, ObjectInitFlags, Protections}; mod children; pub mod events; diff --git a/src/lib/twizzler-driver/src/dma/mod.rs b/src/lib/twizzler-driver/src/dma/mod.rs index e9d8a159..440e2b30 100644 --- a/src/lib/twizzler-driver/src/dma/mod.rs +++ b/src/lib/twizzler-driver/src/dma/mod.rs @@ -10,18 +10,21 @@ mod region; use std::cell::UnsafeCell; -pub use super::arch::DMA_PAGE_SIZE; pub use object::DmaObject; pub use pin::{DmaPin, PinError}; pub use pool::DmaPool; pub use region::{DmaRegion, DmaSliceRegion}; +pub use super::arch::DMA_PAGE_SIZE; + #[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Ord, Eq, Hash)] /// Intended access direction for DMA memory. pub enum Access { - /// The memory is used for the host to write and the device to read. Device writes may not be coherent. + /// The memory is used for the host to write and the device to read. Device writes may not be + /// coherent. HostToDevice, - /// The memory is used for the host to read and the device to write. Host writes may not be coherent. + /// The memory is used for the host to read and the device to write. Host writes may not be + /// coherent. DeviceToHost, /// The memory is accessed read/write by both device and host. 
BiDirectional, diff --git a/src/lib/twizzler-driver/src/dma/object.rs b/src/lib/twizzler-driver/src/dma/object.rs index b8a97824..b237faa1 100644 --- a/src/lib/twizzler-driver/src/dma/object.rs +++ b/src/lib/twizzler-driver/src/dma/object.rs @@ -10,7 +10,8 @@ use twizzler_object::{ObjID, Object}; use super::{Access, DeviceSync, DmaOptions, DmaRegion, DmaSliceRegion}; /// A handle for an object that can be used to perform DMA, and is most useful directly as a way to -/// perform DMA operations on a specific object. For an allocator-like DMA interface, see [crate::dma::DmaPool]. +/// perform DMA operations on a specific object. For an allocator-like DMA interface, see +/// [crate::dma::DmaPool]. pub struct DmaObject { obj: Object<()>, pub(crate) releasable_pins: Mutex>, diff --git a/src/lib/twizzler-driver/src/dma/pool.rs b/src/lib/twizzler-driver/src/dma/pool.rs index ad952a03..88ab5165 100644 --- a/src/lib/twizzler-driver/src/dma/pool.rs +++ b/src/lib/twizzler-driver/src/dma/pool.rs @@ -74,9 +74,8 @@ impl SplitPageRange { #[cfg(test)] pub mod tests_split_page_range { - use crate::dma::pool::compact_range_list; - use super::SplitPageRange; + use crate::dma::pool::compact_range_list; #[test] fn spr_split_multiple() { @@ -310,10 +309,7 @@ impl DmaPool { /// Allocate a new [DmaRegion] from the pool. The region will be initialized with the /// provided initial value. - pub fn allocate<'a, T: DeviceSync>( - &'a self, - init: T, - ) -> Result, AllocationError> { + pub fn allocate<'a, T: DeviceSync>(&'a self, init: T) -> Result, AllocationError> { let len = core::mem::size_of::(); let (ado, range) = self.do_allocate(len)?; let mut reg = DmaRegion::new( @@ -372,9 +368,8 @@ impl DmaPool { #[cfg(test)] mod tests { - use crate::dma::{Access, DmaOptions}; - use super::DmaPool; + use crate::dma::{Access, DmaOptions}; #[test] fn allocate() { diff --git a/src/lib/twizzler-driver/src/dma/region.rs b/src/lib/twizzler-driver/src/dma/region.rs index 5f6e5da2..c200a5be 100644 --- a/src/lib/twizzler-driver/src/dma/region.rs +++ b/src/lib/twizzler-driver/src/dma/region.rs @@ -1,5 +1,4 @@ -use core::marker::PhantomData; -use core::ops::Range; +use core::{marker::PhantomData, ops::Range}; use std::sync::Arc; use twizzler_abi::{ @@ -11,13 +10,12 @@ use twizzler_abi::{ syscall::{sys_kaction, PinnedPage}, }; -use crate::arch::DMA_PAGE_SIZE; - use super::{ pin::{PhysInfo, PinError}, pool::{AllocatableDmaObject, SplitPageRange}, Access, DeviceSync, DmaObject, DmaOptions, DmaPin, SyncMode, }; +use crate::arch::DMA_PAGE_SIZE; /// A region of DMA memory, represented in virtual memory as type `T`, with a particular access mode /// and options. @@ -33,8 +31,8 @@ pub struct DmaRegion { _pd: PhantomData, } -/// A region of DMA memory, represented in virtual memory as type `[T; len]`, with a particular access mode -/// and options. +/// A region of DMA memory, represented in virtual memory as type `[T; len]`, with a particular +/// access mode and options. pub struct DmaSliceRegion { region: DmaRegion, len: usize, @@ -191,7 +189,8 @@ impl<'a, T: DeviceSync> DmaRegion { /// Release any pin created for this region. /// /// # Safety - /// Caller must ensure that no device is using the information from any active pins for this region. + /// Caller must ensure that no device is using the information from any active pins for this + /// region. 
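The DMA region types reworked in these hunks expose `with_mut`, which runs a closure over the host view of the buffer and handles any coherence work required by the region's access direction. A short usage sketch against `DmaSliceRegion`, assuming `u8` implements `DeviceSync` and that the range argument is a `Range<usize>`:

```rust
use twizzler_driver::dma::DmaSliceRegion;

/// Zero the first `n` bytes of a DMA buffer before handing it to the device.
fn zero_prefix(region: &mut DmaSliceRegion<u8>, n: usize) {
    region.with_mut(0..n, |buf| {
        for b in buf.iter_mut() {
            *b = 0;
        }
    });
}
```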
pub unsafe fn release_pin(&mut self) { if let Some((_, token)) = self.backing { super::object::release_pin(self.dma_object().object().id(), token); @@ -305,7 +304,8 @@ impl<'a, T: DeviceSync> DmaSliceRegion { ret } - // Run a closure that takes a mutable reference to a subslice of the DMA data, ensuring coherence. + // Run a closure that takes a mutable reference to a subslice of the DMA data, ensuring + // coherence. pub fn with_mut(&mut self, range: Range, f: F) -> R where F: FnOnce(&mut [T]) -> R, @@ -338,7 +338,8 @@ impl<'a, T: DeviceSync> DmaSliceRegion { /// Release any pin created for this region. /// /// # Safety - /// Caller must ensure that no device is using the information from any active pins for this region. + /// Caller must ensure that no device is using the information from any active pins for this + /// region. #[inline] pub unsafe fn release_pin(&mut self) { self.region.release_pin() diff --git a/src/lib/twizzler-driver/src/request/inflight.rs b/src/lib/twizzler-driver/src/request/inflight.rs index bb98e27e..74fcc3cf 100644 --- a/src/lib/twizzler-driver/src/request/inflight.rs +++ b/src/lib/twizzler-driver/src/request/inflight.rs @@ -140,8 +140,9 @@ impl InFlight { } #[derive(Debug)] -/// A future for a set of in-flight requests for which we are uninterested in any responses from the device, -/// we only care if the responses were completed successfully or not. On await, returns a [SubmitSummary]. +/// A future for a set of in-flight requests for which we are uninterested in any responses from the +/// device, we only care if the responses were completed successfully or not. On await, returns a +/// [SubmitSummary]. pub struct InFlightFuture { inflight: Arc>, } @@ -176,8 +177,8 @@ impl InFlightFutureWithResponses { } #[derive(Debug)] -/// A future for a set of in-flight requests for which we are interested in all responses from the device. -/// On await, returns a [SubmitSummaryWithResponses]. +/// A future for a set of in-flight requests for which we are interested in all responses from the +/// device. On await, returns a [SubmitSummaryWithResponses]. pub struct InFlightFutureWithResponses { inflight: Arc>, } diff --git a/src/lib/twizzler-driver/src/request/mod.rs b/src/lib/twizzler-driver/src/request/mod.rs index 12a961bf..8829c2ae 100644 --- a/src/lib/twizzler-driver/src/request/mod.rs +++ b/src/lib/twizzler-driver/src/request/mod.rs @@ -7,7 +7,8 @@ //! A user of the requester can call the [Requester::submit] or [Requester::submit_for_response] //! functions to submit a set a requests depending on if the caller wants the responses or just //! wants to know if the requests succeeded. The reason this distinction is maintained is that -//! collecting responses has an overhead. The requester interacts with the driver to submit the requests. +//! collecting responses has an overhead. The requester interacts with the driver to submit the +//! requests. //! //! Internally, the requester assigns IDs to requests for use in communicating with the driver. //! These IDs are not necessarily allocated sequentially and can only be relied upon to be unique @@ -51,10 +52,8 @@ pub trait RequestDriver { // TODO: drop for inflight tracker, so we can remove it to save work? 
-pub use inflight::InFlightFuture; -pub use inflight::InFlightFutureWithResponses; +pub use inflight::{InFlightFuture, InFlightFutureWithResponses}; pub use requester::Requester; pub use response_info::ResponseInfo; pub use submit::SubmitRequest; -pub use summary::SubmitSummary; -pub use summary::SubmitSummaryWithResponses; +pub use summary::{SubmitSummary, SubmitSummaryWithResponses}; diff --git a/src/lib/twizzler-driver/src/request/requester.rs b/src/lib/twizzler-driver/src/request/requester.rs index 16afc5c6..e6c60191 100644 --- a/src/lib/twizzler-driver/src/request/requester.rs +++ b/src/lib/twizzler-driver/src/request/requester.rs @@ -101,9 +101,9 @@ impl Requester { Ok(()) } - /// Submit a set of requests, for which we are **not** interested in the specific responses from the - /// device. Returns a future that awaits on an [InFlightFuture], so awaiting on this function - /// ensures that all requests are submitted, not necessarily handled. + /// Submit a set of requests, for which we are **not** interested in the specific responses from + /// the device. Returns a future that awaits on an [InFlightFuture], so awaiting on this + /// function ensures that all requests are submitted, not necessarily handled. pub async fn submit( &self, reqs: &mut [SubmitRequest], @@ -118,8 +118,8 @@ impl Requester { } /// Submit a set of requests, for which we **are** interested in the specific responses from the - /// device. Returns a future that awaits on an [InFlightFutureWithResponses], so awaiting on this function - /// ensures that all requests are submitted, not necessarily handled. + /// device. Returns a future that awaits on an [InFlightFutureWithResponses], so awaiting on + /// this function ensures that all requests are submitted, not necessarily handled. 
pub async fn submit_for_response( &self, reqs: &mut [SubmitRequest], diff --git a/src/lib/twizzler-net/src/lib.rs b/src/lib/twizzler-net/src/lib.rs index 356ac4d7..c17e7917 100644 --- a/src/lib/twizzler-net/src/lib.rs +++ b/src/lib/twizzler-net/src/lib.rs @@ -1,5 +1,7 @@ use std::sync::atomic::{AtomicU64, Ordering}; +#[cfg(feature = "manager")] +use twizzler_abi::syscall::{BackingType, LifetimeType, ObjectCreate, ObjectCreateFlags}; use twizzler_abi::{ marker::BaseType, syscall::{ @@ -9,23 +11,18 @@ use twizzler_abi::{ }; use twizzler_object::{ObjID, Object, ObjectInitFlags, Protections}; -#[cfg(feature = "manager")] -use twizzler_abi::syscall::{BackingType, LifetimeType, ObjectCreate, ObjectCreateFlags}; - pub mod addr; pub mod buffer; mod nm_handle; mod req; mod rx_req; mod tx_req; -pub use req::{CloseInfo, ConnectionId, PacketData}; -pub use rx_req::{Connection, RxCompletion, RxRequest}; -pub use tx_req::{ListenFlags, ListenInfo, TxCompletion, TxCompletionError, TxRequest}; - pub use nm_handle::{open_nm_handle, NmHandle}; - #[cfg(feature = "manager")] pub use nm_handle::{server_open_nm_handle, NmHandleManager}; +pub use req::{CloseInfo, ConnectionId, PacketData}; +pub use rx_req::{Connection, RxCompletion, RxRequest}; +pub use tx_req::{ListenFlags, ListenInfo, TxCompletion, TxCompletionError, TxRequest}; struct Rendezvous { ready: AtomicU64, diff --git a/src/lib/twizzler-net/src/nm_handle.rs b/src/lib/twizzler-net/src/nm_handle.rs index 5ecfd6ae..0688d7ce 100644 --- a/src/lib/twizzler-net/src/nm_handle.rs +++ b/src/lib/twizzler-net/src/nm_handle.rs @@ -9,6 +9,8 @@ use std::{ use twizzler_object::{ObjID, Object, ObjectInitFlags, Protections}; use twizzler_queue::{CallbackQueueReceiver, QueueBase, QueueError, QueueSender, SubmissionFlags}; +#[cfg(feature = "manager")] +use crate::server_rendezvous; use crate::{ buffer::{BufferBase, BufferController, ManagedBuffer}, client_rendezvous, @@ -17,9 +19,6 @@ use crate::{ tx_req::{TxCompletion, TxRequest}, }; -#[cfg(feature = "manager")] -use crate::server_rendezvous; - struct NmHandleObjects { tx_queue: Object>, rx_queue: Object>, diff --git a/src/lib/twizzler-object/src/init.rs b/src/lib/twizzler-object/src/init.rs index 4f560fe2..beb79585 100644 --- a/src/lib/twizzler-object/src/init.rs +++ b/src/lib/twizzler-object/src/init.rs @@ -1,9 +1,9 @@ use std::marker::PhantomData; +pub use twizzler_abi::object::Protections; use twizzler_abi::{object::ObjID, syscall::ObjectMapError}; use crate::object::Object; -pub use twizzler_abi::object::Protections; bitflags::bitflags! { /// Flags to pass to object initialization routines. diff --git a/src/lib/twizzler-queue-raw/src/lib.rs b/src/lib/twizzler-queue-raw/src/lib.rs index bc19a90b..c2565d68 100644 --- a/src/lib/twizzler-queue-raw/src/lib.rs +++ b/src/lib/twizzler-queue-raw/src/lib.rs @@ -4,7 +4,8 @@ //! interacts with the object system. //! //! This library exists to provide an underlying implementation of the concurrent data structure for -//! each individual raw queue so that this complex code can be reused in both userspace and the kernel. +//! each individual raw queue so that this complex code can be reused in both userspace and the +//! kernel. //! //! The basic design of a raw queue is two parts: //! @@ -85,7 +86,8 @@ use twizzler_abi::marker::BaseType; #[repr(C)] /// A queue entry. All queues must be formed of these, as the queue algorithm uses data inside this /// struct as part of its operation. 
The cmd_slot is used internally to track turn, and the info is -/// used by the full queue structure to manage completion. The data T is user data passed around the queue. +/// used by the full queue structure to manage completion. The data T is user data passed around the +/// queue. pub struct QueueEntry { cmd_slot: u32, info: u32, @@ -153,7 +155,8 @@ impl BaseType for QueueBase { } #[repr(C)] -/// A raw queue header. This contains all the necessary counters and info to run the queue algorithm. +/// A raw queue header. This contains all the necessary counters and info to run the queue +/// algorithm. pub struct RawQueueHdr { l2len: usize, stride: usize, @@ -513,10 +516,10 @@ unsafe impl Sync for RawQueue {} /// Wait for receiving on multiple raw queues. If any of the passed raw queues can return data, they /// will do so by writing it into the output array at the same index that they are in the `queues` /// variable. The queues and output arrays must be the same length. If no data is available in any -/// queues, then the function will call back on multi_wait, which it expects to wait until **any** of -/// the pairs (&x, y) meet the condition that *x != y. Before returning any data, the function will -/// callback on multi_ring, to inform multiple queues that data was taken from them. It expects the -/// multi_ring function to wake up any waiting threads on the supplied words of memory. +/// queues, then the function will call back on multi_wait, which it expects to wait until **any** +/// of the pairs (&x, y) meet the condition that *x != y. Before returning any data, the function +/// will callback on multi_ring, to inform multiple queues that data was taken from them. It expects +/// the multi_ring function to wake up any waiting threads on the supplied words of memory. /// /// Note that both call backs specify the pointers as Option. In the case that an entry is None, /// there was no requested wait or wake operation for that queue, and that entry should be ignored. @@ -533,7 +536,6 @@ unsafe impl Sync for RawQueue {} /// /// The complexity of the multi_wait and multi_ring callbacks is present to avoid calling into the /// kernel often for high-contention queues. -/// pub fn multi_receive, u64)]), R: Fn(&[Option<&AtomicU64>])>( queues: &[&RawQueue], output: &mut [Option>], @@ -584,10 +586,8 @@ mod tests { use std::sync::atomic::{AtomicU64, Ordering}; // use syscalls::SyscallArgs; - use crate::multi_receive; - use crate::QueueError; - use crate::{QueueEntry, RawQueue, RawQueueHdr, ReceiveFlags, SubmissionFlags}; + use crate::{QueueEntry, QueueError, RawQueue, RawQueueHdr, ReceiveFlags, SubmissionFlags}; fn wait(x: &AtomicU64, v: u64) { // println!("wait"); diff --git a/src/lib/twizzler-queue/src/lib.rs b/src/lib/twizzler-queue/src/lib.rs index 44542b38..e612f312 100644 --- a/src/lib/twizzler-queue/src/lib.rs +++ b/src/lib/twizzler-queue/src/lib.rs @@ -1,4 +1,5 @@ -//! Provides a duplex send/completion queue, where each direction is multiple-producer/single-consumer. +//! Provides a duplex send/completion queue, where each direction is +//! multiple-producer/single-consumer. //! //! The core queue abstraction is built around two subqueues, each providing an MPSC //! interface. 
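As a concrete reading of the multi_receive callback contract described above, here is a self-contained pair of callbacks that simply spin instead of sleeping; a real runtime would route these through the kernel's thread-sync primitives rather than busy-waiting:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// multi_wait: return once *any* (&x, y) pair satisfies *x != y. `None` entries mean no
// wait was requested for that queue.
fn multi_wait(pairs: &[Option<(&AtomicU64, u64)>]) {
    loop {
        let changed = pairs
            .iter()
            .flatten()
            .any(|(word, expected)| word.load(Ordering::Acquire) != *expected);
        if changed {
            return;
        }
        std::hint::spin_loop();
    }
}

// multi_ring: wake any waiters on the supplied words. A spinning waiter needs no wake,
// so this version is a no-op.
fn multi_ring(words: &[Option<&AtomicU64>]) {
    let _ = words;
}
```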
These subqueues are stored in a single object, and so the verbs to interact with the diff --git a/src/lib/twizzler-queue/src/queue.rs b/src/lib/twizzler-queue/src/queue.rs index 1d9d2fee..e1210389 100644 --- a/src/lib/twizzler-queue/src/queue.rs +++ b/src/lib/twizzler-queue/src/queue.rs @@ -1,18 +1,15 @@ use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; -use twizzler_abi::object::NULLPAGE_SIZE; -use twizzler_abi::syscall::{ - sys_thread_sync, ThreadSync, ThreadSyncFlags, ThreadSyncOp, ThreadSyncReference, - ThreadSyncSleep, ThreadSyncWake, +use twizzler_abi::{ + object::NULLPAGE_SIZE, + syscall::{ + sys_thread_sync, ThreadSync, ThreadSyncFlags, ThreadSyncOp, ThreadSyncReference, + ThreadSyncSleep, ThreadSyncWake, + }, }; use twizzler_object::{CreateError, CreateSpec, Object}; -use twizzler_queue_raw::RawQueue; -use twizzler_queue_raw::{QueueEntry, RawQueueHdr}; - -pub use twizzler_queue_raw::QueueBase; -pub use twizzler_queue_raw::QueueError; -pub use twizzler_queue_raw::ReceiveFlags; -pub use twizzler_queue_raw::SubmissionFlags; +pub use twizzler_queue_raw::{QueueBase, QueueError, ReceiveFlags, SubmissionFlags}; +use twizzler_queue_raw::{QueueEntry, RawQueue, RawQueueHdr}; /// A single queue, holding two subqueues (sending and completion). Objects of type S are sent /// across the sending queue, and completions of type C are sent back. diff --git a/src/lib/twizzler-runtime-api/src/lib.rs b/src/lib/twizzler-runtime-api/src/lib.rs index 00855ddd..e6356c09 100644 --- a/src/lib/twizzler-runtime-api/src/lib.rs +++ b/src/lib/twizzler-runtime-api/src/lib.rs @@ -1,16 +1,18 @@ -//! The Twizzler Runtime API is the core interface definition for Twizzler programs, including startup, execution, and libstd support. -//! It defines a set of traits that, when all implemented, form the full interface that Rust's libstd expects from a Twizzler runtime. +//! The Twizzler Runtime API is the core interface definition for Twizzler programs, including +//! startup, execution, and libstd support. It defines a set of traits that, when all implemented, +//! form the full interface that Rust's libstd expects from a Twizzler runtime. //! -//! From a high level, a Twizzler program links against Rust's libstd and a particular runtime that will support libstd. That runtime -//! must implement the minimum set of interfaces required by the [Runtime] trait. Libstd then invokes the runtime functions when needed -//! (e.g. allocating memory, exiting a thread, etc.). Other libraries may invoke runtime functions directly as well (bypassing libstd), -//! but note that doing so may not play nicely with libstd's view of the world. +//! From a high level, a Twizzler program links against Rust's libstd and a particular runtime that +//! will support libstd. That runtime must implement the minimum set of interfaces required by the +//! [Runtime] trait. Libstd then invokes the runtime functions when needed (e.g. allocating memory, +//! exiting a thread, etc.). Other libraries may invoke runtime functions directly as well +//! (bypassing libstd), but note that doing so may not play nicely with libstd's view of the world. //! //! # What does it look like to use the runtime? //! -//! When a program (including libstd) wishes to use the runtime, it invokes this library's [get_runtime] function, which will return -//! a reference (a &'static dyn reference) to a type that implements the Runtime trait. From there, runtime functions can be called: -//! ``` +//! 
When a program (including libstd) wishes to use the runtime, it invokes this library's +//! [get_runtime] function, which will return a reference (a &'static dyn reference) to a type that +//! implements the Runtime trait. From there, runtime functions can be called: ``` //! let runtime = get_runtime(); //! runtime.get_monotonic() //! ``` @@ -18,18 +20,20 @@ //! //! # So who is providing that type that implements [Runtime]? //! -//! Another library! Right now, Twizzler defines two runtimes: a "minimal" runtime, and a "reference" runtime. Those are not implemented -//! in this crate. The minimal runtime is implemented as part of the twizzler-abi crate, as it's the most "baremetal" runtime. The -//! reference runtime is implemented as a standalone set of crates. Of course, other runtimes can be implemented, as long as they implement -//! the required interface in this crate, libstd will work. +//! Another library! Right now, Twizzler defines two runtimes: a "minimal" runtime, and a +//! "reference" runtime. Those are not implemented in this crate. The minimal runtime is implemented +//! as part of the twizzler-abi crate, as it's the most "baremetal" runtime. The reference runtime +//! is implemented as a standalone set of crates. Of course, other runtimes can be implemented, as +//! long as they implement the required interface in this crate, libstd will work. //! //! ## Okay but how does get_runtime work? //! -//! Well, [get_runtime] is just a wrapper around calling an extern "C" function, [__twz_get_runtime]. This symbol is external, so not -//! defined in this crate. A crate that implements [Runtime] then defines [__twz_get_runtime], allowing link-time swapping of runtimes. -//! The twizzler-abi crate defines this symbol with (weak linkage)[https://en.wikipedia.org/wiki/Weak_symbol], causing it to be linked -//! only if another (strong) definition is not present. Thus, a program can link to a specific runtime, but it can also be loaded by a -//! dynamic linker and have its runtime selected at load time. +//! Well, [get_runtime] is just a wrapper around calling an extern "C" function, +//! [__twz_get_runtime]. This symbol is external, so not defined in this crate. A crate that +//! implements [Runtime] then defines [__twz_get_runtime], allowing link-time swapping of runtimes. The twizzler-abi crate defines this symbol with (weak linkage)[https://en.wikipedia.org/wiki/Weak_symbol], causing it to be linked +//! only if another (strong) definition is not present. Thus, a program can link to a specific +//! runtime, but it can also be loaded by a dynamic linker and have its runtime selected at load +//! time. #![no_std] #![feature(unboxed_closures)] @@ -227,21 +231,23 @@ pub trait ThreadRuntime { /// Wait for the specified thread to terminate, or optionally time out. fn join(&self, id: u32, timeout: Option) -> Result<(), JoinError>; - /// Implements the __tls_get_addr functionality. If the runtime feature is enabled, this crate defines the - /// extern "C" function __tls_get_addr as a wrapper around calling this function after getting the runtime from [get_runtime]. - /// If the provided index is invalid, return None. + /// Implements the __tls_get_addr functionality. If the runtime feature is enabled, this crate + /// defines the extern "C" function __tls_get_addr as a wrapper around calling this function + /// after getting the runtime from [get_runtime]. If the provided index is invalid, return + /// None. 
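To make the link-time swap described above concrete, a runtime crate exports a strong definition of the symbol. This is a sketch under assumptions: `OUR_RUNTIME` stands in for a runtime singleton (the reference runtime uses that name), and the exact ABI of the symbol is taken from the plain-fn usage visible later in this patch:

```rust
use twizzler_runtime_api::Runtime;

// A strong definition here overrides the weak one provided by twizzler-abi at link
// time, selecting this runtime for the whole program. OUR_RUNTIME is assumed to be a
// static that implements Runtime and is Sync.
#[no_mangle]
pub fn __twz_get_runtime() -> &'static (dyn Runtime + Sync) {
    &OUR_RUNTIME
}
```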
fn tls_get_addr(&self, tls_index: &TlsIndex) -> Option<*const u8>; } /// All the object related runtime functions. pub trait ObjectRuntime { - /// Map an object to an [ObjectHandle]. The handle may reference the same internal mapping as other calls to this function. + /// Map an object to an [ObjectHandle]. The handle may reference the same internal mapping as + /// other calls to this function. fn map_object(&self, id: ObjID, flags: MapFlags) -> Result; /// Called on drop of an object handle. fn release_handle(&self, handle: &mut ObjectHandle); - /// Map two objects in sequence, useful for executable loading. The default implementation makes no guarantees about - /// ordering. + /// Map two objects in sequence, useful for executable loading. The default implementation makes + /// no guarantees about ordering. fn map_two_objects( &self, in_id_a: ObjID, @@ -410,8 +416,8 @@ impl Clone for ObjectHandle { let rc = unsafe { self.internal_refs.as_ref() }; // This use of Relaxed ordering is justified by https://doc.rust-lang.org/nomicon/arc-mutex/arc-clone.html. let old_count = rc.count.fetch_add(1, Ordering::Relaxed); - // The above link also justifies the following behavior. If our count gets this high, we have probably - // run into a problem somewhere. + // The above link also justifies the following behavior. If our count gets this high, we + // have probably run into a problem somewhere. if old_count >= isize::MAX as usize { get_runtime().abort(); } @@ -451,14 +457,16 @@ pub trait CoreRuntime { /// Called by libstd after returning from main. fn post_main_hook(&self) {} - /// Exit the calling thread. This is allowed to cause a full exit of the entire program and all threads. + /// Exit the calling thread. This is allowed to cause a full exit of the entire program and all + /// threads. fn exit(&self, code: i32) -> !; /// Thread abort. This is allowed to cause a full exit of the entire program and all threads. fn abort(&self) -> !; - /// Called by rt0 code to start the runtime. Once the runtime has initialized, it should call the provided entry function. The pointer - /// arg is a pointer to an array of [AuxEntry] that terminates with an [AuxEntry::Null]. + /// Called by rt0 code to start the runtime. Once the runtime has initialized, it should call + /// the provided entry function. The pointer arg is a pointer to an array of [AuxEntry] that + /// terminates with an [AuxEntry::Null]. fn runtime_entry( &self, arg: *const AuxEntry, @@ -641,8 +649,8 @@ impl Library { /// Internal library ID type. pub struct LibraryId(pub usize); -/// The runtime must ensure that the addresses are constant for the whole life of the library type, and that all threads -/// may see the type. +/// The runtime must ensure that the addresses are constant for the whole life of the library type, +/// and that all threads may see the type. unsafe impl Send for Library {} #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] @@ -669,7 +677,8 @@ pub trait DebugRuntime { fn get_library(&self, id: LibraryId) -> Option; /// Returns the ID of the main executable, if there is one. fn get_exeid(&self) -> Option; - /// Get a segment of a library, if the segment index exists. All segment IDs are indexes, so they range from [0, N). + /// Get a segment of a library, if the segment index exists. All segment IDs are indexes, so + /// they range from [0, N). fn get_library_segment(&self, lib: &Library, seg: usize) -> Option; /// Get the full mapping of the underlying library. 
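A hedged usage sketch of the ObjectRuntime::map_object path described above; the flag names are assumptions about the MapFlags bitflags, and the error is simply discarded here:

```rust
use twizzler_runtime_api::{get_runtime, MapFlags, ObjID, ObjectHandle};

// Map an object read/write through whichever runtime the program linked against.
fn map_read_write(id: ObjID) -> Option<ObjectHandle> {
    let runtime = get_runtime();
    runtime.map_object(id, MapFlags::READ | MapFlags::WRITE).ok()
}
```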
fn get_full_mapping(&self, lib: &Library) -> Option; @@ -706,8 +715,8 @@ pub mod __imp { } } -/// Public definition of __tls_get_addr, a function that gets automatically called by the compiler when needed for TLS -/// pointer resolution. +/// Public definition of __tls_get_addr, a function that gets automatically called by the compiler +/// when needed for TLS pointer resolution. #[cfg(feature = "rustc-dep-of-std")] #[no_mangle] pub unsafe extern "C" fn __tls_get_addr(arg: usize) -> *const u8 { @@ -721,7 +730,8 @@ pub unsafe extern "C" fn __tls_get_addr(arg: usize) -> *const u8 { .expect("index passed to __tls_get_addr is invalid") } -/// Public definition of dl_iterate_phdr, used by libunwind for learning where loaded objects (executables, libraries, ...) are. +/// Public definition of dl_iterate_phdr, used by libunwind for learning where loaded objects +/// (executables, libraries, ...) are. #[cfg(feature = "rustc-dep-of-std")] #[no_mangle] pub unsafe extern "C" fn dl_iterate_phdr( diff --git a/src/lib/twizzler-runtime-api/src/rt0.rs b/src/lib/twizzler-runtime-api/src/rt0.rs index 28e55184..cd85eece 100644 --- a/src/lib/twizzler-runtime-api/src/rt0.rs +++ b/src/lib/twizzler-runtime-api/src/rt0.rs @@ -1,4 +1,5 @@ -//! rt0 defines a collection of functions that the basic Rust ABI expects to be defined by some part of the C runtime: +//! rt0 defines a collection of functions that the basic Rust ABI expects to be defined by some part +//! of the C runtime: //! //! - __tls_get_addr for handling non-local TLS regions. //! - _start, the entry point of an executable (per-arch, as this is assembly code). diff --git a/src/runtime/dynlink/src/arch/aarch64.rs b/src/runtime/dynlink/src/arch/aarch64.rs index 650935d6..3532994a 100644 --- a/src/runtime/dynlink/src/arch/aarch64.rs +++ b/src/runtime/dynlink/src/arch/aarch64.rs @@ -2,14 +2,12 @@ use crate::tls::{Tcb, TlsRegion}; pub(crate) const MINIMUM_TLS_ALIGNMENT: usize = 32; -pub use elf::abi::R_AARCH64_ABS64 as REL_SYMBOLIC; -pub use elf::abi::R_AARCH64_COPY as REL_COPY; -pub use elf::abi::R_AARCH64_GLOB_DAT as REL_GOT; -pub use elf::abi::R_AARCH64_JUMP_SLOT as REL_PLT; -pub use elf::abi::R_AARCH64_RELATIVE as REL_RELATIVE; -pub use elf::abi::R_AARCH64_TLS_DTPMOD as REL_DTPMOD; -pub use elf::abi::R_AARCH64_TLS_DTPREL as REL_DTPOFF; -pub use elf::abi::R_AARCH64_TLS_TPREL as REL_TPOFF; +pub use elf::abi::{ + R_AARCH64_ABS64 as REL_SYMBOLIC, R_AARCH64_COPY as REL_COPY, R_AARCH64_GLOB_DAT as REL_GOT, + R_AARCH64_JUMP_SLOT as REL_PLT, R_AARCH64_RELATIVE as REL_RELATIVE, + R_AARCH64_TLS_DTPMOD as REL_DTPMOD, R_AARCH64_TLS_DTPREL as REL_DTPOFF, + R_AARCH64_TLS_TPREL as REL_TPOFF, +}; /// Get a pointer to the current thread control block, using the thread pointer. 
/// @@ -29,4 +27,4 @@ impl TlsRegion { pub unsafe fn get_thread_control_block(&self) -> *mut Tcb { todo!() } -} \ No newline at end of file +} diff --git a/src/runtime/dynlink/src/arch/x86_64.rs b/src/runtime/dynlink/src/arch/x86_64.rs index f7471d44..667bf99c 100644 --- a/src/runtime/dynlink/src/arch/x86_64.rs +++ b/src/runtime/dynlink/src/arch/x86_64.rs @@ -1,16 +1,12 @@ -use crate::tls::Tcb; -use crate::tls::TlsRegion; +use crate::tls::{Tcb, TlsRegion}; pub(crate) const MINIMUM_TLS_ALIGNMENT: usize = 32; -pub use elf::abi::R_X86_64_64 as REL_SYMBOLIC; -pub use elf::abi::R_X86_64_COPY as REL_COPY; -pub use elf::abi::R_X86_64_DTPMOD64 as REL_DTPMOD; -pub use elf::abi::R_X86_64_DTPOFF64 as REL_DTPOFF; -pub use elf::abi::R_X86_64_GLOB_DAT as REL_GOT; -pub use elf::abi::R_X86_64_JUMP_SLOT as REL_PLT; -pub use elf::abi::R_X86_64_RELATIVE as REL_RELATIVE; -pub use elf::abi::R_X86_64_TPOFF64 as REL_TPOFF; +pub use elf::abi::{ + R_X86_64_64 as REL_SYMBOLIC, R_X86_64_COPY as REL_COPY, R_X86_64_DTPMOD64 as REL_DTPMOD, + R_X86_64_DTPOFF64 as REL_DTPOFF, R_X86_64_GLOB_DAT as REL_GOT, R_X86_64_JUMP_SLOT as REL_PLT, + R_X86_64_RELATIVE as REL_RELATIVE, R_X86_64_TPOFF64 as REL_TPOFF, +}; /// Get a pointer to the current thread control block, using the thread pointer. /// diff --git a/src/runtime/dynlink/src/compartment.rs b/src/runtime/dynlink/src/compartment.rs index 720c4b40..e5073cf0 100644 --- a/src/runtime/dynlink/src/compartment.rs +++ b/src/runtime/dynlink/src/compartment.rs @@ -1,11 +1,11 @@ //! Compartments are an abstraction for isolation of library components, but they are not done yet. -use petgraph::stable_graph::NodeIndex; use std::{ collections::HashMap, fmt::{Debug, Display}, }; +use petgraph::stable_graph::NodeIndex; use talc::{ErrOnOom, Talc}; use crate::{library::BackingData, tls::TlsInfo}; diff --git a/src/runtime/dynlink/src/compartment/tls.rs b/src/runtime/dynlink/src/compartment/tls.rs index 0ca07fc1..4a947986 100644 --- a/src/runtime/dynlink/src/compartment/tls.rs +++ b/src/runtime/dynlink/src/compartment/tls.rs @@ -2,14 +2,13 @@ use std::{alloc::Layout, ptr::NonNull}; use tracing::{debug, trace}; +use super::Compartment; use crate::{ library::BackingData, tls::{TlsInfo, TlsModId, TlsModule, TlsRegion}, DynlinkError, DynlinkErrorKind, }; -use super::Compartment; - impl Compartment { pub(crate) fn insert(&mut self, tm: TlsModule) -> TlsModId { let prev_gen = self.tls_gen; diff --git a/src/runtime/dynlink/src/context.rs b/src/runtime/dynlink/src/context.rs index 0500ef68..3a6b6297 100644 --- a/src/runtime/dynlink/src/context.rs +++ b/src/runtime/dynlink/src/context.rs @@ -1,22 +1,16 @@ //! Management of global context. 
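Both arch modules now export the same REL_* alias names, which is what lets the arch-independent relocation pass match on them. A simplified sketch of that dispatch, assuming the aliases are in scope; the formulas are the textbook ones (B = load base, A = addend, S = resolved symbol value), not the crate's implementation:

```rust
// Illustrative only: TLS and COPY relocations need extra information and are omitted.
fn resolve(rel_type: u32, base: u64, addend: u64, sym: u64) -> Option<u64> {
    match rel_type {
        REL_RELATIVE => Some(base + addend), // B + A
        REL_SYMBOLIC => Some(sym + addend),  // S + A
        REL_GOT | REL_PLT => Some(sym),      // S
        _ => None,                           // REL_COPY, TLS kinds: not covered here
    }
}
```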
-use std::collections::HashMap; -use std::fmt::Display; +use std::{collections::HashMap, fmt::Display}; -use petgraph::stable_graph::NodeIndex; -use petgraph::stable_graph::StableDiGraph; +use petgraph::stable_graph::{NodeIndex, StableDiGraph}; -use crate::compartment::CompartmentId; -use crate::library::LibraryId; -use crate::DynlinkErrorKind; +use self::engine::ContextEngine; use crate::{ - compartment::Compartment, - library::{BackingData, Library, UnloadedLibrary}, - DynlinkError, + compartment::{Compartment, CompartmentId}, + library::{BackingData, Library, LibraryId, UnloadedLibrary}, + DynlinkError, DynlinkErrorKind, }; -use self::engine::ContextEngine; - mod deps; pub mod engine; mod load; @@ -35,12 +29,14 @@ pub struct Context { compartments: Vec>, // This is the primary list of libraries, all libraries have an entry here, and they are - // placed here independent of compartment. Edges denote dependency relationships, and may also cross compartments. + // placed here independent of compartment. Edges denote dependency relationships, and may also + // cross compartments. pub(crate) library_deps: StableDiGraph, ()>, } // Libraries in the dependency graph are placed there before loading, so that they can participate -// in dependency search. So we need to track both kinds of libraries that may be at a given index in the graph. +// in dependency search. So we need to track both kinds of libraries that may be at a given index in +// the graph. pub enum LoadedOrUnloaded { Unloaded(UnloadedLibrary), Loaded(Library), @@ -171,7 +167,8 @@ impl Context { rets } - /// Traverse the library graph with DFS postorder, calling the callback for each library (mutable ref). + /// Traverse the library graph with DFS postorder, calling the callback for each library + /// (mutable ref). pub fn with_dfs_postorder_mut( &mut self, root_id: LibraryId, diff --git a/src/runtime/dynlink/src/context/deps.rs b/src/runtime/dynlink/src/context/deps.rs index 0e739956..33ec72a2 100644 --- a/src/runtime/dynlink/src/context/deps.rs +++ b/src/runtime/dynlink/src/context/deps.rs @@ -1,14 +1,12 @@ +use elf::abi::DT_NEEDED; use tracing::trace; +use super::{engine::ContextEngine, Context}; use crate::{ library::{Library, UnloadedLibrary}, DynlinkError, DynlinkErrorKind, }; -use elf::abi::DT_NEEDED; - -use super::{engine::ContextEngine, Context}; - impl Context { /// Get a list of dependencies for this library. pub(crate) fn enumerate_needed( diff --git a/src/runtime/dynlink/src/context/load.rs b/src/runtime/dynlink/src/context/load.rs index 3bb87255..c6999eac 100644 --- a/src/runtime/dynlink/src/context/load.rs +++ b/src/runtime/dynlink/src/context/load.rs @@ -9,6 +9,7 @@ use petgraph::stable_graph::NodeIndex; use secgate::RawSecGateInfo; use tracing::{debug, trace, warn}; +use super::{engine::ContextEngine, Context, LoadedOrUnloaded}; use crate::{ compartment::{Compartment, CompartmentId}, context::engine::{LoadDirective, LoadFlags}, @@ -17,8 +18,6 @@ use crate::{ DynlinkError, DynlinkErrorKind, HeaderError, }; -use super::{engine::ContextEngine, Context, LoadedOrUnloaded}; - impl Context { pub(crate) fn get_secgate_info( &self, @@ -54,7 +53,8 @@ impl Context { name: "dynamic".to_string(), })?; - // If this isn't present, just call it 0, since if there's an init_array, this entry must be present in valid ELF files. + // If this isn't present, just call it 0, since if there's an init_array, this entry must be + // present in valid ELF files. 
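Since the dependency-graph field appears here, a tiny self-contained illustration of the shape it describes: library nodes with directed "depends on" edges, which may be shared and may cross compartments. The node payload here is just a name, unlike the real LoadedOrUnloaded entries:

```rust
use petgraph::stable_graph::StableDiGraph;

// A stable graph keeps node indices valid across removals, matching the field above.
fn build_demo_graph() -> StableDiGraph<&'static str, ()> {
    let mut deps = StableDiGraph::new();
    let exe = deps.add_node("hello.exe");
    let libfoo = deps.add_node("libfoo.so");
    let libbar = deps.add_node("libbar.so");
    deps.add_edge(exe, libfoo, ());
    deps.add_edge(exe, libbar, ());
    // Shared dependency: only one copy of libbar is loaded, so the graph forms a diamond.
    deps.add_edge(libfoo, libbar, ());
    deps
}
```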
let init_array_len = dynamic .iter() .find_map(|d| { @@ -104,7 +104,8 @@ impl Context { }) } - // Load (map) a single library into memory via creating two objects, one for text, and one for data. + // Load (map) a single library into memory via creating two objects, one for text, and one for + // data. fn load( &mut self, comp_id: CompartmentId, @@ -115,8 +116,9 @@ impl Context { where Namer: FnMut(&str) -> Option, { - // At this point, all we know is a name. Ask the system implementation to use the name resolver to get a backing object from the name, - // and then map it for access (this will be the full ELF file data). + // At this point, all we know is a name. Ask the system implementation to use the name + // resolver to get a backing object from the name, and then map it for access (this + // will be the full ELF file data). let backing = self.engine.load_object(&unlib, namer)?; let elf = backing.get_elf()?; @@ -182,7 +184,8 @@ impl Context { .into()); } - // Step 1: map the PT_LOAD directives to copy-from commands Twizzler can use for creating objects. + // Step 1: map the PT_LOAD directives to copy-from commands Twizzler can use for creating + // objects. let directives: Vec<_> = elf .segments() .ok_or_else(|| DynlinkErrorKind::MissingSection { @@ -329,9 +332,12 @@ impl Context { .map(|dep_unlib| { // Dependency search + load alg: // 1. Search library name in current compartment. If found, use that. - // 2. Fallback to searching globally for the name, by checking compartment by compartment. If found, use that. - // 3. Okay, now we know we need to load the dep, so check if it can go in the current compartment. If not, create a new compartment. - // 4. Finally, recurse to load it and its dependencies into either the current compartment or the new one, if created. + // 2. Fallback to searching globally for the name, by checking compartment by + // compartment. If found, use that. + // 3. Okay, now we know we need to load the dep, so check if it can go in the + // current compartment. If not, create a new compartment. + // 4. Finally, recurse to load it and its dependencies into either the current + // compartment or the new one, if created. let comp = self.get_compartment(comp_id)?; let (existing_idx, load_comp) = @@ -358,7 +364,8 @@ impl Context { ) }; - // If we decided to use an existing library, then use that. Otherwise, load into the chosen compartment. + // If we decided to use an existing library, then use that. Otherwise, load into the + // chosen compartment. let idx = if let Some(existing_idx) = existing_idx { existing_idx } else { @@ -393,8 +400,9 @@ impl Context { Ok(idx) } - /// Load a library into a given compartment. The namer callback resolves names to Backing objects, allowing - /// the caller to hook into the "name-of-dependency" -> backing object pipeline. + /// Load a library into a given compartment. The namer callback resolves names to Backing + /// objects, allowing the caller to hook into the "name-of-dependency" -> backing object + /// pipeline. 
pub fn load_library_in_compartment( &mut self, comp_id: CompartmentId, diff --git a/src/runtime/dynlink/src/context/relocate.rs b/src/runtime/dynlink/src/context/relocate.rs index 58e1a856..22f6f399 100644 --- a/src/runtime/dynlink/src/context/relocate.rs +++ b/src/runtime/dynlink/src/context/relocate.rs @@ -13,18 +13,14 @@ use elf::{ }; use tracing::{debug, error, trace}; +use super::{engine::ContextEngine, Context, Library}; use crate::{ + arch::{REL_DTPMOD, REL_DTPOFF, REL_GOT, REL_PLT, REL_RELATIVE, REL_SYMBOLIC, REL_TPOFF}, library::{LibraryId, RelocState}, symbol::LookupFlags, DynlinkError, DynlinkErrorKind, }; -use crate::arch::{ - REL_DTPMOD, REL_DTPOFF, REL_GOT, REL_PLT, REL_RELATIVE, REL_SYMBOLIC, REL_TPOFF, -}; - -use super::{engine::ContextEngine, Context, Library}; - // A relocation is either a REL type or a RELA type. The only difference is that // the RELA type contains an addend (used in the reloc calculations below). #[derive(Debug)] @@ -207,8 +203,8 @@ impl Context { name, sz / ent ); - // Try to parse the table as REL or RELA, according to ent size. If get_parsing_iter succeeds for a given - // relocation type, that's the correct one. + // Try to parse the table as REL or RELA, according to ent size. If get_parsing_iter + // succeeds for a given relocation type, that's the correct one. if let Some(rels) = self.get_parsing_iter(start, ent, sz) { DynlinkError::collect( DynlinkErrorKind::RelocationSectionFail { @@ -259,7 +255,8 @@ impl Context { // Helper to lookup a single value in the dynamic table. let find_dyn_value = |tag| dynamic.iter().find(|d| d.d_tag == tag).map(|d| d.d_val()); - // Many of the relocation tables are described in a similar way -- start, entry size, and table size (in bytes). + // Many of the relocation tables are described in a similar way -- start, entry size, and + // table size (in bytes). let find_dyn_rels = |tag, ent, sz| { let rel = find_dyn_entry(tag); let relent = find_dyn_value(ent); @@ -328,7 +325,8 @@ impl Context { )?; } - // This one is a little special in that instead of an entry size, we are given a relocation type. + // This one is a little special in that instead of an entry size, we are given a relocation + // type. if let Some((rel, kind, sz)) = jmprels { let ent = match kind as i64 { DT_REL => 2, // 2 usize long, according to ELF @@ -366,9 +364,10 @@ impl Context { } } - // We do this recursively instead of using a traversal, since we want to be able to prune nodes that - // we know we no longer need to relocate. But since the reloc state gets set at the end (so we can do this pruning), - // we'll need to track the visit states. In the end, this is depth-first postorder. + // We do this recursively instead of using a traversal, since we want to be able to prune + // nodes that we know we no longer need to relocate. But since the reloc state gets + // set at the end (so we can do this pruning), we'll need to track the visit states. + // In the end, this is depth-first postorder. let deps = self .library_deps .neighbors_directed(root_id.0, petgraph::Direction::Outgoing) @@ -402,7 +401,8 @@ impl Context { res } - /// Iterate through all libraries and process relocations for any libraries that haven't yet been relocated. + /// Iterate through all libraries and process relocations for any libraries that haven't yet + /// been relocated. 
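The relocation passes above ultimately reduce to patching words in the mapped image. As a worked example of the simplest kind handled there, a RELATIVE-style relocation writes load base plus addend into the slot the entry points at; this is a simplified model assuming a little-endian 64-bit target, not the crate's code:

```rust
fn apply_relative(image: &mut [u8], offset: usize, addend: u64, base: u64) {
    // Write (load base + addend) into the 8-byte slot named by the relocation.
    image[offset..offset + 8].copy_from_slice(&(base + addend).to_le_bytes());
}

#[test]
fn relative_reloc_patches_slot() {
    let mut image = vec![0u8; 16];
    // The linker left addend 0x300; at load time the DSO landed at 0x7f00_0000.
    apply_relative(&mut image, 8, 0x300, 0x7f00_0000);
    let patched = u64::from_le_bytes(image[8..16].try_into().unwrap());
    assert_eq!(patched, 0x7f00_0300);
}
```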
pub fn relocate_all(&mut self, root_id: LibraryId) -> Result<(), DynlinkError> { let name = self.get_library(root_id)?.name.to_string(); self.relocate_recursive(root_id).map_err(|e| { diff --git a/src/runtime/dynlink/src/context/runtime.rs b/src/runtime/dynlink/src/context/runtime.rs index 0b79055b..064c4865 100644 --- a/src/runtime/dynlink/src/context/runtime.rs +++ b/src/runtime/dynlink/src/context/runtime.rs @@ -2,14 +2,13 @@ use std::alloc::Layout; use twizzler_abi::object::MAX_SIZE; +use super::{engine::ContextEngine, Context, LoadedOrUnloaded}; use crate::{ library::{CtorInfo, LibraryId}, tls::TlsRegion, DynlinkError, }; -use super::{engine::ContextEngine, Context, LoadedOrUnloaded}; - #[repr(C)] pub struct RuntimeInitInfo { pub tls_region: TlsRegion, diff --git a/src/runtime/dynlink/src/context/syms.rs b/src/runtime/dynlink/src/context/syms.rs index 9c77161c..160eb2cf 100644 --- a/src/runtime/dynlink/src/context/syms.rs +++ b/src/runtime/dynlink/src/context/syms.rs @@ -1,16 +1,16 @@ use tracing::trace; +use super::{engine::ContextEngine, Context, LoadedOrUnloaded}; use crate::{ library::{Library, LibraryId}, symbol::{LookupFlags, RelocatedSymbol}, DynlinkError, DynlinkErrorKind, }; -use super::{engine::ContextEngine, Context, LoadedOrUnloaded}; - impl Context { - /// Search for a symbol, starting from library denoted by start_id. For normal symbol lookup, this should be the - /// ID of the library that needs a symbol looked up. Flags can be specified which allow control over where to look for the symbol. + /// Search for a symbol, starting from library denoted by start_id. For normal symbol lookup, + /// this should be the ID of the library that needs a symbol looked up. Flags can be + /// specified which allow control over where to look for the symbol. pub fn lookup_symbol<'a>( &'a self, start_id: LibraryId, diff --git a/src/runtime/dynlink/src/lib.rs b/src/runtime/dynlink/src/lib.rs index 301e2d9a..edacc9d9 100644 --- a/src/runtime/dynlink/src/lib.rs +++ b/src/runtime/dynlink/src/lib.rs @@ -6,14 +6,14 @@ //! 3. Manage TLS regions //! //! On the surface, this isn't too bad. But it's mired in a long history, compatibility, deep -//! magic for performance, and a lack of good, easy to understand "official" documentation. However, we -//! will, in this crate, try to be as clear and forthcoming with what we are doing and why. +//! magic for performance, and a lack of good, easy to understand "official" documentation. However, +//! we will, in this crate, try to be as clear and forthcoming with what we are doing and why. //! //! # Basic Dynamic Linking Concepts //! *What is a dynamic shared object (DSO)?* -//! Practically speaking (and for our purposes), it's an ELF file that has been prepared in such a way that we can load it -//! into memory, fix it up a bit based on where we loaded it (the file is relocatable), and then call code within it. The -//! overall process looks like this: +//! Practically speaking (and for our purposes), it's an ELF file that has been prepared in such a +//! way that we can load it into memory, fix it up a bit based on where we loaded it (the file is +//! relocatable), and then call code within it. The overall process looks like this: //! //! Loading: //! 1. Map the library into memory @@ -26,69 +26,79 @@ //! Relocating (from a starting point DSO): //! 1. If marked done, return. //! 2. Recurse on all dependencies -//! 3. For each relocation entry, -//! 3a. Fixup the relocation entry according to its contents, possibly looking up a symbol if necessary. +//! 
3. For each relocation entry, 3a. Fixup the relocation entry according to its contents, possibly +//! looking up a symbol if necessary. //! 4. Mark as done //! -//! Let's talk about loading first. In step 1, for example, we need to iterate the program headers of the ELF file, -//! looking for PT_LOAD statements. These statements tell us how to setup the virtual memory for this program. Since these -//! DSOs are relocatable, we can load them _at a specific base address_. Each DSO gets loaded to its own base address and -//! is mapped into memory according to the base address and the PT_LOAD entries. In Twizzler, we can leverage the powerful +//! Let's talk about loading first. In step 1, for example, we need to iterate the program headers +//! of the ELF file, looking for PT_LOAD statements. These statements tell us how to setup the +//! virtual memory for this program. Since these DSOs are relocatable, we can load them _at a +//! specific base address_. Each DSO gets loaded to its own base address and is mapped into memory +//! according to the base address and the PT_LOAD entries. In Twizzler, we can leverage the powerful //! copy-from primitive to make this easier. //! -//! In steps 2 and 3 we are noting down information ahead of time. We want to record the loaded libraries for TLS purposes -//! in this order, since we must reserve one exalted DSO to live right next to the thread pointer. On most systems, this is -//! reserved for the executable. For us, it's just the first DSO to be loaded. We also note down if this library has any -//! constructors, that is, code that needs to be run before we can call any other code in the DSO. -//! -//! In step 4, we just add the library into global context. At this point, we have recorded enough info that we can make -//! this library namable and searchable for symbols. Finally, in the last two steps, we recurse on each dependency, and -//! add edges to the graph to note dependencies. I should note that dependencies may have already been loaded (e.g. a -//! library foo depends on bar and baz, and library bar depends on baz, only one copy of baz will be loaded), and thus -//! if we try to load a library that already has been loaded according to some namespace, we can just point the graph -//! to the existing node instead of loading up a fresh copy of the library. This is why the graph may have cycles, by -//! the way. -//! -//! When relocating a DSO, we need to ensure that it is fixed up to run at the base address we loaded it to. As a simple -//! mental model, we can imagine that if we had some static variable, foo, that lives in a DSO. When linking, the linker -//! has no idea where the dynamic linker will end up putting the DSO in memory. So when accessing foo, the compiler emits -//! some _relative_ address for reaching foo, say "0x300 + BASE", where BASE is a 64-bit value in the code. But again, -//! we don't know the base address, so we need to emit an entry in a relocation table that tells the dynamic linker, "hey, -//! when you load this DSO, go to _this spot_ (where BASE is) and change it to the actual base address of the DSO". -//! -//! In practice, of course, its more complex, there are optimizations, there are indirections, etc, but this is basically -//! the idea. In the steps listed above, we perform a post-order depth-first walk over the graph, performing all relocations -//! that the DSO specifies. -//! -//! One key idea that happens in relocations is _symbol lookup_. A relocation can say, "hey, write into me the address of -//! 
the symbol foo", and the dynamic linker will go look for that symbol's address by name. This is possible because each -//! DSO has a symbol table for symbols that it is advertising as useable for dynamic linking. The dynamic linker thus, when -//! looking up symbols, transitively looks though a DSO's dependencies until it finds the symbol. If it doesn't, it +//! In steps 2 and 3 we are noting down information ahead of time. We want to record the loaded +//! libraries for TLS purposes in this order, since we must reserve one exalted DSO to live right +//! next to the thread pointer. On most systems, this is reserved for the executable. For us, it's +//! just the first DSO to be loaded. We also note down if this library has any constructors, that +//! is, code that needs to be run before we can call any other code in the DSO. +//! +//! In step 4, we just add the library into global context. At this point, we have recorded enough +//! info that we can make this library namable and searchable for symbols. Finally, in the last two +//! steps, we recurse on each dependency, and add edges to the graph to note dependencies. I should +//! note that dependencies may have already been loaded (e.g. a library foo depends on bar and baz, +//! and library bar depends on baz, only one copy of baz will be loaded), and thus if we try to load +//! a library that already has been loaded according to some namespace, we can just point the graph +//! to the existing node instead of loading up a fresh copy of the library. This is why the graph +//! may have cycles, by the way. +//! +//! When relocating a DSO, we need to ensure that it is fixed up to run at the base address we +//! loaded it to. As a simple mental model, we can imagine that if we had some static variable, foo, +//! that lives in a DSO. When linking, the linker has no idea where the dynamic linker will end up +//! putting the DSO in memory. So when accessing foo, the compiler emits some _relative_ address for +//! reaching foo, say "0x300 + BASE", where BASE is a 64-bit value in the code. But again, +//! we don't know the base address, so we need to emit an entry in a relocation table that tells the +//! dynamic linker, "hey, when you load this DSO, go to _this spot_ (where BASE is) and change it to +//! the actual base address of the DSO". +//! +//! In practice, of course, its more complex, there are optimizations, there are indirections, etc, +//! but this is basically the idea. In the steps listed above, we perform a post-order depth-first +//! walk over the graph, performing all relocations that the DSO specifies. +//! +//! One key idea that happens in relocations is _symbol lookup_. A relocation can say, "hey, write +//! into me the address of the symbol foo", and the dynamic linker will go look for that symbol's +//! address by name. This is possible because each DSO has a symbol table for symbols that it is +//! advertising as useable for dynamic linking. The dynamic linker thus, when looking up symbols, +//! transitively looks though a DSO's dependencies until it finds the symbol. If it doesn't, it //! falls back to a global lookup, where it traverses the entire graph looking for the symbol. //! //! # Basic Concepts for this crate //! //! ## Context -//! All of the work of dynlink happens inside a Context, which contains, essentially, a single "invocation" of the dynamic -//! linker. It defines the symbol namespace, the compartments that exist, and manages the library dependency graph. +//! 
All of the work of dynlink happens inside a Context, which contains, essentially, a single +//! "invocation" of the dynamic linker. It defines the symbol namespace, the compartments that +//! exist, and manages the library dependency graph. //! //! ## Library //! This crate calls DSOs Libraries, because in Twizzler, there is usually little difference. //! //! ## Error Handling -//! This crate reports error with the [error::DynlinkError] type, which implements std::error::Error and miette's Diagnostic. +//! This crate reports error with the [error::DynlinkError] type, which implements std::error::Error +//! and miette's Diagnostic. //! //! ## Compartments -//! We add one major concept to the dynamic linking scene: compartments. A compartment is a collection of DSOs that operate -//! within a single, shared isolation group. Calls inside a compartment operate like normal calls, but cross-compartment -//! calls or accesses may be subject to additional processing and checks. Compartments modify the dependency algorithm a bit: -//! -//! When loading a DSO and enumerating dependencies, we check if a dependency can be satified within the same compartment. If -//! so, dependencies act like normal. If not, we do a _global compartment search_ for that same dependency (subject to restrictions, -//! e.g., permissions). If we don't find it there, we try to load it in either the same compartment as its parent (if allowed) or in -//! a new compartment (only if we must). Thus the dependency graph is still correct, and still allows symbol lookup to work, even -//! if libraries' dependency relationships may cross compartment boundaries. -//! +//! We add one major concept to the dynamic linking scene: compartments. A compartment is a +//! collection of DSOs that operate within a single, shared isolation group. Calls inside a +//! compartment operate like normal calls, but cross-compartment calls or accesses may be subject to +//! additional processing and checks. Compartments modify the dependency algorithm a bit: +//! +//! When loading a DSO and enumerating dependencies, we check if a dependency can be satified within +//! the same compartment. If so, dependencies act like normal. If not, we do a _global compartment +//! search_ for that same dependency (subject to restrictions, e.g., permissions). If we don't find +//! it there, we try to load it in either the same compartment as its parent (if allowed) or in +//! a new compartment (only if we must). Thus the dependency graph is still correct, and still +//! allows symbol lookup to work, even if libraries' dependency relationships may cross compartment +//! boundaries. #![feature(strict_provenance)] #![feature(never_type)] diff --git a/src/runtime/dynlink/src/library.rs b/src/runtime/dynlink/src/library.rs index 0be352af..688eac17 100644 --- a/src/runtime/dynlink/src/library.rs +++ b/src/runtime/dynlink/src/library.rs @@ -9,7 +9,6 @@ use elf::{ symbol::Symbol, ParseError, }; - use petgraph::stable_graph::NodeIndex; use secgate::RawSecGateInfo; use twizzler_runtime_api::AuxEntry; @@ -30,8 +29,8 @@ pub(crate) enum RelocState { /// The core trait that represents loaded or mapped data. pub trait BackingData: Clone { - /// Get a pointer to the start of a region, and a length, denoting valid memory representing this object. The memory - /// region is valid. + /// Get a pointer to the start of a region, and a length, denoting valid memory representing + /// this object. The memory region is valid. 
fn data(&self) -> (*mut u8, usize); fn load_addr(&self) -> usize; @@ -355,7 +354,8 @@ impl core::fmt::Display for UnloadedLibrary { pub struct CtorInfo { /// Legacy pointer to _init function for a library. Can be called with the C abi. pub legacy_init: usize, - /// Pointer to start of the init array, which contains functions pointers that can be called by the C abi. + /// Pointer to start of the init array, which contains functions pointers that can be called by + /// the C abi. pub init_array: usize, /// Length of the init array. pub init_array_len: usize, diff --git a/src/runtime/dynlink/src/symbol.rs b/src/runtime/dynlink/src/symbol.rs index d5aa3b0e..747a1ab3 100644 --- a/src/runtime/dynlink/src/symbol.rs +++ b/src/runtime/dynlink/src/symbol.rs @@ -2,8 +2,8 @@ use crate::library::{BackingData, Library}; -/// A (relocated) symbol. Contains information about the symbol itself, like value and size, along with a reference to -/// the library that it comes from. +/// A (relocated) symbol. Contains information about the symbol itself, like value and size, along +/// with a reference to the library that it comes from. pub struct RelocatedSymbol<'lib, Backing: BackingData> { sym: elf::symbol::Symbol, pub(crate) lib: &'lib Library, @@ -14,7 +14,8 @@ impl<'lib, Backing: BackingData> RelocatedSymbol<'lib, Backing> { Self { sym, lib } } - /// Returns the relocated address of the symbol, i.e. the value of the symbol added to the base address of the library it comes from. + /// Returns the relocated address of the symbol, i.e. the value of the symbol added to the base + /// address of the library it comes from. pub fn reloc_value(&self) -> u64 { self.sym.st_value + self.lib.base_addr() as u64 } diff --git a/src/runtime/dynlink/src/tls.rs b/src/runtime/dynlink/src/tls.rs index 835b46dc..35ae964c 100644 --- a/src/runtime/dynlink/src/tls.rs +++ b/src/runtime/dynlink/src/tls.rs @@ -1,6 +1,11 @@ -//! Implements ELF TLS Variant II. I highly recommend reading the Fuchsia docs on thread-local storage as prep for this code. +//! Implements ELF TLS Variant II. I highly recommend reading the Fuchsia docs on thread-local +//! storage as prep for this code. -use std::{alloc::Layout, mem::align_of, mem::size_of, ptr::NonNull}; +use std::{ + alloc::Layout, + mem::{align_of, size_of}, + ptr::NonNull, +}; use tracing::{error, trace}; use twizzler_runtime_api::TlsIndex; @@ -160,7 +165,8 @@ impl TlsInfo { // Ensure that the alignment is enough for the control block. let align = std::cmp::max(self.max_align, align_of::>()).next_power_of_two(); // Region needs space for each module, and we just assume they all need the max alignment. - // Add two to the mods length for calculating align padding, one for the dtv, one for the tcb. + // Add two to the mods length for calculating align padding, one for the dtv, one for the + // tcb. let region_size = self.alloc_size_mods + align * (self.tls_mods.len() + 2); let dtv_size = self.dtv_len() * size_of::(); // We also need space for the control block and the dtv. diff --git a/src/runtime/monitor-api/src/lib.rs b/src/runtime/monitor-api/src/lib.rs index 1f490154..6d294357 100644 --- a/src/runtime/monitor-api/src/lib.rs +++ b/src/runtime/monitor-api/src/lib.rs @@ -1,5 +1,6 @@ -//! This crate exists to break a circular dependency between twz-rt and monitor. We use extern symbols so that we -//! can just call into the monitor without having to have it as an explicit dependency. +//! This crate exists to break a circular dependency between twz-rt and monitor. We use extern +//! 
symbols so that we can just call into the monitor without having to have it as an explicit +//! dependency. #![feature(naked_functions)] #![feature(pointer_byte_offsets)] @@ -23,10 +24,12 @@ mod gates { pub use gates::*; use twizzler_runtime_api::LibraryId; -/// Shared data between the monitor and a compartment runtime. Written to by the monitor, and read-only from the compartment. +/// Shared data between the monitor and a compartment runtime. Written to by the monitor, and +/// read-only from the compartment. #[repr(C)] pub struct SharedCompConfig { - /// The security context that this compartment derives from. Read-only, will not be overwritten. + /// The security context that this compartment derives from. Read-only, will not be + /// overwritten. pub sctx: ObjID, // Pointer to the current TLS template. Read-only by compartment, writable by monitor. tls_template: AtomicPtr, @@ -38,7 +41,8 @@ struct CompConfigFinder { config: *const SharedCompConfig, } -// Safety: the compartment config address is stable over the life of the compartment and doesn't change after init. +// Safety: the compartment config address is stable over the life of the compartment and doesn't +// change after init. unsafe impl Sync for CompConfigFinder {} unsafe impl Send for CompConfigFinder {} @@ -115,7 +119,8 @@ impl TlsTemplateInfo { let dtv_ptr = new.add(self.dtv_offset) as *mut *mut u8; let dtv = core::slice::from_raw_parts_mut(dtv_ptr, self.num_dtv_entries); - // Step 2a: "relocate" the pointers inside the DTV. First entry is the gen count, so skip that. + // Step 2a: "relocate" the pointers inside the DTV. First entry is the gen count, so skip + // that. for entry in dtv.iter_mut().skip(1) { let offset = (*entry).byte_offset_from(self.alloc_base.as_ptr()); *entry = new.byte_offset(offset); diff --git a/src/runtime/monitor/secapi/gates.rs b/src/runtime/monitor/secapi/gates.rs index f939b089..3b2f4798 100644 --- a/src/runtime/monitor/secapi/gates.rs +++ b/src/runtime/monitor/secapi/gates.rs @@ -53,7 +53,8 @@ pub struct LibraryInfo { pub next_id: Option, } -// Safety: the broken part is just DlPhdrInfo. We ensure that any pointers in there are intra-compartment. +// Safety: the broken part is just DlPhdrInfo. We ensure that any pointers in there are +// intra-compartment. unsafe impl Crossing for LibraryInfo {} #[cfg_attr(feature = "secgate-impl", secgate::secure_gate(options(info)))] diff --git a/src/runtime/monitor/src/thread.rs b/src/runtime/monitor/src/thread.rs index b0ebd951..8e83b2b0 100644 --- a/src/runtime/monitor/src/thread.rs +++ b/src/runtime/monitor/src/thread.rs @@ -45,7 +45,8 @@ fn spawn_thread( ); let comp = state.comps.get(&src_ctx).unwrap(); - // Lock before spawn so we guarantee we can fill out the manager entry before the thread can look there. + // Lock before spawn so we guarantee we can fill out the manager entry before the thread can + // look there. let mut mgr = THREAD_MGR.lock().map_err(|_| SpawnError::Other)?; let thid = unsafe { sys_spawn(twizzler_abi::syscall::ThreadSpawnArgs { diff --git a/src/runtime/secgate/src/lib.rs b/src/runtime/secgate/src/lib.rs index 3b0ac567..d3aefae0 100644 --- a/src/runtime/secgate/src/lib.rs +++ b/src/runtime/secgate/src/lib.rs @@ -36,12 +36,13 @@ impl SecGateReturn { } } -/// A struct of information about a secure gate. These are auto-generated by the [crate::secure_gate] macro, and -/// stored in a special ELF section (.twz_secgate_info) as an array. The dynamic linker and monitor can then use -/// this to easily enumerate gates. 
+/// A struct of information about a secure gate. These are auto-generated by the +/// [crate::secure_gate] macro, and stored in a special ELF section (.twz_secgate_info) as an array. +/// The dynamic linker and monitor can then use this to easily enumerate gates. #[repr(C)] pub struct SecGateInfo { - /// A pointer to the implementation entry function. This must be a pointer, and we statically check that is has the same size as usize (sorry cheri, we'll fix this another time) + /// A pointer to the implementation entry function. This must be a pointer, and we statically + /// check that is has the same size as usize (sorry cheri, we'll fix this another time) pub imp: F, /// The name of this secure gate. This must be a pointer to a null-terminated C string. name: *const i8, @@ -61,9 +62,11 @@ impl SecGateInfo { } } -// Safety: If F is Send, we are too because the name field points to a static C string that cannot be written to. +// Safety: If F is Send, we are too because the name field points to a static C string that cannot +// be written to. unsafe impl Send for SecGateInfo {} -// Safety: If F is Sync, we are too because the name field points to a static C string that cannot be written to. +// Safety: If F is Sync, we are too because the name field points to a static C string that cannot +// be written to. unsafe impl Sync for SecGateInfo {} /// Minimum alignment of secure trampolines. @@ -74,7 +77,8 @@ pub type RawSecGateInfo = SecGateInfo; // Ensure that these are the same size because the dynamic linker uses the raw variant. static_assertions::assert_eq_size!(RawSecGateInfo, SecGateInfo<&fn()>); -/// Arguments that will be passed to the secure call. Concrete versions of this are generated by the macro. +/// Arguments that will be passed to the secure call. Concrete versions of this are generated by the +/// macro. #[derive(Clone, Copy)] #[repr(C)] pub struct Arguments { @@ -98,7 +102,8 @@ impl Arguments { } } -/// Return value to be filled by the secure call. Concrete versions of this are generated by the macro. +/// Return value to be filled by the secure call. Concrete versions of this are generated by the +/// macro. #[derive(Copy)] #[repr(C)] pub struct Return { diff --git a/src/runtime/twz-rt/src/lib.rs b/src/runtime/twz-rt/src/lib.rs index 7eef930e..3768d13e 100644 --- a/src/runtime/twz-rt/src/lib.rs +++ b/src/runtime/twz-rt/src/lib.rs @@ -1,6 +1,6 @@ //! # The Twizzler Reference Runtime -//! The Reference Runtime implements the Runtime trait from twizzler-runtime-abi, and is designed to be the primary, fully supported -//! programming environment on Twizzler. +//! The Reference Runtime implements the Runtime trait from twizzler-runtime-abi, and is designed to +//! be the primary, fully supported programming environment on Twizzler. //! //! This is a work in progress. 
diff --git a/src/runtime/twz-rt/src/runtime.rs b/src/runtime/twz-rt/src/runtime.rs index fec9775c..2b11d0e1 100644 --- a/src/runtime/twz-rt/src/runtime.rs +++ b/src/runtime/twz-rt/src/runtime.rs @@ -18,6 +18,7 @@ mod time; pub(crate) mod upcall; pub use core::CompartmentInitInfo; + pub use thread::RuntimeThreadControl; pub use upcall::set_upcall_handler; @@ -76,9 +77,8 @@ pub static OUR_RUNTIME: ReferenceRuntime = ReferenceRuntime { pub(crate) mod do_impl { use twizzler_runtime_api::Runtime; - use crate::preinit_println; - use super::ReferenceRuntime; + use crate::preinit_println; impl Runtime for ReferenceRuntime {} @@ -94,9 +94,9 @@ pub(crate) mod do_impl { static USE_MARKER: fn() -> &'static (dyn Runtime + Sync) = __twz_get_runtime; } -// These are exported by libunwind, but not re-exported by the standard library that pulls that in. Or, -// at least, that's what it seems like. In any case, they're no-ops in libunwind and musl, so this is -// fine for now. +// These are exported by libunwind, but not re-exported by the standard library that pulls that in. +// Or, at least, that's what it seems like. In any case, they're no-ops in libunwind and musl, so +// this is fine for now. #[no_mangle] pub fn __register_frame_info() {} #[no_mangle] diff --git a/src/runtime/twz-rt/src/runtime/alloc.rs b/src/runtime/twz-rt/src/runtime/alloc.rs index 4e7cf213..c8b6fa8f 100644 --- a/src/runtime/twz-rt/src/runtime/alloc.rs +++ b/src/runtime/twz-rt/src/runtime/alloc.rs @@ -1,6 +1,7 @@ -//! Primary allocator, for compartment-local allocation. One tricky aspect to this is that we need to support allocation before the -//! runtime is fully ready, so to avoid calling into std, we implement a manual spinlock around the allocator until the better Mutex -//! is available. Once it is, we move the allocator into the mutex, and use that. +//! Primary allocator, for compartment-local allocation. One tricky aspect to this is that we need +//! to support allocation before the runtime is fully ready, so to avoid calling into std, we +//! implement a manual spinlock around the allocator until the better Mutex is available. Once it +//! is, we move the allocator into the mutex, and use that. use core::{ alloc::{GlobalAlloc, Layout}, @@ -8,7 +9,6 @@ use core::{ ptr::NonNull, sync::atomic::{AtomicBool, Ordering}, }; - use std::{ alloc::Allocator, mem::size_of, @@ -26,9 +26,8 @@ use twizzler_abi::{ }; use twizzler_runtime_api::MapFlags; -use crate::{preinit_println, runtime::RuntimeState}; - use super::{ReferenceRuntime, OUR_RUNTIME}; +use crate::{preinit_println, runtime::RuntimeState}; static LOCAL_ALLOCATOR: LocalAllocator = LocalAllocator { runtime: &OUR_RUNTIME, @@ -103,9 +102,11 @@ fn create_and_map() -> Option<(usize, ObjID)> { impl OomHandler for RuntimeOom { fn handle_oom(talc: &mut Talc, _layout: Layout) -> Result<(), ()> { let (slot, id) = create_and_map().ok_or(())?; - // reserve an additional page size at the base of the object for future use. This behavior may change as the runtime is fleshed out. + // reserve an additional page size at the base of the object for future use. This behavior + // may change as the runtime is fleshed out. const HEAP_OFFSET: usize = NULLPAGE_SIZE * 2; - // offset from the endpoint of the object to where the endpoint of the heap is. Reserve a page for the metadata + a few pages for any future FOT entries. + // offset from the endpoint of the object to where the endpoint of the heap is. Reserve a + // page for the metadata + a few pages for any future FOT entries. 
const TOP_OFFSET: usize = NULLPAGE_SIZE * 4; let base = slot * MAX_SIZE + HEAP_OFFSET; let top = (slot + 1) * MAX_SIZE - TOP_OFFSET; @@ -159,7 +160,8 @@ unsafe impl GlobalAlloc for LocalAllocator { // Runtime is ready, we can use normal locking let mut inner = self.inner.lock().unwrap(); if inner.is_none() { - // First ones in after bootstrap. Lock, and then grab the early_alloc, using it for ourselves. + // First ones in after bootstrap. Lock, and then grab the early_alloc, using it for + // ourselves. while !self.early_lock.swap(true, Ordering::SeqCst) { core::hint::spin_loop() } @@ -194,8 +196,10 @@ unsafe impl GlobalAlloc for LocalAllocator { Layout::from_size_align(layout.size(), core::cmp::max(layout.align(), MIN_ALIGN)) .expect("layout alignment bump failed"); - // The monitor runtime has to deal with some weirdness in that some allocations may have happened during bootstrap. It's possible - // that these could be freed into _this_ allocator, which would be wrong. So just ignore deallocations of bootstrap-allocated memory. + // The monitor runtime has to deal with some weirdness in that some allocations may have + // happened during bootstrap. It's possible that these could be freed into _this_ + // allocator, which would be wrong. So just ignore deallocations of bootstrap-allocated + // memory. let ignore_slot = self.bootstrap_alloc_slot.load(Ordering::SeqCst); if ignore_slot != 0 && Span::new( @@ -211,7 +215,8 @@ unsafe impl GlobalAlloc for LocalAllocator { // Runtime is ready, we can use normal locking let mut inner = self.inner.lock().unwrap(); if inner.is_none() { - // First ones in after bootstrap. Lock, and then grab the early_alloc, using it for ourselves. + // First ones in after bootstrap. Lock, and then grab the early_alloc, using it for + // ourselves. 
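The early-allocation path here leans on the hand-rolled spinlock described in the allocator module docs, used only until the real Mutex is usable. A generic, self-contained version of that pattern (not the crate's exact field names or ordering choices) looks like this:

```rust
use std::hint::spin_loop;
use std::sync::atomic::{AtomicBool, Ordering};

struct EarlyLock {
    locked: AtomicBool,
}

impl EarlyLock {
    const fn new() -> Self {
        Self { locked: AtomicBool::new(false) }
    }

    fn lock(&self) {
        // Spin until we transition the flag from unlocked (false) to locked (true).
        while self
            .locked
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            spin_loop();
        }
    }

    fn unlock(&self) {
        self.locked.store(false, Ordering::Release);
    }
}
```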
while !self.early_lock.swap(true, Ordering::SeqCst) { core::hint::spin_loop() } diff --git a/src/runtime/twz-rt/src/runtime/core.rs b/src/runtime/twz-rt/src/runtime/core.rs index 38553831..0874530f 100644 --- a/src/runtime/twz-rt/src/runtime/core.rs +++ b/src/runtime/twz-rt/src/runtime/core.rs @@ -5,6 +5,7 @@ use monitor_api::SharedCompConfig; use twizzler_abi::upcall::{UpcallFlags, UpcallInfo, UpcallMode, UpcallOptions, UpcallTarget}; use twizzler_runtime_api::{AuxEntry, BasicAux, CoreRuntime}; +use super::{slot::mark_slot_reserved, thread::TLS_GEN_MGR, ReferenceRuntime}; use crate::{ preinit::{preinit_abort, preinit_unwrap}, preinit_println, @@ -12,8 +13,6 @@ use crate::{ RuntimeThreadControl, }; -use super::{slot::mark_slot_reserved, thread::TLS_GEN_MGR, ReferenceRuntime}; - #[repr(C)] pub struct CompartmentInitInfo { pub ctor_array_start: usize, diff --git a/src/runtime/twz-rt/src/runtime/debug.rs b/src/runtime/twz-rt/src/runtime/debug.rs index 840a69ef..18623a75 100644 --- a/src/runtime/twz-rt/src/runtime/debug.rs +++ b/src/runtime/twz-rt/src/runtime/debug.rs @@ -2,9 +2,8 @@ use elf::segment::Elf64_Phdr; use monitor_api::get_comp_config; use twizzler_runtime_api::{AddrRange, DebugRuntime, Library, MapFlags}; -use crate::preinit_println; - use super::{object::new_object_handle, ReferenceRuntime}; +use crate::preinit_println; impl DebugRuntime for ReferenceRuntime { fn get_library( diff --git a/src/runtime/twz-rt/src/runtime/object.rs b/src/runtime/twz-rt/src/runtime/object.rs index 7e6ed9b7..d04ab229 100644 --- a/src/runtime/twz-rt/src/runtime/object.rs +++ b/src/runtime/twz-rt/src/runtime/object.rs @@ -38,7 +38,8 @@ pub(crate) fn new_object_handle( } fn map_sys_err(sys_err: ObjectMapError) -> twizzler_runtime_api::MapError { - // TODO (dbittman): in a future PR, I plan to cleanup all the error handling between the API and ABI crates. + // TODO (dbittman): in a future PR, I plan to cleanup all the error handling between the API and + // ABI crates. match sys_err { ObjectMapError::Unknown => MapError::Other, ObjectMapError::ObjectNotFound => MapError::NoSuchObject, diff --git a/src/runtime/twz-rt/src/runtime/slot.rs b/src/runtime/twz-rt/src/runtime/slot.rs index bb80c7b7..355a1460 100644 --- a/src/runtime/twz-rt/src/runtime/slot.rs +++ b/src/runtime/twz-rt/src/runtime/slot.rs @@ -1,20 +1,24 @@ -//! Slot allocator. This proceeds in two phases. During the initialization phase, before the runtime is -//! marked ready, we use early slot allocation. After the runtime is ready, we use normal slot allocation. -//! Right before switching, the runtime must call in and initialize the proper slot allocator. +//! Slot allocator. This proceeds in two phases. During the initialization phase, before the runtime +//! is marked ready, we use early slot allocation. After the runtime is ready, we use normal slot +//! allocation. Right before switching, the runtime must call in and initialize the proper slot +//! allocator. //! -//! Slots are organized into pairs, (0,1), (2,3), (4,5), ..., (n-2,n-1). This is because the dynamic linker -//! needs to be able to load an ELF into adjacent objects in virtual memory, and is not fundamental to Twizzler. -//! To allocate single slots, we allocate a pair and split it, recording one of the slots as available for single -//! allocation, and returning the other. When a single slot is released, it also gets marked as available for -//! single allocation. However, eventually we'll need to consolidate the single slots back into pairs, or we will -//! run out. 
When the number of single slots allocated from pairs grows past a high watermark, we do a GC run on -//! the slot list, which sorts the list and then finds and removes pairs, freeing those pairs back up for future allocation. +//! Slots are organized into pairs, (0,1), (2,3), (4,5), ..., (n-2,n-1). This is because the dynamic +//! linker needs to be able to load an ELF into adjacent objects in virtual memory, and is not +//! fundamental to Twizzler. To allocate single slots, we allocate a pair and split it, recording +//! one of the slots as available for single allocation, and returning the other. When a single slot +//! is released, it also gets marked as available for single allocation. However, eventually we'll +//! need to consolidate the single slots back into pairs, or we will run out. When the number of +//! single slots allocated from pairs grows past a high watermark, we do a GC run on the slot list, +//! which sorts the list and then finds and removes pairs, freeing those pairs back up for future +//! allocation. //! -//! One thing that makes this tricky is that we cannot allocate memory within the slot allocator, as we hold a lock on it, -//! and the allocator might call us if it needs another object for allocating memory. Thus we must be careful during operation -//! to not allocate memory. We manage this by being a bit wasteful: the slot allocator reserves two vectors ahead of time, -//! each of capacity SLOTS (which is the max number of slots we can have). The first is a stack of single allocated slots, and -//! the second is used during the GC pass described above. +//! One thing that makes this tricky is that we cannot allocate memory within the slot allocator, as +//! we hold a lock on it, and the allocator might call us if it needs another object for allocating +//! memory. Thus we must be careful during operation to not allocate memory. We manage this by being +//! a bit wasteful: the slot allocator reserves two vectors ahead of time, each of capacity SLOTS +//! (which is the max number of slots we can have). The first is a stack of single allocated slots, +//! and the second is used during the GC pass described above. use std::sync::{ atomic::{AtomicUsize, Ordering}, @@ -24,9 +28,8 @@ use std::sync::{ use tracing::trace; use twizzler_abi::arch::SLOTS; -use crate::{preinit::preinit_abort, preinit_println}; - use super::{ReferenceRuntime, RuntimeState}; +use crate::{preinit::preinit_abort, preinit_println}; fn early_slot_alloc() -> Option<usize> { Some(EARLY_SLOT_ALLOC.next.fetch_add(1, Ordering::SeqCst)) } @@ -120,15 +123,18 @@ impl SlotAllocatorInner { "slot allocator: GC single slots (len = {})", self.singles.len() ); - // Step 1: setup the aux vector and sort the singles. Use unstable sort because it doesn't allocate memory. + // Step 1: setup the aux vector and sort the singles. Use unstable sort because it doesn't + // allocate memory. self.singles_aux.truncate(0); self.singles.sort_unstable(); - // Step 2: collect a list of valid pairs by iterating over all windows of size 2 and checking if a window contains a pair. - // Note that this is exactly correct and not an overcount because we know that each slot in here is unique, so imagine: - // [2,3,4,5]. This will produce pairs (2,3) and (4,5), even though it considers and sees (3,4) as a pair of consecutive - // indices. But (3,4) is not a valid pair because it does not start with an even number, and all valid pairs do.
- // [1,2,3,4] => (2,3), [2,4,5] => (4,5) + // Step 2: collect a list of valid pairs by iterating over all windows of size 2 and + // checking if a window contains a pair. Note that this is exactly correct and not + // an overcount because we know that each slot in here is unique, so imagine: + // [2,3,4,5]. This will produce pairs (2,3) and (4,5), even though it considers and sees + // (3,4) as a pair of consecutive indices. But (3,4) is not a valid pair because it + // does not start with an even number, and all valid pairs do. [1,2,3,4] => (2,3), + // [2,4,5] => (4,5) let pair_firsts = self.singles.array_windows::<2>().filter_map(|maybe_pair| { if (maybe_pair[0] % 2 == 0) && maybe_pair[1] == maybe_pair[0] + 1 { Some(maybe_pair[0]) diff --git a/src/runtime/twz-rt/src/runtime/stdio.rs b/src/runtime/twz-rt/src/runtime/stdio.rs index af87251a..fa2cd3c0 100644 --- a/src/runtime/twz-rt/src/runtime/stdio.rs +++ b/src/runtime/twz-rt/src/runtime/stdio.rs @@ -1,6 +1,7 @@ -//! Implements stdio streams. For each one (stdin, stdout, and stderr), we have two possible sinks: a thread-local, and a "local" -//! destination (here, local to this particular linking to this runtime). We try the thread-local first, if available, and if not, try -//! the runtime local option. If that isn't present, we fallback to the Fallback writer and reader. +//! Implements stdio streams. For each one (stdin, stdout, and stderr), we have two possible sinks: +//! a thread-local, and a "local" destination (here, local to this particular linking to this +//! runtime). We try the thread-local first, if available, and if not, try the runtime local option. +//! If that isn't present, we fallback to the Fallback writer and reader. use std::{ panic::RefUnwindSafe, @@ -66,7 +67,8 @@ impl ReferenceRuntime { impl RustStdioRuntime for ReferenceRuntime { fn with_panic_output(&self, cb: twizzler_runtime_api::IoWritePanicDynCallback<'_, ()>) { // For panic output, try to never wait on any locks. Also, catch unwinds and treat - // the output option as None if the callback panics, to try to ensure the output goes somewhere. + // the output option as None if the callback panics, to try to ensure the output goes + // somewhere. // Unwrap-Ok: we ensure that no one can panic when holding the read lock. 
if let Ok(ref out) = THREAD_STDERR.try_read() { diff --git a/src/runtime/twz-rt/src/runtime/thread.rs b/src/runtime/twz-rt/src/runtime/thread.rs index 2d974e7e..20981eb4 100644 --- a/src/runtime/twz-rt/src/runtime/thread.rs +++ b/src/runtime/twz-rt/src/runtime/thread.rs @@ -8,11 +8,9 @@ use twizzler_abi::syscall::{ }; use twizzler_runtime_api::{CoreRuntime, JoinError, SpawnError, ThreadRuntime, TlsIndex}; -use crate::{preinit_println, runtime::thread::mgr::ThreadManager}; - use self::tcb::with_current_thread; - use super::ReferenceRuntime; +use crate::{preinit_println, runtime::thread::mgr::ThreadManager}; mod internal; mod mgr; diff --git a/src/runtime/twz-rt/src/runtime/thread/internal.rs b/src/runtime/twz-rt/src/runtime/thread/internal.rs index f4317ae7..26125b11 100644 --- a/src/runtime/twz-rt/src/runtime/thread/internal.rs +++ b/src/runtime/twz-rt/src/runtime/thread/internal.rs @@ -11,9 +11,8 @@ use tracing::trace; use twizzler_abi::{object::NULLPAGE_SIZE, thread::ThreadRepr}; use twizzler_runtime_api::{CoreRuntime, ObjectHandle, ThreadSpawnArgs}; -use crate::runtime::{thread::MIN_STACK_ALIGN, OUR_RUNTIME}; - use super::RuntimeThreadControl; +use crate::runtime::{thread::MIN_STACK_ALIGN, OUR_RUNTIME}; /// Internal representation of a thread, tracking the resources /// allocated for this thread. diff --git a/src/runtime/twz-rt/src/runtime/thread/mgr.rs b/src/runtime/twz-rt/src/runtime/thread/mgr.rs index 2c92ec39..5b97a8c6 100644 --- a/src/runtime/twz-rt/src/runtime/thread/mgr.rs +++ b/src/runtime/twz-rt/src/runtime/thread/mgr.rs @@ -11,6 +11,7 @@ use twizzler_runtime_api::{ CoreRuntime, JoinError, MapFlags, ObjectRuntime, SpawnError, ThreadSpawnArgs, }; +use super::internal::InternalThread; use crate::runtime::{ thread::{ tcb::{trampoline, RuntimeThreadControl, TLS_GEN_MGR}, @@ -19,8 +20,6 @@ use crate::runtime::{ ReferenceRuntime, OUR_RUNTIME, }; -use super::internal::InternalThread; - pub(super) struct ThreadManager { inner: Mutex, } @@ -139,8 +138,8 @@ impl ReferenceRuntime { .alloc_zeroed(Layout::from_size_align(args.stack_size, MIN_STACK_ALIGN).unwrap()) } as usize; - // Take the thread management lock, so that when the new thread starts we cannot observe that thread - // running without the management data being recorded. + // Take the thread management lock, so that when the new thread starts we cannot observe + // that thread running without the management data being recorded. let mut inner = THREAD_MGR.inner.lock().unwrap(); let id = inner.next_id(); diff --git a/src/runtime/twz-rt/src/runtime/thread/tcb.rs b/src/runtime/twz-rt/src/runtime/thread/tcb.rs index b4b85ca8..5f143a2e 100644 --- a/src/runtime/twz-rt/src/runtime/thread/tcb.rs +++ b/src/runtime/twz-rt/src/runtime/thread/tcb.rs @@ -174,5 +174,6 @@ impl TlsGenMgr { } } - // TODO: when threads exit or move on to a different TLS gen, track that in thread_count, and if it hits zero, notify the monitor. + // TODO: when threads exit or move on to a different TLS gen, track that in thread_count, and if + // it hits zero, notify the monitor. 
} diff --git a/tools/image_builder/src/main.rs b/tools/image_builder/src/main.rs index f5a1848b..643335dd 100755 --- a/tools/image_builder/src/main.rs +++ b/tools/image_builder/src/main.rs @@ -1,29 +1,30 @@ -use anyhow::{bail, Context}; use std::{ convert::TryFrom, fs::{self, File}, io::{self, Seek, Write}, path::{Path, PathBuf}, }; + +use anyhow::{bail, Context}; use clap::Parser; #[derive(Parser, Debug)] struct Args { - /// Path where disk image should be created - #[clap(short, long)] - disk_path: String, - /// Path to kernel binary - #[clap(short, long)] - kernel_path: String, - /// Path to initial ram disk - #[clap(short, long)] - initrd_path: String, - /// Command line string to be passed to kernel - #[clap(short, long)] - cmdline: Vec<String>, - /// EFI application binary used by bootloader - #[clap(short, long)] - efi_binary: String, + /// Path where disk image should be created + #[clap(short, long)] + disk_path: String, + /// Path to kernel binary + #[clap(short, long)] + kernel_path: String, + /// Path to initial ram disk + #[clap(short, long)] + initrd_path: String, + /// Command line string to be passed to kernel + #[clap(short, long)] + cmdline: Vec<String>, + /// EFI application binary used by bootloader + #[clap(short, long)] + efi_binary: String, } fn main() { @@ -37,7 +38,13 @@ fn main() { let path = PathBuf::from(args.initrd_path); path.canonicalize().unwrap() }; - create_disk_images(&disk_image_path, &kernel_binary_path, &initrd_path, args.cmdline.join(" "), args.efi_binary); + create_disk_images( + &disk_image_path, + &kernel_binary_path, + &initrd_path, + args.cmdline.join(" "), + args.efi_binary, + ); } pub fn create_disk_images( @@ -49,9 +56,13 @@ pub fn create_disk_images( ) -> PathBuf { //let kernel_manifest_path = locate_cargo_manifest::locate_manifest().unwrap(); //let kernel_binary_name = kernel_binary_path.file_name().unwrap().to_str().unwrap(); - if let Err(e) = - create_uefi_disk_image(disk_image_path, kernel_binary_path, initrd_path, cmdline, efi_binary) - { + if let Err(e) = create_uefi_disk_image( + disk_image_path, + kernel_binary_path, + initrd_path, + cmdline, + efi_binary, + ) { panic!("failed to create disk image: {:?}", e); } if !disk_image_path.exists() { diff --git a/tools/initrd_gen/src/main.rs b/tools/initrd_gen/src/main.rs index 24fd0796..6bde2929 100644 --- a/tools/initrd_gen/src/main.rs +++ b/tools/initrd_gen/src/main.rs @@ -1,6 +1,6 @@ use std::{fs::File, path::Path}; -use clap::{Command, Arg}; +use clap::{Arg, Command}; use tar::Builder; fn main() { @@ -22,7 +22,10 @@ fn main() { .num_args(1..), ); let matches = app.get_matches(); - let initrd_output = matches.get_one::<String>("output").map(|s| s.as_str()).unwrap(); + let initrd_output = matches + .get_one::<String>("output") + .map(|s| s.as_str()) + .unwrap(); let files = matches.get_many::<String>("files"); let outfile = File::create(initrd_output).unwrap(); let mut archive = Builder::new(outfile); diff --git a/tools/xtask/src/toolchain.rs b/tools/xtask/src/toolchain.rs index abe9693b..23b3705f 100644 --- a/tools/xtask/src/toolchain.rs +++ b/tools/xtask/src/toolchain.rs @@ -270,7 +270,8 @@ pub fn set_cc() { let inc_path = Path::new("toolchain/src/bootstrap-include") .canonicalize() .unwrap(); - // We don't yet support stack protector. Also, don't pull in standard lib includes, as those may go to the system includes. + // We don't yet support stack protector. Also, don't pull in standard lib includes, as those may + // go to the system includes.
let cflags = format!("-fno-stack-protector -nostdlibinc -I{}", inc_path.display()); std::env::set_var("CFLAGS", cflags); }
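For context on the slot allocator comment reflowed above: the following is a minimal, self-contained sketch of the pair-consolidation scan it describes, under the assumption that a valid pair is (even, even + 1). The names here are illustrative only; the real code uses the unstable array_windows::<2>() and writes results into a preallocated aux vector rather than collecting, since it must not allocate while holding the slot lock.

fn find_pair_firsts(singles: &[usize]) -> Vec<usize> {
    // A valid pair is (even, even + 1). Slots in `singles` are unique, so counting
    // windows that satisfy this predicate cannot double-count, as the comment argues.
    singles
        .windows(2)
        .filter_map(|w| {
            if w[0] % 2 == 0 && w[1] == w[0] + 1 {
                Some(w[0])
            } else {
                None
            }
        })
        .collect()
}

fn main() {
    // [2, 3, 4, 5] yields pair starts [2, 4]; the (3, 4) window is rejected because 3 is odd.
    assert_eq!(find_pair_firsts(&[2, 3, 4, 5]), vec![2, 4]);
    // [1, 2, 3, 4] yields only [2]; neither (1, 2) nor (3, 4) starts on an even slot.
    assert_eq!(find_pair_firsts(&[1, 2, 3, 4]), vec![2]);
}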