From 4202056631731cf3cf510a272805f714305a5e54 Mon Sep 17 00:00:00 2001
From: Yuekai Jia <equation618@gmail.com>
Date: Thu, 27 Jun 2024 00:18:56 +0800
Subject: [PATCH] monolithic: introduce crate memory_set to implement mmap and
 munmap

---
 Cargo.lock                      |   8 ++
 Cargo.toml                      |   1 +
 crates/memory_set/Cargo.toml    |  15 +++
 crates/memory_set/src/area.rs   | 151 ++++++++++++++++++++++++++
 crates/memory_set/src/lib.rs    |  28 +++++
 crates/memory_set/src/set.rs    | 185 ++++++++++++++++++++++++++++
 crates/memory_set/src/tests.rs  | 184 +++++++++++++++++++++++++++
 modules/axmm/Cargo.toml         |   1 +
 modules/axmm/src/aspace.rs      |  80 ++++++++------
 modules/axmm/src/backend.rs     |  25 +++++
 modules/axmm/src/lib.rs         |  18 ++--
 variants/monolithic/src/main.rs |   3 +-
 12 files changed, 656 insertions(+), 43 deletions(-)
 create mode 100644 crates/memory_set/Cargo.toml
 create mode 100644 crates/memory_set/src/area.rs
 create mode 100644 crates/memory_set/src/lib.rs
 create mode 100644 crates/memory_set/src/set.rs
 create mode 100644 crates/memory_set/src/tests.rs
 create mode 100644 modules/axmm/src/backend.rs

diff --git a/Cargo.lock b/Cargo.lock
index e052dd27d4..528175cea4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -453,6 +453,7 @@ dependencies = [
  "lazy_init",
  "log",
  "memory_addr",
+ "memory_set",
 ]
 
 [[package]]
@@ -1227,6 +1228,13 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
 name = "memory_addr"
 version = "0.2.0"
 
+[[package]]
+name = "memory_set"
+version = "0.1.0"
+dependencies = [
+ "memory_addr",
+]
+
 [[package]]
 name = "micromath"
 version = "2.0.0"
diff --git a/Cargo.toml b/Cargo.toml
index 66bca37b05..86fef51113 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -25,6 +25,7 @@ members = [
     "crates/lazy_init",
     "crates/linked_list",
     "crates/memory_addr",
+    "crates/memory_set",
     "crates/page_table",
     "crates/page_table_entry",
     "crates/percpu",
diff --git a/crates/memory_set/Cargo.toml b/crates/memory_set/Cargo.toml
new file mode 100644
index 0000000000..5f8323e307
--- /dev/null
+++ b/crates/memory_set/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "memory_set"
+version = "0.1.0"
+edition = "2021"
+authors = ["Yuekai Jia <equation618@gmail.com>"]
+description = "Data structures and operations for managing memory mappings"
+license = "GPL-3.0-or-later OR Apache-2.0"
+homepage = "https://github.com/rcore-os/arceos"
+repository = "https://github.com/rcore-os/arceos/tree/main/crates/memory_set"
+documentation = "https://rcore-os.github.io/arceos/memory_set/index.html"
+keywords = ["arceos", "virtual-memory", "memory-area", "mmap"]
+categories = ["os", "memory-management", "no-std"]
+
+[dependencies]
+memory_addr = { path = "../memory_addr" }
diff --git a/crates/memory_set/src/area.rs b/crates/memory_set/src/area.rs
new file mode 100644
index 0000000000..607a1b6789
--- /dev/null
+++ b/crates/memory_set/src/area.rs
@@ -0,0 +1,151 @@
+use core::fmt;
+use core::marker::PhantomData;
+
+use memory_addr::{VirtAddr, VirtAddrRange};
+
+use crate::{MappingError, MappingResult};
+
+/// Underlying memory mapping operations specific to a memory area.
+///
+/// The backend can differ between memory areas. For example, for fixed
+/// mappings the target physical address is known when the area is added to
+/// the page table, while for lazy mappings only an empty entry is installed
+/// so that the first access triggers a page fault.
+pub trait MappingBackend<F: Copy, P>: Clone {
+    /// Maps a memory region with the given flags.
+    fn map(&self, start: VirtAddr, size: usize, flags: F, page_table: &mut P) -> bool;
+    /// Unmaps a memory region.
+    fn unmap(&self, start: VirtAddr, size: usize, page_table: &mut P) -> bool;
+}
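As a concrete illustration of this contract, the sketch below implements the trait over a toy page table (one flags entry per page). `ToyPageTable` and `ToyBackend` are invented for this example and are not part of the patch; the lazy variant installs nothing at map time, matching the doc comment above, so the first access would fault and a handler could populate entries on demand.

```rust
use memory_addr::VirtAddr;
use memory_set::MappingBackend;

/// Toy "page table": one flags entry per page; `None` means not present.
type ToyPageTable = [Option<u8>; 64];

#[derive(Clone)]
enum ToyBackend {
    /// Fill in the entries at map time.
    Eager,
    /// Leave the entries empty; a page-fault handler would fill them later.
    Lazy,
}

impl MappingBackend<u8, ToyPageTable> for ToyBackend {
    fn map(&self, start: VirtAddr, size: usize, flags: u8, pt: &mut ToyPageTable) -> bool {
        match self {
            ToyBackend::Eager => {
                for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
                    if entry.is_some() {
                        return false; // already mapped (note: not transactional)
                    }
                    *entry = Some(flags);
                }
            }
            // Nothing to install: the area is only recorded by the owning
            // `MemorySet`, and the first access traps into the fault handler.
            ToyBackend::Lazy => {}
        }
        true
    }

    fn unmap(&self, start: VirtAddr, size: usize, pt: &mut ToyPageTable) -> bool {
        for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
            *entry = None; // idempotent: lazy pages may never have been filled
        }
        true
    }
}
```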
+
+/// A memory area represents a contiguous range of virtual memory with the
+/// same flags.
+///
+/// The target physical memory frames are determined by [`MappingBackend`] and
+/// may not be contiguous.
+pub struct MemoryArea<F: Copy, P, B: MappingBackend<F, P>> {
+    va_range: VirtAddrRange,
+    flags: F,
+    backend: B,
+    _phantom: PhantomData<(F, P)>,
+}
+
+impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
+    /// Creates a new memory area.
+    pub const fn new(start: VirtAddr, size: usize, flags: F, backend: B) -> Self {
+        Self {
+            va_range: VirtAddrRange::from_start_size(start, size),
+            flags,
+            backend,
+            _phantom: PhantomData,
+        }
+    }
+
+    /// Returns the virtual address range.
+    pub const fn va_range(&self) -> VirtAddrRange {
+        self.va_range
+    }
+
+    /// Returns the memory flags, e.g., the permission bits.
+    pub const fn flags(&self) -> F {
+        self.flags
+    }
+
+    /// Returns the start address of the memory area.
+    pub const fn start(&self) -> VirtAddr {
+        self.va_range.start
+    }
+
+    /// Returns the end address of the memory area.
+    pub const fn end(&self) -> VirtAddr {
+        self.va_range.end
+    }
+
+    /// Returns the size of the memory area.
+    pub const fn size(&self) -> usize {
+        self.va_range.size()
+    }
+}
+
+impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
+    /// Maps the whole memory area in the page table.
+    pub(crate) fn map_area(&self, page_table: &mut P) -> MappingResult {
+        self.backend
+            .map(self.start(), self.size(), self.flags, page_table)
+            .then_some(())
+            .ok_or(MappingError::BadState)
+    }
+
+    /// Unmaps the whole memory area in the page table.
+    pub(crate) fn unmap_area(&self, page_table: &mut P) -> MappingResult {
+        self.backend
+            .unmap(self.start(), self.size(), page_table)
+            .then_some(())
+            .ok_or(MappingError::BadState)
+    }
+
+    /// Shrinks the memory area at the left side.
+    ///
+    /// The area is reduced to `new_size` bytes: its start address is
+    /// increased by the difference, and the part that is cut off is unmapped.
+    pub(crate) fn shrink_left(&mut self, new_size: usize, page_table: &mut P) -> MappingResult {
+        let unmap_size = self.size() - new_size;
+        if !self.backend.unmap(self.start(), unmap_size, page_table) {
+            return Err(MappingError::BadState);
+        }
+        self.va_range.start += unmap_size;
+        Ok(())
+    }
+
+    /// Shrinks the memory area at the right side.
+    ///
+    /// The area is reduced to `new_size` bytes: its end address is decreased
+    /// by the difference, and the part that is cut off is unmapped.
+    pub(crate) fn shrink_right(&mut self, new_size: usize, page_table: &mut P) -> MappingResult {
+        let unmap_size = self.size() - new_size;
+        if !self
+            .backend
+            .unmap(self.start() + new_size, unmap_size, page_table)
+        {
+            return Err(MappingError::BadState);
+        }
+        self.va_range.end -= unmap_size;
+        Ok(())
+    }
+
+    /// Splits the memory area at the given position.
+    ///
+    /// The original memory area is shrunk to the left part, and the right
+    /// part is returned.
+    ///
+    /// Returns `None` if the given position is not in the memory area, or if
+    /// one of the parts would be empty after splitting.
+    pub(crate) fn split(&mut self, pos: VirtAddr) -> Option<Self> {
+        let start = self.start();
+        let end = self.end();
+        if start < pos && pos < end {
+            let new_area = Self::new(
+                pos,
+                end.as_usize() - pos.as_usize(),
+                self.flags,
+                self.backend.clone(),
+            );
+            self.va_range.end = pos;
+            Some(new_area)
+        } else {
+            None
+        }
+    }
+}
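A quick worked example of the split arithmetic. Since `split` is `pub(crate)`, a check like this would live next to the crate's unit tests; `DummyBackend` is the mock backend defined in `tests.rs` later in this patch, and the addresses are arbitrary:

```rust
// An area covering [0x2000, 0x6000) with flags `1`.
let mut area = MemoryArea::new(0x2000.into(), 0x4000, 1, DummyBackend);

// Split at 0x3000: `area` keeps the left part [0x2000, 0x3000); the right
// part [0x3000, 0x6000) is returned with the same flags and a cloned backend.
let right = area.split(0x3000.into()).unwrap();
assert_eq!(area.end(), 0x3000.into());
assert_eq!(right.start(), 0x3000.into());
assert_eq!(right.end(), 0x6000.into());

// Splitting exactly at the start (or end) would leave an empty part, so it
// is refused.
assert!(area.split(0x2000.into()).is_none());
```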
+
+impl<F, P, B: MappingBackend<F, P>> fmt::Debug for MemoryArea<F, P, B>
+where
+    F: fmt::Debug + Copy,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("MemoryArea")
+            .field("va_range", &self.va_range)
+            .field("flags", &self.flags)
+            .finish()
+    }
+}
diff --git a/crates/memory_set/src/lib.rs b/crates/memory_set/src/lib.rs
new file mode 100644
index 0000000000..166c3f374e
--- /dev/null
+++ b/crates/memory_set/src/lib.rs
@@ -0,0 +1,28 @@
+//! Data structures and operations for managing memory mappings.
+
+#![cfg_attr(not(test), no_std)]
+
+extern crate alloc;
+
+mod area;
+mod set;
+
+#[cfg(test)]
+mod tests;
+
+pub use self::area::{MappingBackend, MemoryArea};
+pub use self::set::MemorySet;
+
+/// Error type for memory mapping operations.
+#[derive(Debug, Eq, PartialEq)]
+pub enum MappingError {
+    /// Invalid parameter (e.g., `addr`, `size`, `flags`, etc.)
+    InvalidParam,
+    /// The given range overlaps with an existing mapping.
+    AlreadyExists,
+    /// The backend page table is in a bad state.
+    BadState,
+}
+
+/// A [`Result`] type with [`MappingError`] as the error type.
+pub type MappingResult<T = ()> = Result<T, MappingError>;
diff --git a/crates/memory_set/src/set.rs b/crates/memory_set/src/set.rs
new file mode 100644
index 0000000000..1da2e81df8
--- /dev/null
+++ b/crates/memory_set/src/set.rs
@@ -0,0 +1,185 @@
+use alloc::collections::BTreeMap;
+use core::fmt;
+
+use memory_addr::{VirtAddr, VirtAddrRange};
+
+use crate::{MappingBackend, MappingError, MappingResult, MemoryArea};
+
+/// A container that maintains memory mappings ([`MemoryArea`]).
+pub struct MemorySet<F: Copy, P, B: MappingBackend<F, P>> {
+    areas: BTreeMap<VirtAddr, MemoryArea<F, P, B>>,
+}
+
+impl<F: Copy, P, B: MappingBackend<F, P>> MemorySet<F, P, B> {
+    /// Creates a new memory set.
+    pub const fn new() -> Self {
+        Self {
+            areas: BTreeMap::new(),
+        }
+    }
+
+    /// Returns the number of memory areas in the memory set.
+    pub fn len(&self) -> usize {
+        self.areas.len()
+    }
+
+    /// Returns `true` if the memory set contains no memory areas.
+    pub fn is_empty(&self) -> bool {
+        self.areas.is_empty()
+    }
+
+    /// Returns the iterator over all memory areas.
+    pub fn iter(&self) -> impl Iterator<Item = &MemoryArea<F, P, B>> {
+        self.areas.values()
+    }
+
+    /// Returns whether the given address range overlaps with any existing area.
+    pub fn overlaps(&self, range: VirtAddrRange) -> bool {
+        // Areas are disjoint and keyed by start address, so only the nearest
+        // area on each side of `range.start` can overlap with the range.
+        if let Some((_, before)) = self.areas.range(..range.start).last() {
+            if before.va_range().overlaps(range) {
+                return true;
+            }
+        }
+        if let Some((_, after)) = self.areas.range(range.start..).next() {
+            if after.va_range().overlaps(range) {
+                return true;
+            }
+        }
+        false
+    }
+
+    /// Finds the memory area that contains the given address.
+    pub fn find(&self, addr: VirtAddr) -> Option<&MemoryArea<F, P, B>> {
+        let candidate = self.areas.range(..=addr).last().map(|(_, a)| a);
+        candidate.filter(|a| a.va_range().contains(addr))
+    }
+
+    /// Finds a free area that can accommodate the given size.
+    ///
+    /// The search starts from the given `hint` address, and the area should be
+    /// within the given `limit` range.
+    ///
+    /// Returns the start address of the free area. Returns `None` if no such
+    /// area is found.
+    pub fn find_free_area(
+        &self,
+        hint: VirtAddr,
+        size: usize,
+        limit: VirtAddrRange,
+    ) -> Option<VirtAddr> {
+        // Brute force: try each area's end address as the candidate start.
+        let mut last_end = hint.max(limit.start);
+        for (addr, area) in self.areas.iter() {
+            if last_end + size <= *addr {
+                return Some(last_end);
+            }
+            // Areas that end below the hint must not pull the candidate back
+            // below it, so keep `last_end` monotonically non-decreasing.
+            last_end = last_end.max(area.end());
+        }
+        if last_end + size <= limit.end {
+            Some(last_end)
+        } else {
+            None
+        }
+    }
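To see how the search behaves, here is a test-style sketch reusing the mock types from `tests.rs` below (`DummyMemorySet`, `DummyBackend`, `MAX_ADDR`); the addresses are arbitrary:

```rust
use memory_addr::VirtAddrRange;

let mut set = DummyMemorySet::new();
let mut pt = [0; MAX_ADDR];
// Two mapped areas: [0x1000, 0x2000) and [0x3000, 0x4000).
set.map(MemoryArea::new(0x1000.into(), 0x1000, 1, DummyBackend), &mut pt, false)
    .unwrap();
set.map(MemoryArea::new(0x3000.into(), 0x1000, 1, DummyBackend), &mut pt, false)
    .unwrap();

let limit = VirtAddrRange::from_start_size(0x1000.into(), 0xf000);
// A 0x1000-byte request fits into the gap [0x2000, 0x3000).
assert_eq!(
    set.find_free_area(0x1000.into(), 0x1000, limit),
    Some(0x2000.into())
);
// A 0x2000-byte request does not fit into that gap, so the search falls
// through to the space after the last area.
assert_eq!(
    set.find_free_area(0x1000.into(), 0x2000, limit),
    Some(0x4000.into())
);
```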
+
+    /// Add a new memory mapping.
+    ///
+    /// The mapping is represented by a [`MemoryArea`].
+    ///
+    /// If the new area overlaps with any existing area, the behavior is
+    /// determined by the `unmap_overlap` parameter. If it is `true`, the
+    /// overlapped regions will be unmapped first. Otherwise, it returns an
+    /// error.
+    pub fn map(
+        &mut self,
+        area: MemoryArea<F, P, B>,
+        page_table: &mut P,
+        unmap_overlap: bool,
+    ) -> MappingResult {
+        if area.va_range().is_empty() {
+            return Err(MappingError::InvalidParam);
+        }
+
+        if self.overlaps(area.va_range()) {
+            if unmap_overlap {
+                self.unmap(area.start(), area.size(), page_table)?;
+            } else {
+                return Err(MappingError::AlreadyExists);
+            }
+        }
+
+        area.map_area(page_table)?;
+        assert!(self.areas.insert(area.start(), area).is_none());
+        Ok(())
+    }
+
+    /// Remove memory mappings within the given address range.
+    ///
+    /// All memory areas that are fully contained in the range will be removed
+    /// directly. If an area intersects with the boundary, it will be shrunk.
+    /// If the unmapped range is in the middle of an existing area, it will be
+    /// split into two areas.
+    pub fn unmap(&mut self, start: VirtAddr, size: usize, page_table: &mut P) -> MappingResult {
+        let range = VirtAddrRange::from_start_size(start, size);
+        let end = range.end;
+        if range.is_empty() {
+            return Ok(());
+        }
+
+        // Unmap entire areas that are contained by the range.
+        self.areas.retain(|_, area| {
+            if area.va_range().contained_in(range) {
+                area.unmap_area(page_table).unwrap();
+                false
+            } else {
+                true
+            }
+        });
+
+        // Shrink right if the area intersects with the left boundary.
+        if let Some((before_start, before)) = self.areas.range_mut(..start).last() {
+            let before_end = before.end();
+            if before_end > start {
+                if before_end <= end {
+                    // The unmapped range covers the tail of `before`.
+                    before.shrink_right(start.as_usize() - before_start.as_usize(), page_table)?;
+                } else {
+                    // The unmapped range is in the middle of `before`; split it.
+                    let right_part = before.split(end).unwrap();
+                    before.shrink_right(start.as_usize() - before_start.as_usize(), page_table)?;
+                    assert_eq!(right_part.start(), end);
+                    self.areas.insert(end, right_part);
+                }
+            }
+        }
+
+        // Shrink left if the area intersects with the right boundary.
+        if let Some((&after_start, after)) = self.areas.range_mut(start..).next() {
+            let after_end = after.end();
+            if after_start < end {
+                // The unmapped range covers the head of `after`.
+                let mut new_area = self.areas.remove(&after_start).unwrap();
+                new_area.shrink_left(after_end.as_usize() - end.as_usize(), page_table)?;
+                assert_eq!(new_area.start(), end);
+                self.areas.insert(end, new_area);
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Remove all memory areas and the underlying mappings.
+    pub fn clear(&mut self, page_table: &mut P) -> MappingResult {
+        for (_, area) in self.areas.iter() {
+            area.unmap_area(page_table)?;
+        }
+        self.areas.clear();
+        Ok(())
+    }
+}
+
+impl<F: Copy + fmt::Debug, P, B: MappingBackend<F, P>> fmt::Debug for MemorySet<F, P, B> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_list().entries(self.areas.values()).finish()
+    }
+}
diff --git a/crates/memory_set/src/tests.rs b/crates/memory_set/src/tests.rs
new file mode 100644
index 0000000000..0a815fdeb5
--- /dev/null
+++ b/crates/memory_set/src/tests.rs
@@ -0,0 +1,184 @@
+use memory_addr::VirtAddr;
+
+use crate::{MappingBackend, MappingError, MemoryArea, MemorySet};
+
+const MAX_ADDR: usize = 0x10000;
+
+type DummyFlags = u8;
+type DummyPageTable = [u8; MAX_ADDR];
+
+#[derive(Clone)]
+struct DummyBackend;
+
+type DummyMemorySet = MemorySet<DummyFlags, DummyPageTable, DummyBackend>;
+
+impl MappingBackend<DummyFlags, DummyPageTable> for DummyBackend {
+    fn map(
+        &self,
+        start: VirtAddr,
+        size: usize,
+        flags: DummyFlags,
+        pt: &mut DummyPageTable,
+    ) -> bool {
+        for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
+            if *entry != 0 {
+                return false;
+            }
+            *entry = flags;
+        }
+        true
+    }
+
+    fn unmap(&self, start: VirtAddr, size: usize, pt: &mut DummyPageTable) -> bool {
+        for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
+            if *entry == 0 {
+                return false;
+            }
+            *entry = 0;
+        }
+        true
+    }
+}
+
+macro_rules! assert_ok {
+    ($expr: expr) => {
+        assert!(($expr).is_ok())
+    };
+}
+
+macro_rules! assert_err {
+    ($expr: expr) => {
+        assert!(($expr).is_err())
+    };
+    ($expr: expr, $err: ident) => {
+        assert_eq!(($expr).err(), Some(MappingError::$err))
+    };
+}
+
+fn dump_memory_set(set: &DummyMemorySet) {
+    use std::sync::Mutex;
+    static DUMP_LOCK: Mutex<()> = Mutex::new(());
+
+    let _lock = DUMP_LOCK.lock().unwrap();
+    println!("Number of areas: {}", set.len());
+    for area in set.iter() {
+        println!("{:?}", area);
+    }
+}
+
+#[test]
+fn test_map_unmap() {
+    let mut set = DummyMemorySet::new();
+    let mut pt = [0; MAX_ADDR];
+
+    // Map [0, 0x1000), [0x2000, 0x3000), [0x4000, 0x5000), ...
+    for start in (0..MAX_ADDR).step_by(0x2000) {
+        assert_ok!(set.map(
+            MemoryArea::new(start.into(), 0x1000, 1, DummyBackend),
+            &mut pt,
+            false,
+        ));
+    }
+    // Map [0x1000, 0x2000), [0x3000, 0x4000), [0x5000, 0x6000), ...
+    for start in (0x1000..MAX_ADDR).step_by(0x2000) {
+        assert_ok!(set.map(
+            MemoryArea::new(start.into(), 0x1000, 2, DummyBackend),
+            &mut pt,
+            false,
+        ));
+    }
+    dump_memory_set(&set);
+    assert_eq!(set.len(), 16);
+
+    // Found [0x4000, 0x5000), flags = 1.
+    let area = set.find(0x4100.into()).unwrap();
+    assert_eq!(area.start(), 0x4000.into());
+    assert_eq!(area.end(), 0x5000.into());
+    assert_eq!(area.flags(), 1);
+
+    // The range [0x4000, 0x8000) is already mapped, so `map` returns an error.
+    assert_err!(
+        set.map(
+            MemoryArea::new(0x4000.into(), 0x4000, 3, DummyBackend),
+            &mut pt,
+            false
+        ),
+        AlreadyExists
+    );
+    // Unmap overlapped areas before adding the new mapping [0x4000, 0x8000).
+    assert_ok!(set.map(
+        MemoryArea::new(0x4000.into(), 0x4000, 3, DummyBackend),
+        &mut pt,
+        true
+    ));
+    dump_memory_set(&set);
+    assert_eq!(set.len(), 13);
+
+    // Found [0x4000, 0x8000), flags = 3.
+    let area = set.find(0x4100.into()).unwrap();
+    assert_eq!(area.start(), 0x4000.into());
+    assert_eq!(area.end(), 0x8000.into());
+    assert_eq!(area.flags(), 3);
+
+    // Unmap areas in the middle.
+    assert_ok!(set.unmap(0x4000.into(), 0x8000, &mut pt));
+    assert_eq!(set.len(), 8);
+    // Unmap the remaining areas; the request range may also cover holes that
+    // are already unmapped.
+    assert_ok!(set.unmap(0.into(), MAX_ADDR * 2, &mut pt));
+    assert_eq!(set.len(), 0);
+}
+
+#[test]
+fn test_unmap_split() {
+    let mut set = DummyMemorySet::new();
+    let mut pt = [0; MAX_ADDR];
+
+    // Map [0, 0x1000), [0x2000, 0x3000), [0x4000, 0x5000), ...
+    for start in (0..MAX_ADDR).step_by(0x2000) {
+        assert_ok!(set.map(
+            MemoryArea::new(start.into(), 0x1000, 1, DummyBackend),
+            &mut pt,
+            false,
+        ));
+    }
+    assert_eq!(set.len(), 8);
+
+    // Unmap [0xc00, 0x2400), [0x2c00, 0x4400), [0x4c00, 0x6400), ...
+    // The areas are shrunk at the left and right boundaries.
+    for start in (0..MAX_ADDR).step_by(0x2000) {
+        assert_ok!(set.unmap((start + 0xc00).into(), 0x1800, &mut pt));
+    }
+    dump_memory_set(&set);
+    assert_eq!(set.len(), 8);
+
+    for area in set.iter() {
+        if area.start().as_usize() == 0 {
+            assert_eq!(area.size(), 0xc00);
+        } else {
+            assert_eq!(area.start().align_offset_4k(), 0x400);
+            assert_eq!(area.end().align_offset_4k(), 0xc00);
+            assert_eq!(area.size(), 0x800);
+        }
+    }
+
+    // Unmap [0x800, 0x900), [0x2800, 0x2900), [0x4800, 0x4900), ...
+    // Each area is split into two areas.
+    for start in (0..MAX_ADDR).step_by(0x2000) {
+        assert_ok!(set.unmap((start + 0x800).into(), 0x100, &mut pt));
+    }
+    dump_memory_set(&set);
+    assert_eq!(set.len(), 16);
+
+    for area in set.iter() {
+        let off = area.start().align_offset_4k();
+        if off == 0 {
+            assert_eq!(area.size(), 0x800);
+        } else if off == 0x400 {
+            assert_eq!(area.size(), 0x400);
+        } else if off == 0x900 {
+            assert_eq!(area.size(), 0x300);
+        } else {
+            unreachable!();
+        }
+    }
+}
diff --git a/modules/axmm/Cargo.toml b/modules/axmm/Cargo.toml
index 5ef4a49d7a..a28de46268 100644
--- a/modules/axmm/Cargo.toml
+++ b/modules/axmm/Cargo.toml
@@ -17,3 +17,4 @@ log = "0.4"
 axerrno = { path = "../../crates/axerrno" }
 lazy_init = { path = "../../crates/lazy_init" }
 memory_addr = { path = "../../crates/memory_addr" }
+memory_set = { path = "../../crates/memory_set" }
diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs
index 0c6f86ea60..62005b47f9 100644
--- a/modules/axmm/src/aspace.rs
+++ b/modules/axmm/src/aspace.rs
@@ -1,32 +1,34 @@
 use core::fmt;
 
-use axerrno::{ax_err, AxResult};
+use axerrno::{ax_err, AxError, AxResult};
 use axhal::paging::{MappingFlags, PageTable};
-use memory_addr::{PhysAddr, VirtAddr};
+use memory_addr::{is_aligned_4k, PhysAddr, VirtAddr, VirtAddrRange};
+use memory_set::{MemoryArea, MemorySet};
 
-use crate::paging_err_to_ax_err;
+use crate::backend::FixedBackend;
+use crate::mapping_err_to_ax_err;
 
 /// The virtual memory address space.
 pub struct AddrSpace {
-    base: VirtAddr,
-    end: VirtAddr,
+    va_range: VirtAddrRange,
+    areas: MemorySet<MappingFlags, PageTable, FixedBackend>,
     pt: PageTable,
 }
 
 impl AddrSpace {
     /// Returns the address space base.
     pub const fn base(&self) -> VirtAddr {
-        self.base
+        self.va_range.start
     }
 
     /// Returns the address space end.
     pub const fn end(&self) -> VirtAddr {
-        self.end
+        self.va_range.end
    }
 
     /// Returns the address space size.
     pub const fn size(&self) -> usize {
-        self.end.as_usize() - self.base.as_usize()
+        self.va_range.size()
     }
 
     /// Returns the reference to the inner page table.
@@ -39,28 +41,18 @@
         self.pt.root_paddr()
     }
 
-    /// Checks if the address space contains the given virtual address.
-    pub const fn contains(&self, addr: VirtAddr) -> bool {
-        self.base.as_usize() <= addr.as_usize() && addr.as_usize() < self.end.as_usize()
-    }
-
-    /// Checks if the address space contains the given virtual address range.
+    /// Checks if the address space contains the given address range.
     pub const fn contains_range(&self, start: VirtAddr, size: usize) -> bool {
-        self.base.as_usize() <= start.as_usize() && start.as_usize() + size < self.end.as_usize()
-    }
-
-    /// Checks if the address space overlaps with the given virtual address range.
-    pub const fn overlaps_with(&self, start: VirtAddr, size: usize) -> bool {
-        let end = start.as_usize() + size;
-        !(end <= self.base.as_usize() || start.as_usize() >= self.end.as_usize())
+        self.va_range
+            .contains_range(VirtAddrRange::from_start_size(start, size))
     }
 
     /// Creates a new empty address space.
     pub(crate) fn new_empty(base: VirtAddr, size: usize) -> AxResult<Self> {
         Ok(Self {
-            base,
-            end: base + size,
-            pt: PageTable::try_new().map_err(paging_err_to_ax_err)?,
+            va_range: VirtAddrRange::from_start_size(base, size),
+            areas: MemorySet::new(),
+            pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?,
         })
     }
 
@@ -70,7 +62,7 @@
     /// usually used to copy a portion of the kernel space mapping to
     /// the user space.
     pub fn copy_mappings_from(&mut self, other: &AddrSpace) -> AxResult {
-        if self.overlaps_with(other.base(), other.size()) {
+        if self.va_range.overlaps(other.va_range) {
             return ax_err!(InvalidInput, "address space overlap");
         }
         self.pt.copy_from(&other.pt, other.base(), other.size());
@@ -94,29 +86,51 @@
         if !self.contains_range(start_vaddr, size) {
             return ax_err!(InvalidInput, "address out of range");
         }
-        self.pt
-            .map_region(start_vaddr, start_paddr, size, flags, true)
-            .map_err(paging_err_to_ax_err)?;
+        if !start_vaddr.is_aligned_4k() || !start_paddr.is_aligned_4k() || !is_aligned_4k(size) {
+            return ax_err!(InvalidInput, "address not aligned");
+        }
+
+        // `FixedBackend` resolves physical addresses as `paddr = vaddr - offset`.
+        let offset = start_vaddr.as_usize() - start_paddr.as_usize();
+        let area = MemoryArea::new(start_vaddr, size, flags, FixedBackend::new(offset));
+        self.areas
+            .map(area, &mut self.pt, false)
+            .map_err(mapping_err_to_ax_err)?;
         Ok(())
     }
 
-    /// Removes the mappings for the specified virtual address range.
+    /// Removes mappings within the specified virtual address range.
     pub fn unmap(&mut self, start: VirtAddr, size: usize) -> AxResult {
         if !self.contains_range(start, size) {
             return ax_err!(InvalidInput, "address out of range");
         }
-        self.pt
-            .unmap_region(start, size)
-            .map_err(paging_err_to_ax_err)?;
+        if !start.is_aligned_4k() || !is_aligned_4k(size) {
+            return ax_err!(InvalidInput, "address not aligned");
+        }
+
+        self.areas
+            .unmap(start, size, &mut self.pt)
+            .map_err(mapping_err_to_ax_err)?;
         Ok(())
     }
+
+    /// Removes all mappings in the address space.
+    pub fn clear(&mut self) {
+        self.areas.clear(&mut self.pt).unwrap();
+    }
 }
 
 impl fmt::Debug for AddrSpace {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         f.debug_struct("AddrSpace")
-            .field("va_range", &(self.base.as_usize()..self.end.as_usize()))
+            .field("va_range", &self.va_range)
             .field("page_table_root", &self.pt.root_paddr())
+            .field("areas", &self.areas)
             .finish()
     }
 }
+
+impl Drop for AddrSpace {
+    fn drop(&mut self) {
+        self.clear();
+    }
+}
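To tie the pieces together, here is a hedged usage sketch of the new area-backed API. It assumes a mutable `AddrSpace` (such as the `uspace` built in `variants/monolithic` below), and the addresses are illustrative:

```rust
use axerrno::AxResult;
use axhal::paging::MappingFlags;
use memory_addr::{PhysAddr, VirtAddr};

fn setup(uspace: &mut axmm::AddrSpace) -> AxResult {
    // All three values must be 4K-aligned, or `map_fixed` returns `InvalidInput`.
    uspace.map_fixed(
        VirtAddr::from(0x1000),      // user virtual base
        PhysAddr::from(0x8000_0000), // backing physical frames (illustrative)
        0x4000,                      // four 4 KiB pages
        MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER,
    )?;

    // Unmapping an aligned subrange in the middle of the area splits it in
    // two: [0x1000, 0x2000) and [0x3000, 0x5000) remain mapped.
    uspace.unmap(VirtAddr::from(0x2000), 0x1000)?;
    Ok(())
}
```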
diff --git a/modules/axmm/src/backend.rs b/modules/axmm/src/backend.rs
new file mode 100644
index 0000000000..d25aaa50bb
--- /dev/null
+++ b/modules/axmm/src/backend.rs
@@ -0,0 +1,25 @@
+use axhal::paging::{MappingFlags, PageTable};
+use memory_addr::{PhysAddr, VirtAddr};
+use memory_set::MappingBackend;
+
+/// A [`MappingBackend`] that maps virtual addresses to physical addresses at
+/// a fixed offset (`paddr = vaddr - pa_va_offset`).
+#[derive(Clone)]
+pub struct FixedBackend {
+    pa_va_offset: usize,
+}
+
+impl MappingBackend<MappingFlags, PageTable> for FixedBackend {
+    fn map(&self, start: VirtAddr, size: usize, flags: MappingFlags, pt: &mut PageTable) -> bool {
+        let pa_start = PhysAddr::from(start.as_usize() - self.pa_va_offset);
+        pt.map_region(start, pa_start, size, flags, false).is_ok()
+    }
+
+    fn unmap(&self, start: VirtAddr, size: usize, pt: &mut PageTable) -> bool {
+        pt.unmap_region(start, size).is_ok()
+    }
+}
+
+impl FixedBackend {
+    pub const fn new(pa_va_offset: usize) -> Self {
+        Self { pa_va_offset }
+    }
+}
diff --git a/modules/axmm/src/lib.rs b/modules/axmm/src/lib.rs
index 4467e306eb..8abb7e17e6 100644
--- a/modules/axmm/src/lib.rs
+++ b/modules/axmm/src/lib.rs
@@ -7,28 +7,28 @@ extern crate log;
 extern crate alloc;
 
 mod aspace;
+mod backend;
 
 pub use self::aspace::AddrSpace;
 
 use axerrno::{AxError, AxResult};
 use axhal::mem::phys_to_virt;
-use axhal::paging::{PageTable, PagingError};
+use axhal::paging::PageTable;
 use lazy_init::LazyInit;
 use memory_addr::{PhysAddr, VirtAddr};
+use memory_set::MappingError;
 
-const USER_ASPACE_BASE: usize = 0;
+const USER_ASPACE_BASE: usize = 0x1000;
 const USER_ASPACE_SIZE: usize = 0x7fff_ffff_f000;
 
 static KERNEL_ASPACE: LazyInit<AddrSpace> = LazyInit::new();
 
-fn paging_err_to_ax_err(err: PagingError) -> AxError {
-    warn!("Paging error: {:?}", err);
+fn mapping_err_to_ax_err(err: MappingError) -> AxError {
+    warn!("Mapping error: {:?}", err);
     match err {
-        PagingError::NoMemory => AxError::NoMemory,
-        PagingError::NotAligned => AxError::InvalidInput,
-        PagingError::NotMapped => AxError::NotFound,
-        PagingError::AlreadyMapped => AxError::AlreadyExists,
-        PagingError::MappedToHugePage => AxError::InvalidInput,
+        MappingError::InvalidParam => AxError::InvalidInput,
+        MappingError::AlreadyExists => AxError::AlreadyExists,
+        MappingError::BadState => AxError::BadState,
     }
 }
diff --git a/variants/monolithic/src/main.rs b/variants/monolithic/src/main.rs
index 34b5a93415..b5ae6fae48 100644
--- a/variants/monolithic/src/main.rs
+++ b/variants/monolithic/src/main.rs
@@ -22,11 +22,11 @@ const KERNEL_STACK_SIZE: usize = 0x40000; // 256 KiB
 fn app_main(arg0: usize) {
     unsafe {
         core::arch::asm!(
-            "2:",
             "int3",
             "mov rax, r12",
             "syscall",
             "add r12, 1",
+            "2:",
             "jmp 2b",
             in("r12") arg0,
             in("rdi") 2,
@@ -89,6 +89,7 @@ fn run_apps() -> ! {
     )
     .unwrap();
 
+    info!("New user address space: {:#x?}", uspace);
    spawn_user_task(
         uspace.page_table_root(),
         UspaceContext::new(entry_vaddr.into(), ustack_top, 2333),
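Beyond this patch, the commit title points toward mmap/munmap syscalls. One possible shape, sketched under explicit assumptions: an `AddrSpace::find_free_area` wrapper over `MemorySet::find_free_area` and a `map_alloc` method backed by a frame-allocating backend, neither of which exists in this commit:

```rust
use axerrno::{AxError, AxResult};
use axhal::paging::MappingFlags;
use memory_addr::{align_up_4k, VirtAddr};

// Hypothetical mmap-style handler; `find_free_area` and `map_alloc` on
// `AddrSpace` are assumed extensions, not part of this patch.
fn sys_mmap(
    uspace: &mut axmm::AddrSpace,
    hint: usize,
    len: usize,
    flags: MappingFlags,
) -> AxResult<usize> {
    let size = align_up_4k(len);
    let start = uspace
        .find_free_area(VirtAddr::from(hint), size) // pick a free slot
        .ok_or(AxError::NoMemory)?;
    uspace.map_alloc(start, size, flags)?; // allocate and map frames
    Ok(start.as_usize())
}
```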