From 7e1cf44a853cfc9ddc6d57984956d198adf8b83c Mon Sep 17 00:00:00 2001
From: aarkegz
Date: Tue, 20 Aug 2024 17:08:54 +0800
Subject: [PATCH 1/8] use local `memory_addr` for `memory_set`, update
 `memory_set`, move generic params of mapping structures into `MappingBackend`
 as associated types

---
 memory_set/Cargo.toml     |   4 +-
 memory_set/README.md      |   8 ++-
 memory_set/src/area.rs    | 109 +++++++++++++++------------------
 memory_set/src/backend.rs |  33 ++++++++++
 memory_set/src/lib.rs     |   4 +-
 memory_set/src/set.rs     | 126 ++++++++++++++++++++++----------------
 memory_set/src/tests.rs   |   8 ++-
 7 files changed, 172 insertions(+), 120 deletions(-)
 create mode 100644 memory_set/src/backend.rs

diff --git a/memory_set/Cargo.toml b/memory_set/Cargo.toml
index d4bdecf..e380e30 100644
--- a/memory_set/Cargo.toml
+++ b/memory_set/Cargo.toml
@@ -13,6 +13,4 @@ repository.workspace = true
 categories.workspace = true
 
 [dependencies]
-memory_addr = "0.2"
-# todo: update it to local one once it gets adapted to the current version
-# memory_addr = { path = "../memory_addr", version = "0.2" }
+memory_addr = { path = "../memory_addr", version = "0.3.0-dev" }
diff --git a/memory_set/README.md b/memory_set/README.md
index 1e1a7ce..d2d1e0c 100644
--- a/memory_set/README.md
+++ b/memory_set/README.md
@@ -29,7 +29,7 @@ type MockPageTable = [MockFlags; MAX_ADDR];
 struct MockBackend;
 
 let mut pt = [0; MAX_ADDR];
-let mut memory_set = MemorySet::<MockFlags, MockPageTable, MockBackend>::new();
+let mut memory_set = MemorySet::<MockBackend>::new();
 
 // Map [0x1000..0x5000).
 memory_set.map(
@@ -46,7 +46,11 @@ assert_eq!(areas[0].va_range(), va_range!(0x1000..0x2000));
 assert_eq!(areas[1].va_range(), va_range!(0x4000..0x5000));
 
 // Underlying operations to do when manipulating mappings.
-impl MappingBackend<MockFlags, MockPageTable> for MockBackend {
+impl MappingBackend for MockBackend {
+    type Addr = VirtAddr;
+    type Flags = MockFlags;
+    type PageTable = MockPageTable;
+
     fn map(&self, start: VirtAddr, size: usize, flags: MockFlags, pt: &mut MockPageTable) -> bool {
         for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
             if *entry != 0 {
diff --git a/memory_set/src/area.rs b/memory_set/src/area.rs
index 5d89931..b439bee 100644
--- a/memory_set/src/area.rs
+++ b/memory_set/src/area.rs
@@ -1,71 +1,52 @@
 use core::fmt;
-use core::marker::PhantomData;
 
-use memory_addr::{VirtAddr, VirtAddrRange};
+use memory_addr::AddrRange;
 
-use crate::{MappingError, MappingResult};
-
-/// Underlying operations to do when manipulating mappings within the specific
-/// [`MemoryArea`].
-///
-/// The backend can be different for different memory areas. e.g., for linear
-/// mappings, the target physical address is known when it is added to the page
-/// table. For lazy mappings, an empty mapping needs to be added to the page table
-/// to trigger a page fault.
-pub trait MappingBackend<F: Copy, P>: Clone {
-    /// What to do when mapping a region within the area with the given flags.
-    fn map(&self, start: VirtAddr, size: usize, flags: F, page_table: &mut P) -> bool;
-    /// What to do when unmaping a memory region within the area.
-    fn unmap(&self, start: VirtAddr, size: usize, page_table: &mut P) -> bool;
-    /// What to do when changing access flags.
-    fn protect(&self, start: VirtAddr, size: usize, new_flags: F, page_table: &mut P) -> bool;
-}
+use crate::{MappingBackend, MappingError, MappingResult};
 
 /// A memory area represents a continuous range of virtual memory with the same
 /// flags.
 ///
 /// The target physical memory frames are determined by [`MappingBackend`] and
 /// may not be contiguous.
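+///
+/// # Example
+///
+/// A sketch, assuming the `MockBackend` mock and a `MockFlags` value `flags`
+/// as in `tests.rs`:
+///
+/// ```ignore
+/// let area = MemoryArea::new(VirtAddr::from(0x1000), 0x1000, flags, MockBackend);
+/// assert_eq!(area.size(), 0x1000);
+/// ```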
-pub struct MemoryArea<F: Copy, P, B: MappingBackend<F, P>> {
-    va_range: VirtAddrRange,
-    flags: F,
+pub struct MemoryArea<B: MappingBackend> {
+    va_range: AddrRange<B::Addr>,
+    flags: B::Flags,
     backend: B,
-    _phantom: PhantomData<(F, P)>,
 }
 
-impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
+impl<B: MappingBackend> MemoryArea<B> {
     /// Creates a new memory area.
-    pub const fn new(start: VirtAddr, size: usize, flags: F, backend: B) -> Self {
+    pub fn new(start: B::Addr, size: usize, flags: B::Flags, backend: B) -> Self {
         Self {
-            va_range: VirtAddrRange::from_start_size(start, size),
+            va_range: AddrRange::from_start_size(start, size),
             flags,
             backend,
-            _phantom: PhantomData,
         }
     }
 
     /// Returns the virtual address range.
-    pub const fn va_range(&self) -> VirtAddrRange {
+    pub const fn va_range(&self) -> AddrRange<B::Addr> {
         self.va_range
     }
 
     /// Returns the memory flags, e.g., the permission bits.
-    pub const fn flags(&self) -> F {
+    pub const fn flags(&self) -> B::Flags {
         self.flags
     }
 
     /// Returns the start address of the memory area.
-    pub const fn start(&self) -> VirtAddr {
+    pub const fn start(&self) -> B::Addr {
         self.va_range.start
     }
 
     /// Returns the end address of the memory area.
-    pub const fn end(&self) -> VirtAddr {
+    pub const fn end(&self) -> B::Addr {
         self.va_range.end
     }
 
     /// Returns the size of the memory area.
-    pub const fn size(&self) -> usize {
+    pub fn size(&self) -> usize {
         self.va_range.size()
     }
 
@@ -75,19 +56,19 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
     }
 }
 
-impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
+impl<B: MappingBackend> MemoryArea<B> {
     /// Changes the flags.
-    pub(crate) fn set_flags(&mut self, new_flags: F) {
+    pub(crate) fn set_flags(&mut self, new_flags: B::Flags) {
         self.flags = new_flags;
     }
 
     /// Changes the end address of the memory area.
-    pub(crate) fn set_end(&mut self, new_end: VirtAddr) {
+    pub(crate) fn set_end(&mut self, new_end: B::Addr) {
         self.va_range.end = new_end;
     }
 
     /// Maps the whole memory area in the page table.
-    pub(crate) fn map_area(&self, page_table: &mut P) -> MappingResult {
+    pub(crate) fn map_area(&self, page_table: &mut B::PageTable) -> MappingResult {
         self.backend
             .map(self.start(), self.size(), self.flags, page_table)
             .then_some(())
@@ -95,7 +76,7 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
     }
 
     /// Unmaps the whole memory area in the page table.
-    pub(crate) fn unmap_area(&self, page_table: &mut P) -> MappingResult {
+    pub(crate) fn unmap_area(&self, page_table: &mut B::PageTable) -> MappingResult {
         self.backend
             .unmap(self.start(), self.size(), page_table)
             .then_some(())
@@ -103,7 +84,11 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
     }
 
     /// Changes the flags in the page table.
-    pub(crate) fn protect_area(&mut self, new_flags: F, page_table: &mut P) -> MappingResult {
+    pub(crate) fn protect_area(
+        &mut self,
+        new_flags: B::Flags,
+        page_table: &mut B::PageTable,
+    ) -> MappingResult {
         self.backend
             .protect(self.start(), self.size(), new_flags, page_table);
         Ok(())
@@ -113,12 +98,16 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
 
     /// Shrinks the memory area at the left side.
     ///
    /// The start address of the memory area is increased so that its size is
    /// reduced to `new_size`. The shrunk part is unmapped.
-    pub(crate) fn shrink_left(&mut self, new_size: usize, page_table: &mut P) -> MappingResult {
+    pub(crate) fn shrink_left(
+        &mut self,
+        new_size: usize,
+        page_table: &mut B::PageTable,
+    ) -> MappingResult {
         let unmap_size = self.size() - new_size;
         if !self.backend.unmap(self.start(), unmap_size, page_table) {
             return Err(MappingError::BadState);
         }
-        self.va_range.start += unmap_size;
+        self.va_range.start = (self.va_range.start.into() + unmap_size).into();
         Ok(())
     }
 
@@ -126,15 +115,20 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
     /// Shrinks the memory area at the right side.
     ///
     /// The end address of the memory area is decreased so that its size is
    /// reduced to `new_size`. The shrunk part is unmapped.
-    pub(crate) fn shrink_right(&mut self, new_size: usize, page_table: &mut P) -> MappingResult {
+    pub(crate) fn shrink_right(
+        &mut self,
+        new_size: usize,
+        page_table: &mut B::PageTable,
+    ) -> MappingResult {
         let unmap_size = self.size() - new_size;
-        if !self
-            .backend
-            .unmap(self.start() + new_size, unmap_size, page_table)
-        {
+        if !self.backend.unmap(
+            (self.start().into() + new_size).into(),
+            unmap_size,
+            page_table,
+        ) {
             return Err(MappingError::BadState);
         }
-        self.va_range.end -= unmap_size;
+        self.va_range.end = (self.va_range.end.into() - unmap_size).into();
         Ok(())
     }
 
@@ -145,17 +139,15 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
     /// Splits the memory area at the given position.
     ///
     /// The original memory area is shrunk to the left part, and the right
     /// part is returned.
     ///
     /// Returns `None` if the given position is not in the memory area, or one
     /// of the parts is empty after splitting.
-    pub(crate) fn split(&mut self, pos: VirtAddr) -> Option<Self> {
-        let start = self.start();
-        let end = self.end();
+    pub(crate) fn split(&mut self, pos: B::Addr) -> Option<Self> {
+        let pos: usize = pos.into();
+
+        let start: usize = self.start().into();
+        let end: usize = self.end().into();
+        // todo: is it a bug when `pos == end - 1`?
         if start < pos && pos < end {
-            let new_area = Self::new(
-                pos,
-                end.as_usize() - pos.as_usize(),
-                self.flags,
-                self.backend.clone(),
-            );
-            self.va_range.end = pos;
+            let new_area = Self::new(pos.into(), end - pos, self.flags, self.backend.clone());
+            self.va_range.end = pos.into();
             Some(new_area)
         } else {
             None
         }
     }
 }
 
-impl<F, P, B: MappingBackend<F, P>> fmt::Debug for MemoryArea<F, P, B>
+impl<B: MappingBackend> fmt::Debug for MemoryArea<B>
 where
-    F: fmt::Debug + Copy,
+    B::Addr: fmt::Debug,
+    B::Flags: fmt::Debug + Copy,
 {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         f.debug_struct("MemoryArea")
diff --git a/memory_set/src/backend.rs b/memory_set/src/backend.rs
new file mode 100644
index 0000000..fb15aea
--- /dev/null
+++ b/memory_set/src/backend.rs
@@ -0,0 +1,33 @@
+use memory_addr::MemoryAddr;
+
+/// Underlying operations to do when manipulating mappings within the specific
+/// [`MemoryArea`].
+///
+/// The backend can be different for different memory areas. e.g., for linear
+/// mappings, the target physical address is known when it is added to the page
+/// table. For lazy mappings, an empty mapping needs to be added to the page table
+/// to trigger a page fault.
+pub trait MappingBackend: Clone {
+    type Addr: MemoryAddr;
+    type Flags: Copy;
+    type PageTable;
+
+    /// What to do when mapping a region within the area with the given flags.
+    fn map(
+        &self,
+        start: Self::Addr,
+        size: usize,
+        flags: Self::Flags,
+        page_table: &mut Self::PageTable,
+    ) -> bool;
+    /// What to do when unmapping a memory region within the area.
+    fn unmap(&self, start: Self::Addr, size: usize, page_table: &mut Self::PageTable) -> bool;
+    /// What to do when changing access flags.
+    fn protect(
+        &self,
+        start: Self::Addr,
+        size: usize,
+        new_flags: Self::Flags,
+        page_table: &mut Self::PageTable,
+    ) -> bool;
+}
diff --git a/memory_set/src/lib.rs b/memory_set/src/lib.rs
index 0a623f9..3d90cea 100644
--- a/memory_set/src/lib.rs
+++ b/memory_set/src/lib.rs
@@ -4,12 +4,14 @@
 extern crate alloc;
 
 mod area;
+mod backend;
 mod set;
 
 #[cfg(test)]
 mod tests;
 
-pub use self::area::{MappingBackend, MemoryArea};
+pub use self::area::MemoryArea;
+pub use self::backend::MappingBackend;
 pub use self::set::MemorySet;
 
 /// Error type for memory mapping operations.
diff --git a/memory_set/src/set.rs b/memory_set/src/set.rs
index 5f4979c..a6ae04a 100644
--- a/memory_set/src/set.rs
+++ b/memory_set/src/set.rs
@@ -1,16 +1,18 @@
-use alloc::{collections::BTreeMap, vec::Vec};
+use alloc::collections::BTreeMap;
+#[allow(unused_imports)] // `Vec` is used in `protect` below; rustc raises a false alarm here.
+use alloc::vec::Vec;
 use core::fmt;
 
-use memory_addr::{VirtAddr, VirtAddrRange};
+use memory_addr::AddrRange;
 
 use crate::{MappingBackend, MappingError, MappingResult, MemoryArea};
 
 /// A container that maintains memory mappings ([`MemoryArea`]).
-pub struct MemorySet<F: Copy, P, B: MappingBackend<F, P>> {
-    areas: BTreeMap<VirtAddr, MemoryArea<F, P, B>>,
+pub struct MemorySet<B: MappingBackend> {
+    areas: BTreeMap<usize, MemoryArea<B>>,
 }
 
-impl<F: Copy, P, B: MappingBackend<F, P>> MemorySet<F, P, B> {
+impl<B: MappingBackend> MemorySet<B> {
     /// Creates a new memory set.
     pub const fn new() -> Self {
         Self {
@@ -29,18 +31,18 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemorySet<F, P, B> {
     }
 
     /// Returns the iterator over all memory areas.
-    pub fn iter(&self) -> impl Iterator<Item = &MemoryArea<F, P, B>> {
+    pub fn iter(&self) -> impl Iterator<Item = &MemoryArea<B>> {
         self.areas.values()
     }
 
     /// Returns whether the given address range overlaps with any existing area.
-    pub fn overlaps(&self, range: VirtAddrRange) -> bool {
-        if let Some((_, before)) = self.areas.range(..range.start).last() {
+    pub fn overlaps(&self, range: AddrRange<B::Addr>) -> bool {
+        if let Some((_, before)) = self.areas.range(..range.start.into()).last() {
             if before.va_range().overlaps(range) {
                 return true;
             }
         }
-        if let Some((_, after)) = self.areas.range(range.start..).next() {
+        if let Some((_, after)) = self.areas.range(range.start.into()..).next() {
             if after.va_range().overlaps(range) {
                 return true;
             }
@@ -49,8 +51,8 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemorySet<F, P, B> {
     }
 
     /// Finds the memory area that contains the given address.
-    pub fn find(&self, addr: VirtAddr) -> Option<&MemoryArea<F, P, B>> {
-        let candidate = self.areas.range(..=addr).last().map(|(_, a)| a);
+    pub fn find(&self, addr: B::Addr) -> Option<&MemoryArea<B>> {
+        let candidate = self.areas.range(..=addr.into()).last().map(|(_, a)| a);
         candidate.filter(|a| a.va_range().contains(addr))
     }
 
@@ -63,20 +65,21 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemorySet<F, P, B> {
     /// area is found.
     pub fn find_free_area(
         &self,
-        hint: VirtAddr,
+        hint: B::Addr,
         size: usize,
-        limit: VirtAddrRange,
-    ) -> Option<VirtAddr> {
+        limit: AddrRange<B::Addr>,
+    ) -> Option<B::Addr> {
         // brute force: try each area's end address as the start.
-        let mut last_end = hint.max(limit.start);
+        let hint: usize = hint.into();
+        let mut last_end = hint.max(limit.start.into());
         for (addr, area) in self.areas.iter() {
             if last_end + size <= *addr {
-                return Some(last_end);
+                return Some(last_end.into());
             }
-            last_end = area.end();
+            last_end = area.end().into();
         }
-        if last_end + size <= limit.end {
-            Some(last_end)
+        if last_end + size <= limit.end.into() {
+            Some(last_end.into())
         } else {
             None
         }
@@ -92,8 +95,8 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemorySet<F, P, B> {
     /// error.
     pub fn map(
         &mut self,
-        area: MemoryArea<F, P, B>,
-        page_table: &mut P,
+        area: MemoryArea<B>,
+        page_table: &mut B::PageTable,
         unmap_overlap: bool,
     ) -> MappingResult {
         if area.va_range().is_empty() {
@@ -109,7 +112,7 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemorySet<F, P, B> {
         }
 
         area.map_area(page_table)?;
-        assert!(self.areas.insert(area.start(), area).is_none());
+        assert!(self.areas.insert(area.start().into(), area).is_none());
         Ok(())
     }
 
     /// Unmaps memory mappings within the given address range.
     ///
     /// All memory areas that are fully contained in the range are removed
     /// directly. If the area intersects with the boundary, it will be shrunk.
     /// If the unmapped range is in the middle of an existing area, it will be
     /// split into two areas.
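+    ///
+    /// # Example
+    ///
+    /// A sketch of the split behavior, assuming a `MemorySet` built with the
+    /// `MockBackend` from `tests.rs` that already maps `[0x1000, 0x5000)`:
+    ///
+    /// ```ignore
+    /// // Unmapping the middle page leaves two areas behind.
+    /// set.unmap(VirtAddr::from(0x2000), 0x1000, &mut pt)?;
+    /// assert_eq!(set.iter().count(), 2);
+    /// ```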
-    pub fn unmap(&mut self, start: VirtAddr, size: usize, page_table: &mut P) -> MappingResult {
-        let range = VirtAddrRange::from_start_size(start, size);
-        let end = range.end;
+    pub fn unmap(
+        &mut self,
+        start: B::Addr,
+        size: usize,
+        page_table: &mut B::PageTable,
+    ) -> MappingResult {
+        let range = AddrRange::from_start_size(start, size);
         if range.is_empty() {
             return Ok(());
         }
 
+        let start: usize = start.into();
+        let end = range.end.into();
+
         // Unmap entire areas that are contained by the range.
         self.areas.retain(|_, area| {
             if area.va_range().contained_in(range) {
                 area.unmap_area(page_table).unwrap();
                 false
             } else {
                 true
             }
         });
 
         // Shrink right if the area intersects with the left boundary.
         if let Some((before_start, before)) = self.areas.range_mut(..start).last() {
-            let before_end = before.end();
+            let before_end = before.end().into();
             if before_end > start {
                 if before_end <= end {
                     // the unmapped area is at the end of `before`.
-                    before.shrink_right(start.as_usize() - before_start.as_usize(), page_table)?;
+                    before.shrink_right(start - before_start, page_table)?;
                 } else {
                     // the unmapped area is in the middle of `before`, need to split.
-                    let right_part = before.split(end).unwrap();
-                    before.shrink_right(start.as_usize() - before_start.as_usize(), page_table)?;
-                    assert_eq!(right_part.start(), end);
+                    let right_part = before.split(end.into()).unwrap();
+                    before.shrink_right(start - before_start, page_table)?;
+                    assert_eq!(right_part.start().into(), end);
                     self.areas.insert(end, right_part);
                 }
             }
         }
 
         // Shrink left if the area intersects with the right boundary.
         if let Some((&after_start, after)) = self.areas.range_mut(start..).next() {
-            let after_end = after.end();
+            let after_end = after.end().into();
             if after_start < end {
                 // the unmapped area is at the start of `after`.
                 let mut new_area = self.areas.remove(&after_start).unwrap();
-                new_area.shrink_left(after_end.as_usize() - end.as_usize(), page_table)?;
-                assert_eq!(new_area.start(), end);
+                new_area.shrink_left(after_end - end, page_table)?;
+                assert_eq!(new_area.start().into(), end);
                 self.areas.insert(end, new_area);
             }
         }
 
         Ok(())
     }
 
     /// Removes all memory areas and the underlying mappings.
-    pub fn clear(&mut self, page_table: &mut P) -> MappingResult {
+    pub fn clear(&mut self, page_table: &mut B::PageTable) -> MappingResult {
         for (_, area) in self.areas.iter() {
             area.unmap_area(page_table)?;
         }
@@ -188,66 +198,74 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemorySet<F, P, B> {
     /// with the boundary will be handled similarly to `munmap`.
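+    ///
+    /// # Example
+    ///
+    /// A sketch, assuming the `MockFlags = u8` backend from `tests.rs`, where
+    /// bit `0x1` is a hypothetical write-permission bit:
+    ///
+    /// ```ignore
+    /// // Clear the write bit in [0x2000, 0x3000); other areas are untouched.
+    /// set.protect(VirtAddr::from(0x2000), 0x1000, |f| Some(f & !0x1), &mut pt)?;
+    /// ```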
     pub fn protect(
         &mut self,
-        start: VirtAddr,
+        start: B::Addr,
         size: usize,
-        update_flags: impl Fn(F) -> Option<F>,
-        page_table: &mut P,
+        update_flags: impl Fn(B::Flags) -> Option<B::Flags>,
+        page_table: &mut B::PageTable,
     ) -> MappingResult {
+        let start = start.into();
         let end = start + size;
         let mut to_insert = Vec::new();
-        for (_, area) in self.areas.iter_mut() {
+        for (area_start, area) in self.areas.iter_mut() {
+            let area_start = *area_start;
+            let area_end = area.end().into();
+
             if let Some(new_flags) = update_flags(area.flags()) {
-                if area.start() >= end {
+                if area_start >= end {
                     // [ prot ]
                     //          [ area ]
                     break;
-                } else if area.end() <= start {
+                } else if area_end <= start {
                     //          [ prot ]
                     // [ area ]
                     // Do nothing
-                } else if area.start() >= start && area.end() <= end {
+                } else if area_start >= start && area_end <= end {
                     // [    prot    ]
                     //   [ area ]
                     area.protect_area(new_flags, page_table)?;
                     area.set_flags(new_flags);
-                } else if area.start() < start && area.end() > end {
+                } else if area_start < start && area_end > end {
                     //    [ prot ]
                     // [ left | area | right ]
-                    let right_part = area.split(end).unwrap();
-                    area.set_end(start);
+                    let right_part = area.split(end.into()).unwrap();
+                    area.set_end(start.into());
 
                     let mut middle_part =
-                        MemoryArea::new(start, size, area.flags(), area.backend().clone());
+                        MemoryArea::new(start.into(), size, area.flags(), area.backend().clone());
                     middle_part.protect_area(new_flags, page_table)?;
                     middle_part.set_flags(new_flags);
 
-                    to_insert.push((right_part.start(), right_part));
-                    to_insert.push((middle_part.start(), middle_part));
-                } else if area.end() > end {
+                    to_insert.push((right_part.start().into(), right_part));
+                    to_insert.push((middle_part.start().into(), middle_part));
+                } else if area_end > end {
                     // [ prot ]
                     //    [ area | right ]
-                    let right_part = area.split(end).unwrap();
+                    let right_part = area.split(end.into()).unwrap();
                     area.protect_area(new_flags, page_table)?;
                     area.set_flags(new_flags);
 
-                    to_insert.push((right_part.start(), right_part));
+                    to_insert.push((right_part.start().into(), right_part));
                 } else {
                     //        [ prot ]
                     // [ left | area ]
-                    let mut right_part = area.split(start).unwrap();
+                    let mut right_part = area.split(start.into()).unwrap();
                     right_part.protect_area(new_flags, page_table)?;
                     right_part.set_flags(new_flags);
 
-                    to_insert.push((right_part.start(), right_part));
+                    to_insert.push((right_part.start().into(), right_part));
                 }
             }
         }
-        self.areas.extend(to_insert.into_iter());
+        self.areas.extend(to_insert);
         Ok(())
     }
 }
 
-impl<F: Copy + fmt::Debug, P, B: MappingBackend<F, P>> fmt::Debug for MemorySet<F, P, B> {
+impl<B: MappingBackend> fmt::Debug for MemorySet<B>
+where
+    B::Addr: fmt::Debug,
+    B::Flags: fmt::Debug,
+{
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         f.debug_list().entries(self.areas.values()).finish()
     }
diff --git a/memory_set/src/tests.rs b/memory_set/src/tests.rs
index e2b3e9b..0b1fe00 100644
--- a/memory_set/src/tests.rs
+++ b/memory_set/src/tests.rs
@@ -10,9 +10,13 @@ type MockPageTable = [MockFlags; MAX_ADDR];
 #[derive(Clone)]
 struct MockBackend;
 
-type MockMemorySet = MemorySet<MockFlags, MockPageTable, MockBackend>;
+type MockMemorySet = MemorySet<MockBackend>;
+
+impl MappingBackend for MockBackend {
+    type Addr = VirtAddr;
+    type Flags = MockFlags;
+    type PageTable = MockPageTable;
 
-impl MappingBackend<MockFlags, MockPageTable> for MockBackend {
     fn map(&self, start: VirtAddr, size: usize, flags: MockFlags, pt: &mut MockPageTable) -> bool {
         for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
             if *entry != 0 {

From 4e58d27aad951d30b49bbd334436b5c886a35c13 Mon Sep 17 00:00:00 2001
From: aarkegz
Date: Tue, 20 Aug 2024 17:15:45 +0800
Subject: [PATCH 2/8] fix docs
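
Document the associated types of `MappingBackend`, and qualify the
`MemoryArea` intra-doc link so that it resolves from `backend.rs`.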
---
 memory_set/src/backend.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/memory_set/src/backend.rs b/memory_set/src/backend.rs
index fb15aea..e4a9d1f 100644
--- a/memory_set/src/backend.rs
+++ b/memory_set/src/backend.rs
@@ -1,15 +1,18 @@
 use memory_addr::MemoryAddr;
 
 /// Underlying operations to do when manipulating mappings within the specific
-/// [`MemoryArea`].
+/// [`MemoryArea`](crate::MemoryArea).
 ///
 /// The backend can be different for different memory areas. e.g., for linear
 /// mappings, the target physical address is known when it is added to the page
 /// table. For lazy mappings, an empty mapping needs to be added to the page table
 /// to trigger a page fault.
 pub trait MappingBackend: Clone {
+    /// The address type used in the memory area.
     type Addr: MemoryAddr;
+    /// The flags type used in the memory area.
     type Flags: Copy;
+    /// The page table type used in the memory area.
     type PageTable;
 
     /// What to do when mapping a region within the area with the given flags.

From 5f526d691c80c92c62ec88db55426e01be63fde5 Mon Sep 17 00:00:00 2001
From: Su Mingxian
Date: Mon, 26 Aug 2024 11:43:21 +0800
Subject: [PATCH 3/8] Enhanced `MemoryAddr` (#4)

* demo: what if we add more bounds to `MemoryAddr`

* remove arithmetic bounds for `MemoryAddr`, add some provided methods

* rename `MemoryAddr::sub_addr` to `MemoryAddr::offset_from`

* remove a wrong bug todo

* add two more functions to `VirtAddr`

* add `add` and `sub` to `MemoryAddr`

* clarify the behavior of arithmetic operations of `MemoryAddr`, add detailed unit tests

* formatted

* add `overflowing_add/sub` to `MemoryAddr`

* add `checked_add/sub` to `MemoryAddr`

* `AddrRange` candidate B: allow malformed ranges and treat them as empty

* doc and tests improvement for `MemoryAddr` and `AddrRange`

* update `PageIter`

* formatted

* even more tests, ready to merge

* switch to `AddrRange` candidate A

---
 memory_addr/src/addr.rs   | 595 ++++++++++++++++++++++++++++++++------
 memory_addr/src/iter.rs   |  16 +-
 memory_addr/src/lib.rs    |  78 +----
 memory_addr/src/range.rs  | 372 +++++++++++++++++++-----
 memory_set/src/area.rs    |  31 +-
 memory_set/src/backend.rs |   4 +-
 memory_set/src/set.rs     |  75 +++--
 memory_set/src/tests.rs   |   2 +-
 rustfmt.toml              |   1 +
 9 files changed, 879 insertions(+), 295 deletions(-)
 create mode 100644 rustfmt.toml

diff --git a/memory_addr/src/addr.rs b/memory_addr/src/addr.rs
index 9ca3861..4054771 100644
--- a/memory_addr/src/addr.rs
+++ b/memory_addr/src/addr.rs
@@ -1,44 +1,240 @@
+use core::cmp::Ord;
+
 /// A trait for memory address types.
 ///
-/// Memory address types here include both physical and virtual addresses, as well as any other
-/// similar types like guest physical addresses in a hypervisor.
+/// Memory address types here include both physical and virtual addresses, as
+/// well as any other similar types like guest physical addresses in a
+/// hypervisor.
+///
+/// This trait is automatically implemented for any type that is `Copy`,
+/// `From<usize>`, `Into<usize>`, and `Ord`, providing a set of utility methods
+/// for address alignment and arithmetic.
 pub trait MemoryAddr:
     // The address type should be trivially copyable. This implies `Clone`.
     Copy
     // The address type should be convertible to and from `usize`.
     + From<usize>
     + Into<usize>
+    // The address type should be comparable.
+    + Ord
 {
-    // Empty for now.
+    // No required methods for now. Following are some utility methods.
+
+    //
+    // This section contains utility methods for address alignment.
+    //
+
+    /// Aligns the address downwards to the given alignment.
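+    ///
+    /// # Example
+    ///
+    /// A doc-test sketch; `usize` itself gets this method for free via the
+    /// blanket `MemoryAddr` impl below:
+    ///
+    /// ```
+    /// use memory_addr::MemoryAddr;
+    ///
+    /// assert_eq!(0x12f34usize.align_down(0x1000usize), 0x12000);
+    /// ```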
+    #[inline]
+    #[must_use = "this returns a new address, without modifying the original"]
+    fn align_down<U>(self, align: U) -> Self
+    where
+        U: Into<usize>,
+    {
+        Self::from(crate::align_down(self.into(), align.into()))
+    }
+
+    /// Aligns the address upwards to the given alignment.
+    #[inline]
+    #[must_use = "this returns a new address, without modifying the original"]
+    fn align_up<U>(self, align: U) -> Self
+    where
+        U: Into<usize>,
+    {
+        Self::from(crate::align_up(self.into(), align.into()))
+    }
+
+    /// Returns the offset of the address within the given alignment.
+    #[inline]
+    #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+    fn align_offset<U>(self, align: U) -> usize
+    where
+        U: Into<usize>,
+    {
+        crate::align_offset(self.into(), align.into())
+    }
+
+    /// Checks whether the address has the demanded alignment.
+    #[inline]
+    #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+    fn is_aligned<U>(self, align: U) -> bool
+    where
+        U: Into<usize>,
+    {
+        crate::is_aligned(self.into(), align.into())
+    }
+
+    /// Aligns the address downwards to 4096 (bytes).
+    #[inline]
+    #[must_use = "this returns a new address, without modifying the original"]
+    fn align_down_4k(self) -> Self {
+        Self::from(crate::align_down(self.into(), crate::PAGE_SIZE_4K))
+    }
+
+    /// Aligns the address upwards to 4096 (bytes).
+    #[inline]
+    #[must_use = "this returns a new address, without modifying the original"]
+    fn align_up_4k(self) -> Self {
+        Self::from(crate::align_up(self.into(), crate::PAGE_SIZE_4K))
+    }
+
+    /// Returns the offset of the address within a 4K-sized page.
+    #[inline]
+    #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+    fn align_offset_4k(self) -> usize {
+        crate::align_offset(self.into(), crate::PAGE_SIZE_4K)
+    }
+
+    /// Checks whether the address is 4K-aligned.
+    #[inline]
+    #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+    fn is_aligned_4k(self) -> bool {
+        crate::is_aligned(self.into(), crate::PAGE_SIZE_4K)
+    }
+
+    //
+    // This section contains utility methods for address arithmetic.
+    //
+
+    /// Adds a given offset to the address to get a new address.
+    ///
+    /// This method will panic on overflow.
+    #[inline]
+    #[must_use = "this returns a new address, without modifying the original"]
+    fn offset(self, offset: isize) -> Self {
+        // todo: use `strict_add_signed` when it's stable.
+        Self::from(usize::checked_add_signed(self.into(), offset).unwrap())
+    }
+
+    /// Adds a given offset to the address to get a new address.
+    ///
+    /// Unlike `offset`, this method always wraps around on overflow.
+    #[inline]
+    #[must_use = "this returns a new address, without modifying the original"]
+    fn wrapping_offset(self, offset: isize) -> Self {
+        Self::from(usize::wrapping_add_signed(self.into(), offset))
+    }
+
+    /// Gets the distance between two addresses.
+    ///
+    /// This method will panic if the result is not representable by `isize`.
+    #[inline]
+    #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+    fn offset_from(self, base: Self) -> isize {
+        let result = usize::wrapping_sub(self.into(), base.into()) as isize;
+        if (result > 0) ^ (base < self) {
+            // The result has overflowed.
+            panic!("overflow in `MemoryAddr::offset_from`");
+        } else {
+            result
+        }
+    }
+
+    /// Adds a given **unsigned** offset to the address to get a new address.
+    ///
+    /// This method is similar to `offset`, but it takes an unsigned offset. This method
+    /// will also panic on overflow.
+    #[inline]
+    #[must_use = "this returns a new address, without modifying the original"]
+    fn add(self, rhs: usize) -> Self {
+        Self::from(usize::checked_add(self.into(), rhs).unwrap())
+    }
+
+    /// Adds a given **unsigned** offset to the address to get a new address.
+    ///
+    /// Unlike `add`, this method always wraps around on overflow.
+    #[inline]
+    #[must_use = "this returns a new address, without modifying the original"]
+    fn wrapping_add(self, rhs: usize) -> Self {
+        Self::from(usize::wrapping_add(self.into(), rhs))
+    }
+
+    /// Adds a given **unsigned** offset to the address to get a new address.
+    ///
+    /// Unlike `add`, this method returns a tuple of the new address and a boolean indicating
+    /// whether the addition has overflowed.
+    #[inline]
+    #[must_use = "this returns a new address, without modifying the original"]
+    fn overflowing_add(self, rhs: usize) -> (Self, bool) {
+        let (result, overflow) = self.into().overflowing_add(rhs);
+        (Self::from(result), overflow)
+    }
+
+    /// Adds a given **unsigned** offset to the address to get a new address.
+    ///
+    /// Unlike `add`, this method returns `None` on overflow.
+    #[inline]
+    #[must_use = "this returns a new address, without modifying the original"]
+    fn checked_add(self, rhs: usize) -> Option<Self> {
+        usize::checked_add(self.into(), rhs).map(Self::from)
+    }
+
+    /// Subtracts a given **unsigned** offset from the address to get a new address.
+    ///
+    /// This method is similar to `offset(-rhs)`, but it takes an unsigned offset. This method
+    /// will also panic on overflow.
+    #[inline]
+    #[must_use = "this returns a new address, without modifying the original"]
+    fn sub(self, rhs: usize) -> Self {
+        Self::from(usize::checked_sub(self.into(), rhs).unwrap())
+    }
+
+    /// Subtracts a given **unsigned** offset from the address to get a new address.
+    ///
+    /// Unlike `sub`, this method always wraps around on overflow.
+    #[inline]
+    #[must_use = "this returns a new address, without modifying the original"]
+    fn wrapping_sub(self, rhs: usize) -> Self {
+        Self::from(usize::wrapping_sub(self.into(), rhs))
+    }
+
+    /// Subtracts a given **unsigned** offset from the address to get a new address.
+    ///
+    /// Unlike `sub`, this method returns a tuple of the new address and a boolean indicating
+    /// whether the subtraction has overflowed.
+    #[inline]
+    #[must_use = "this returns a new address, without modifying the original"]
+    fn overflowing_sub(self, rhs: usize) -> (Self, bool) {
+        let (result, overflow) = self.into().overflowing_sub(rhs);
+        (Self::from(result), overflow)
+    }
+
+    /// Subtracts a given **unsigned** offset from the address to get a new address.
+    ///
+    /// Unlike `sub`, this method returns `None` on overflow.
+    #[inline]
+    #[must_use = "this returns a new address, without modifying the original"]
+    fn checked_sub(self, rhs: usize) -> Option<Self> {
+        usize::checked_sub(self.into(), rhs).map(Self::from)
+    }
+}
 
-/// Implement the `MemoryAddr` trait for any type that satisfies the required bounds.
-impl<T> MemoryAddr for T where T: Copy + From<usize> + Into<usize> {}
+/// Implement the `MemoryAddr` trait for any type that is `Copy`, `From<usize>`,
+/// `Into<usize>`, and `Ord`.
+impl<T> MemoryAddr for T where T: Copy + From<usize> + Into<usize> + Ord {}
 
 /// Creates a new address type by wrapping an `usize`.
/// /// For each `$vis type $name;`, this macro generates the following items: -/// - Definition of the new address type `$name`, which contains a single private unnamed field of -/// type `usize`. -/// - Default implementations (i.e. derived implementations) for the following traits: +/// - Definition of the new address type `$name`, which contains a single +/// private unnamed field of type `usize`. +/// - Default implementations (i.e. derived implementations) for the following +/// traits: /// - `Copy`, `Clone`, /// - `Default`, /// - `Ord`, `PartialOrd`, `Eq`, and `PartialEq`. /// - Implementations for the following traits: /// - `From`, `Into` (by implementing `From<$name> for usize`), -/// - `Add`, `AddAssign`, `Sub`, `SubAssign`, as well as +/// - `Add`, `AddAssign`, `Sub`, `SubAssign`, and +/// - `Sub<$name>`. /// - Two `const` methods to convert between the address type and `usize`: /// - `from_usize`, which converts an `usize` to the address type, and /// - `as_usize`, which converts the address type to an `usize`. -/// - Methods to align the address, namely: -/// - `align_down`, `align_up`, `align_offset`, `is_aligned`, `align_down_4k`, `align_up_4k`, -/// `align_offset_4k`, and `is_aligned_4k`, which correspond to the functions with the same -/// names in the crate root. /// /// # Example /// /// ``` -/// use memory_addr::def_usize_addr; +/// use memory_addr::{def_usize_addr, MemoryAddr}; /// /// def_usize_addr! { /// /// A example address type. @@ -81,68 +277,6 @@ macro_rules! def_usize_addr { } } - impl $name { - /// Aligns the address downwards to the given alignment. - #[inline] - pub fn align_down(self, align: U) -> Self - where - U: Into, - { - Self::from_usize($crate::align_down(self.0, align.into())) - } - - /// Aligns the address upwards to the given alignment. - #[inline] - pub fn align_up(self, align: U) -> Self - where - U: Into, - { - Self::from_usize($crate::align_up(self.0, align.into())) - } - - /// Returns the offset of the address within the given alignment. - #[inline] - pub fn align_offset(self, align: U) -> usize - where - U: Into, - { - $crate::align_offset(self.0, align.into()) - } - - /// Checks whether the address has the demanded alignment. - #[inline] - pub fn is_aligned(self, align: U) -> bool - where - U: Into, - { - $crate::is_aligned(self.0, align.into()) - } - - /// Aligns the address downwards to 4096 (bytes). - #[inline] - pub const fn align_down_4k(self) -> Self { - Self::from_usize($crate::align_down(self.0, $crate::PAGE_SIZE_4K)) - } - - /// Aligns the address upwards to 4096 (bytes). - #[inline] - pub const fn align_up_4k(self) -> Self { - Self::from_usize($crate::align_up(self.0, $crate::PAGE_SIZE_4K)) - } - - /// Returns the offset of the address within a 4K-sized page. - #[inline] - pub const fn align_offset_4k(self) -> usize { - $crate::align_offset(self.0, $crate::PAGE_SIZE_4K) - } - - /// Checks whether the address is 4K-aligned. - #[inline] - pub const fn is_aligned_4k(self) -> bool { - $crate::is_aligned(self.0, $crate::PAGE_SIZE_4K) - } - } - impl From for $name { #[inline] fn from(addr: usize) -> Self { @@ -187,21 +321,32 @@ macro_rules! def_usize_addr { } } + impl core::ops::Sub<$name> for $name { + type Output = usize; + #[inline] + fn sub(self, rhs: $name) -> usize { + self.0 - rhs.0 + } + } + $crate::def_usize_addr!($($tt)*); }; () => {}; } -/// Creates implementations for the [`core::fmt::Debug`], [`core::fmt::LowerHex`], and -/// [`core::fmt::UpperHex`] traits for the given address types defined by the [`def_usize_addr`]. 
+/// Creates implementations for the [`Debug`](core::fmt::Debug), +/// [`LowerHex`](core::fmt::LowerHex), and [`UpperHex`](core::fmt::UpperHex) +/// traits for the given address types defined by the [`def_usize_addr`]. /// /// For each `$name = $format;`, this macro generates the following items: -/// - An implementation of [`core::fmt::Debug`] for the address type `$name`, which formats the -/// address with `format_args!($format, format_args!("{:#x}", self.0))`, -/// - An implementation of [`core::fmt::LowerHex`] for the address type `$name`, which formats the -/// address in the same way as [`core::fmt::Debug`], -/// - An implementation of [`core::fmt::UpperHex`] for the address type `$name`, which formats the -/// address with `format_args!($format, format_args!("{:#X}", self.0))`. +/// - An implementation of [`core::fmt::Debug`] for the address type `$name`, +/// which formats the address with `format_args!($format, +/// format_args!("{:#x}", self.0))`, +/// - An implementation of [`core::fmt::LowerHex`] for the address type `$name`, +/// which formats the address in the same way as [`core::fmt::Debug`], +/// - An implementation of [`core::fmt::UpperHex`] for the address type `$name`, +/// which formats the address with `format_args!($format, +/// format_args!("{:#X}", self.0))`. /// /// # Example /// @@ -217,11 +362,11 @@ macro_rules! def_usize_addr { /// ExampleAddr = "EA:{}"; /// } /// -/// fn main() { -/// assert_eq!(format!("{:?}", PhysAddr::from(0x1abc)), "PA:0x1abc"); -/// assert_eq!(format!("{:x}", VirtAddr::from(0x1abc)), "VA:0x1abc"); -/// assert_eq!(format!("{:X}", ExampleAddr::from(0x1abc)), "EA:0x1ABC"); -/// } +/// # fn main() { +/// assert_eq!(format!("{:?}", PhysAddr::from(0x1abc)), "PA:0x1abc"); +/// assert_eq!(format!("{:x}", VirtAddr::from(0x1abc)), "VA:0x1abc"); +/// assert_eq!(format!("{:X}", ExampleAddr::from(0x1abc)), "EA:0x1ABC"); +/// # } /// ``` #[macro_export] macro_rules! def_usize_addr_formatter { @@ -267,17 +412,42 @@ def_usize_addr_formatter! { } impl VirtAddr { + /// Creates a new virtual address from a raw pointer. + #[inline] + pub fn from_ptr_of(ptr: *const T) -> Self { + Self(ptr as usize) + } + + /// Creates a new virtual address from a mutable raw pointer. + #[inline] + pub fn from_mut_ptr_of(ptr: *mut T) -> Self { + Self(ptr as usize) + } + /// Converts the virtual address to a raw pointer. #[inline] pub const fn as_ptr(self) -> *const u8 { self.0 as *const u8 } + /// Converts the virtual address to a raw pointer of a specific type. + #[inline] + pub const fn as_ptr_of(self) -> *const T { + self.0 as *const T + } + /// Converts the virtual address to a mutable raw pointer. #[inline] pub const fn as_mut_ptr(self) -> *mut u8 { self.0 as *mut u8 } + + /// Converts the virtual address to a mutable raw pointer of a specific + /// type. + #[inline] + pub const fn as_mut_ptr_of(self) -> *mut T { + self.0 as *mut T + } } /// Alias for [`PhysAddr::from_usize`]. @@ -295,3 +465,250 @@ macro_rules! va { $crate::VirtAddr::from_usize($addr) }; } + +#[cfg(test)] +mod test { + use core::mem::size_of; + + use super::*; + + def_usize_addr! { + /// An example address type. + pub type ExampleAddr; + /// Another example address type. + pub type AnotherAddr; + } + + def_usize_addr_formatter! 
{ + ExampleAddr = "EA:{}"; + AnotherAddr = "AA:{}"; + } + + #[test] + fn test_addr() { + let addr = va!(0x2000); + assert!(addr.is_aligned_4k()); + assert!(!addr.is_aligned(0x10000usize)); + assert_eq!(addr.align_offset_4k(), 0); + assert_eq!(addr.align_down_4k(), va!(0x2000)); + assert_eq!(addr.align_up_4k(), va!(0x2000)); + + let addr = va!(0x2fff); + assert!(!addr.is_aligned_4k()); + assert_eq!(addr.align_offset_4k(), 0xfff); + assert_eq!(addr.align_down_4k(), va!(0x2000)); + assert_eq!(addr.align_up_4k(), va!(0x3000)); + + let align = 0x100000; + let addr = va!(align * 5) + 0x2000; + assert!(addr.is_aligned_4k()); + assert!(!addr.is_aligned(align)); + assert_eq!(addr.align_offset(align), 0x2000); + assert_eq!(addr.align_down(align), va!(align * 5)); + assert_eq!(addr.align_up(align), va!(align * 6)); + } + + #[test] + pub fn test_addr_convert_and_comparison() { + let example1 = ExampleAddr::from_usize(0x1234); + let example2 = ExampleAddr::from(0x5678); + let another1 = AnotherAddr::from_usize(0x9abc); + let another2 = AnotherAddr::from(0xdef0); + + assert_eq!(example1.as_usize(), 0x1234); + assert_eq!(Into::::into(example2), 0x5678); + assert_eq!(Into::::into(another1), 0x9abc); + assert_eq!(another2.as_usize(), 0xdef0); + + assert_eq!(example1, ExampleAddr::from(0x1234)); + assert_eq!(example2, ExampleAddr::from_usize(0x5678)); + assert_eq!(another1, AnotherAddr::from_usize(0x9abc)); + assert_eq!(another2, AnotherAddr::from(0xdef0)); + + assert!(example1 < example2); + assert!(example1 <= example2); + assert!(example2 > example1); + assert!(example2 >= example1); + assert!(example1 != example2); + } + + #[test] + pub fn test_addr_fmt() { + assert_eq!(format!("{:?}", ExampleAddr::from(0x1abc)), "EA:0x1abc"); + assert_eq!(format!("{:x}", AnotherAddr::from(0x1abc)), "AA:0x1abc"); + assert_eq!(format!("{:X}", ExampleAddr::from(0x1abc)), "EA:0x1ABC"); + } + + #[test] + pub fn test_alignment() { + let alignment = 0x1000usize; + let base = alignment * 2; + let offset = 0x123usize; + let addr = ExampleAddr::from_usize(base + offset); + + assert_eq!(addr.align_down(alignment), ExampleAddr::from_usize(base)); + assert_eq!( + addr.align_up(alignment), + ExampleAddr::from_usize(base + alignment) + ); + assert_eq!(addr.align_offset(alignment), offset); + assert!(!addr.is_aligned(alignment)); + assert!(ExampleAddr::from_usize(base).is_aligned(alignment)); + assert_eq!( + ExampleAddr::from_usize(base).align_up(alignment), + ExampleAddr::from_usize(base) + ); + } + + #[test] + pub fn test_addr_arithmetic() { + let base = 0x1234usize; + let offset = 0x100usize; + let with_offset = base + offset; + + let addr = ExampleAddr::from_usize(base); + let offset_addr = ExampleAddr::from_usize(with_offset); + + assert_eq!(addr.offset(offset as isize), offset_addr); + assert_eq!(addr.wrapping_offset(offset as isize), offset_addr); + assert_eq!(offset_addr.offset_from(addr), offset as isize); + assert_eq!(addr.add(offset), offset_addr); + assert_eq!(addr.wrapping_add(offset), offset_addr); + assert_eq!(offset_addr.sub(offset), addr); + assert_eq!(offset_addr.wrapping_sub(offset), addr); + + assert_eq!(addr + offset, offset_addr); + assert_eq!(offset_addr - offset, addr); + assert_eq!(offset_addr - addr, offset); + } + + #[test] + pub fn test_addr_wrapping_arithmetic() { + let base = usize::MAX - 0x100usize; + let offset = 0x200usize; + let with_offset = base.wrapping_add(offset); + + let addr = ExampleAddr::from_usize(base); + let offset_addr = ExampleAddr::from_usize(with_offset); + + 
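        // `with_offset` has wrapped around `usize::MAX`, so the `wrapping_*`
        // methods must reproduce it without panicking.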
assert_eq!(addr.wrapping_offset(offset as isize), offset_addr); + assert_eq!(offset_addr.wrapping_offset(-(offset as isize)), addr); + assert_eq!(addr.wrapping_add(offset), offset_addr); + assert_eq!(offset_addr.wrapping_sub(offset), addr); + } + + #[test] + pub fn test_addr_checked_arithmetic() { + let low_addr = ExampleAddr::from_usize(0x100usize); + let high_addr = ExampleAddr::from_usize(usize::MAX - 0x100usize); + let small_offset = 0x50usize; + let large_offset = 0x200usize; + + assert_eq!( + low_addr.checked_sub(small_offset), + Some(low_addr.wrapping_sub(small_offset)) + ); + assert_eq!(low_addr.checked_sub(large_offset), None); + assert_eq!( + high_addr.checked_add(small_offset), + Some(high_addr.wrapping_add(small_offset)) + ); + assert_eq!(high_addr.checked_add(large_offset), None); + } + + #[test] + pub fn test_addr_overflowing_arithmetic() { + let low_addr = ExampleAddr::from_usize(0x100usize); + let high_addr = ExampleAddr::from_usize(usize::MAX - 0x100usize); + let small_offset = 0x50usize; + let large_offset = 0x200usize; + + assert_eq!( + low_addr.overflowing_sub(small_offset), + (low_addr.wrapping_sub(small_offset), false) + ); + assert_eq!( + low_addr.overflowing_sub(large_offset), + (low_addr.wrapping_sub(large_offset), true) + ); + assert_eq!( + high_addr.overflowing_add(small_offset), + (high_addr.wrapping_add(small_offset), false) + ); + assert_eq!( + high_addr.overflowing_add(large_offset), + (high_addr.wrapping_add(large_offset), true) + ); + } + + #[test] + #[should_panic] + pub fn test_addr_offset_overflow() { + let addr = ExampleAddr::from_usize(usize::MAX); + let _ = addr.offset(1); + } + + #[test] + #[should_panic] + pub fn test_addr_offset_from_overflow() { + let addr = ExampleAddr::from_usize(usize::MAX); + let _ = addr.offset_from(ExampleAddr::from_usize(0)); + } + + #[test] + #[should_panic] + pub fn test_addr_offset_from_underflow() { + let addr = ExampleAddr::from_usize(0); + let _ = addr.offset_from(ExampleAddr::from_usize(usize::MAX)); + } + + #[test] + #[should_panic] + pub fn test_addr_add_overflow() { + let addr = ExampleAddr::from_usize(usize::MAX); + let _ = addr.add(1); + } + + #[test] + #[should_panic] + pub fn test_addr_sub_underflow() { + let addr = ExampleAddr::from_usize(0); + let _ = addr.sub(1); + } + + #[test] + pub fn test_virt_addr_ptr() { + let a: [usize; 4] = [0x1234, 0x5678, 0x9abc, 0xdef0]; + + let va0 = VirtAddr::from_ptr_of(&a as *const usize); + let va1 = va0.add(size_of::()); + let va2 = va1.add(size_of::()); + let va3 = va2.add(size_of::()); + + let p0 = va0.as_ptr() as *const usize; + let p1 = va1.as_ptr_of::(); + let p2 = va2.as_mut_ptr() as *mut usize; + let p3 = va3.as_mut_ptr_of::(); + + // testing conversion back to virt addr + assert_eq!(va0, VirtAddr::from_ptr_of(p0)); + assert_eq!(va1, VirtAddr::from_ptr_of(p1)); + assert_eq!(va2, VirtAddr::from_mut_ptr_of(p2)); + assert_eq!(va3, VirtAddr::from_mut_ptr_of(p3)); + + // testing pointer read/write + assert!(unsafe { *p0 } == a[0]); + assert!(unsafe { *p1 } == a[1]); + assert!(unsafe { *p2 } == a[2]); + assert!(unsafe { *p3 } == a[3]); + + unsafe { + *p2 = 0xdeadbeef; + } + unsafe { + *p3 = 0xcafebabe; + } + assert_eq!(a[2], 0xdeadbeef); + assert_eq!(a[3], 0xcafebabe); + } +} diff --git a/memory_addr/src/iter.rs b/memory_addr/src/iter.rs index e178c96..0ebf461 100644 --- a/memory_addr/src/iter.rs +++ b/memory_addr/src/iter.rs @@ -1,4 +1,4 @@ -use crate::is_aligned; +use crate::MemoryAddr; /// A page-by-page iterator. 
/// @@ -16,10 +16,12 @@ use crate::is_aligned; /// assert_eq!(iter.next(), Some(0x1000)); /// assert_eq!(iter.next(), Some(0x2000)); /// assert_eq!(iter.next(), None); +/// +/// assert!(PageIter::<0x1000, usize>::new(0x1000, 0x3001).is_none()); /// ``` pub struct PageIter where - A: Into + Copy, + A: MemoryAddr, { start: A, end: A, @@ -27,7 +29,7 @@ where impl PageIter where - A: Into + Copy, + A: MemoryAddr, { /// Creates a new [`PageIter`]. /// @@ -35,8 +37,8 @@ where /// is not page-aligned. pub fn new(start: A, end: A) -> Option { if !PAGE_SIZE.is_power_of_two() - || !is_aligned(Into::::into(start), PAGE_SIZE) - || !is_aligned(Into::::into(end), PAGE_SIZE) + || !start.is_aligned(PAGE_SIZE) + || !end.is_aligned(PAGE_SIZE) { None } else { @@ -47,14 +49,14 @@ where impl Iterator for PageIter where - A: Into + Copy + PartialOrd + core::ops::AddAssign, + A: MemoryAddr, { type Item = A; fn next(&mut self) -> Option { if self.start < self.end { let ret = self.start; - self.start += PAGE_SIZE; + self.start = self.start.add(PAGE_SIZE); Some(ret) } else { None diff --git a/memory_addr/src/lib.rs b/memory_addr/src/lib.rs index 1fd4172..1ede0ed 100644 --- a/memory_addr/src/lib.rs +++ b/memory_addr/src/lib.rs @@ -77,72 +77,20 @@ pub const fn is_aligned_4k(addr: usize) -> bool { #[cfg(test)] mod tests { - use crate::{va, va_range, VirtAddrRange}; + use super::*; #[test] - fn test_addr() { - let addr = va!(0x2000); - assert!(addr.is_aligned_4k()); - assert!(!addr.is_aligned(0x10000usize)); - assert_eq!(addr.align_offset_4k(), 0); - assert_eq!(addr.align_down_4k(), va!(0x2000)); - assert_eq!(addr.align_up_4k(), va!(0x2000)); - - let addr = va!(0x2fff); - assert!(!addr.is_aligned_4k()); - assert_eq!(addr.align_offset_4k(), 0xfff); - assert_eq!(addr.align_down_4k(), va!(0x2000)); - assert_eq!(addr.align_up_4k(), va!(0x3000)); - - let align = 0x100000; - let addr = va!(align * 5) + 0x2000; - assert!(addr.is_aligned_4k()); - assert!(!addr.is_aligned(align)); - assert_eq!(addr.align_offset(align), 0x2000); - assert_eq!(addr.align_down(align), va!(align * 5)); - assert_eq!(addr.align_up(align), va!(align * 6)); - } - - #[test] - fn test_range() { - let start = va!(0x1000); - let end = va!(0x2000); - let range = va_range!(start..end); - println!("range: {:?}", range); - - assert!((0x1000..0x1000).is_empty()); - assert!((0x1000..0xfff).is_empty()); - assert!(!range.is_empty()); - - assert_eq!(range.start, start); - assert_eq!(range.end, end); - assert_eq!(range.size(), 0x1000); - - assert!(range.contains(va!(0x1000))); - assert!(range.contains(va!(0x1080))); - assert!(!range.contains(va!(0x2000))); - - assert!(!range.contains_range((0xfff..0x1fff).into())); - assert!(!range.contains_range((0xfff..0x2000).into())); - assert!(!range.contains_range((0xfff..0x2001).into())); - assert!(range.contains_range((0x1000..0x1fff).into())); - assert!(range.contains_range((0x1000..0x2000).into())); - assert!(!range.contains_range((0x1000..0x2001).into())); - assert!(range.contains_range((0x1001..0x1fff).into())); - assert!(range.contains_range((0x1001..0x2000).into())); - assert!(!range.contains_range((0x1001..0x2001).into())); - assert!(!range.contains_range(VirtAddrRange::from_start_size(0xfff.into(), 0x1))); - assert!(!range.contains_range(VirtAddrRange::from_start_size(0x2000.into(), 0x1))); - - assert!(range.contained_in((0xfff..0x2000).into())); - assert!(range.contained_in((0x1000..0x2000).into())); - assert!(range.contained_in((0x1000..0x2001).into())); - - assert!(!range.overlaps((0x800..0x1000).into())); - 
assert!(range.overlaps((0x800..0x1001).into())); - assert!(range.overlaps((0x1800..0x2000).into())); - assert!(range.overlaps((0x1800..0x2001).into())); - assert!(!range.overlaps((0x2000..0x2800).into())); - assert!(range.overlaps((0xfff..0x2001).into())); + fn test_align() { + assert_eq!(align_down(0x12345678, 0x1000), 0x12345000); + assert_eq!(align_up(0x12345678, 0x1000), 0x12346000); + assert_eq!(align_offset(0x12345678, 0x1000), 0x678); + assert!(is_aligned(0x12345000, 0x1000)); + assert!(!is_aligned(0x12345678, 0x1000)); + + assert_eq!(align_down_4k(0x12345678), 0x12345000); + assert_eq!(align_up_4k(0x12345678), 0x12346000); + assert_eq!(align_offset_4k(0x12345678), 0x678); + assert!(is_aligned_4k(0x12345000)); + assert!(!is_aligned_4k(0x12345678)); } } diff --git a/memory_addr/src/range.rs b/memory_addr/src/range.rs index 29ee1f4..1351944 100644 --- a/memory_addr/src/range.rs +++ b/memory_addr/src/range.rs @@ -2,16 +2,13 @@ use core::{fmt, ops::Range}; use crate::{MemoryAddr, PhysAddr, VirtAddr}; -macro_rules! usize { - ($addr:expr) => { - Into::::into($addr) - }; -} - /// A range of a given memory address type `A`. /// -/// The range is inclusive on the start and exclusive on the end. -/// It is empty if `start >= end`. +/// The range is inclusive on the start and exclusive on the end. A range is +/// considered **empty** iff `start == end`, and **invalid** iff `start > end`. +/// An invalid range should not be created and cannot be obtained without unsafe +/// operations, calling methods on an invalid range will cause unexpected +/// consequences. /// /// # Example /// @@ -22,7 +19,7 @@ macro_rules! usize { /// assert_eq!(range.start, 0x1000); /// assert_eq!(range.end, 0x2000); /// ``` -#[derive(Clone, Copy)] +#[derive(Clone, Copy, PartialEq, Eq)] pub struct AddrRange { /// The lower bound of the range (inclusive). pub start: A, @@ -35,7 +32,11 @@ impl AddrRange where A: MemoryAddr, { - /// Creates a new address range. + /// Creates a new address range from the start and end addresses. + /// + /// # Panics + /// + /// Panics if `start > end`. /// /// # Example /// @@ -46,13 +47,76 @@ where /// assert_eq!(range.start, 0x1000); /// assert_eq!(range.end, 0x2000); /// ``` + /// + /// And this will panic: + /// + /// ```should_panic + /// # use memory_addr::AddrRange; + /// let _ = AddrRange::new(0x2000usize, 0x1000); + /// ``` + #[inline] + pub fn new(start: A, end: A) -> Self { + assert!( + start <= end, + "invalid `AddrRange`: {}..{}", + start.into(), + end.into() + ); + Self { start, end } + } + + /// Creates a new address range from the given range. + /// + /// Returns `None` if `start > end`. + /// + /// # Example + /// + /// ``` + /// use memory_addr::AddrRange; + /// + /// let range = AddrRange::try_new(0x1000usize, 0x2000).unwrap(); + /// assert_eq!(range.start, 0x1000); + /// assert_eq!(range.end, 0x2000); + /// assert!(AddrRange::try_new(0x2000usize, 0x1000).is_none()); + /// ``` + #[inline] + pub fn try_new(start: A, end: A) -> Option { + if start <= end { + Some(Self { start, end }) + } else { + None + } + } + + /// Creates a new address range from the given range without checking the + /// validity. + /// + /// # Safety + /// + /// The caller must ensure that `start <= end`, otherwise the range will be + /// invalid and unexpected consequences will occur. 
+ /// + /// # Example + /// + /// ``` + /// use memory_addr::AddrRange; + /// + /// let range = unsafe { AddrRange::new_unchecked(0x1000usize, 0x2000) }; + /// assert_eq!(range.start, 0x1000); + /// assert_eq!(range.end, 0x2000); + /// ``` #[inline] - pub const fn new(start: A, end: A) -> Self { + pub const unsafe fn new_unchecked(start: A, end: A) -> Self { Self { start, end } } /// Creates a new address range from the start address and the size. /// + /// # Panics + /// + /// Panics if `size` is too large and causes overflow during evaluating the + /// end address. + /// /// # Example /// /// ``` @@ -62,15 +126,76 @@ where /// assert_eq!(range.start, 0x1000); /// assert_eq!(range.end, 0x2000); /// ``` + /// + /// And this will panic: + /// + /// ```should_panic + /// # use memory_addr::AddrRange; + /// let _ = AddrRange::from_start_size(0x1000usize, usize::MAX); + /// ``` #[inline] pub fn from_start_size(start: A, size: usize) -> Self { + if let Some(end) = start.checked_add(size) { + Self { start, end } + } else { + panic!( + "size too large for `AddrRange`: {} + {}", + start.into(), + size + ); + } + } + + /// Creates a new address range from the start address and the size. + /// + /// Returns `None` if `size` is too large and causes overflow during + /// evaluating the end address. + /// + /// # Example + /// + /// ``` + /// use memory_addr::AddrRange; + /// + /// let range = AddrRange::try_from_start_size(0x1000usize, 0x1000).unwrap(); + /// assert_eq!(range.start, 0x1000); + /// assert_eq!(range.end, 0x2000); + /// assert!(AddrRange::try_from_start_size(0x1000usize, usize::MAX).is_none()); + /// ``` + #[inline] + pub fn try_from_start_size(start: A, size: usize) -> Option { + start.checked_add(size).map(|end| Self { start, end }) + } + + /// Creates a new address range from the start address and the size without + /// checking the validity. + /// + /// # Safety + /// + /// The caller must ensure that `size` is not too large and won't cause + /// overflow during evaluating the end address. Failing to do so will + /// create an invalid range and cause unexpected consequences. + /// + /// # Example + /// + /// ``` + /// use memory_addr::AddrRange; + /// + /// let range = unsafe { AddrRange::from_start_size_unchecked(0x1000usize, 0x1000) }; + /// assert_eq!(range.start, 0x1000); + /// assert_eq!(range.end, 0x2000); + /// ``` + #[inline] + pub unsafe fn from_start_size_unchecked(start: A, size: usize) -> Self { Self { start, - end: A::from(usize!(start) + size), + end: start.wrapping_add(size), } } - /// Returns `true` if the range is empty (`start >= end`). + /// Returns `true` if the range is empty. + /// + /// It's also guaranteed that `false` will be returned if the range is + /// invalid (i.e., `start > end`). /// /// # Example /// @@ -78,12 +203,11 @@ where /// use memory_addr::AddrRange; /// /// assert!(AddrRange::new(0x1000usize, 0x1000).is_empty()); - /// assert!(AddrRange::new(0x1000usize, 0xfff).is_empty()); /// assert!(!AddrRange::new(0x1000usize, 0x2000).is_empty()); /// ``` #[inline] pub fn is_empty(self) -> bool { - usize!(self.start) >= usize!(self.end) + self.start >= self.end } /// Returns the size of the range. @@ -98,7 +222,7 @@ where /// ``` #[inline] pub fn size(self) -> usize { - usize!(self.end) - usize!(self.start) + usize::wrapping_sub(self.end.into(), self.start.into()) } /// Checks if the range contains the given address. 
@@ -109,14 +233,14 @@ where /// use memory_addr::AddrRange; /// /// let range = AddrRange::new(0x1000usize, 0x2000); - /// assert!(!range.contains(0xfff)); + /// assert!(!range.contains(0x0fff)); /// assert!(range.contains(0x1000)); /// assert!(range.contains(0x1fff)); /// assert!(!range.contains(0x2000)); /// ``` #[inline] pub fn contains(self, addr: A) -> bool { - usize!(self.start) <= usize!(addr) && usize!(addr) < usize!(self.end) + self.start <= addr && addr < self.end } /// Checks if the range contains the given address range. @@ -124,19 +248,19 @@ where /// # Example /// /// ``` - /// use memory_addr::AddrRange; + /// use memory_addr::{addr_range, AddrRange}; /// /// let range = AddrRange::new(0x1000usize, 0x2000); - /// assert!(!range.contains_range(AddrRange::from(0x0usize..0xfff))); - /// assert!(!range.contains_range(AddrRange::from(0xfffusize..0x1fff))); - /// assert!(range.contains_range(AddrRange::from(0x1001usize..0x1fff))); - /// assert!(range.contains_range(AddrRange::from(0x1000usize..0x2000))); - /// assert!(!range.contains_range(AddrRange::from(0x1001usize..0x2001))); - /// assert!(!range.contains_range(AddrRange::from(0x2001usize..0x3001))); + /// assert!(!range.contains_range(addr_range!(0x0usize..0xfff))); + /// assert!(!range.contains_range(addr_range!(0x0fffusize..0x1fff))); + /// assert!(range.contains_range(addr_range!(0x1001usize..0x1fff))); + /// assert!(range.contains_range(addr_range!(0x1000usize..0x2000))); + /// assert!(!range.contains_range(addr_range!(0x1001usize..0x2001))); + /// assert!(!range.contains_range(addr_range!(0x2001usize..0x3001))); /// ``` #[inline] pub fn contains_range(self, other: Self) -> bool { - usize!(self.start) <= usize!(other.start) && usize!(other.end) <= usize!(self.end) + self.start <= other.start && other.end <= self.end } /// Checks if the range is contained in the given address range. 
@@ -144,13 +268,13 @@ where /// # Example /// /// ``` - /// use memory_addr::AddrRange; + /// use memory_addr::{addr_range, AddrRange}; /// /// let range = AddrRange::new(0x1000usize, 0x2000); - /// assert!(!range.contained_in(AddrRange::from(0xfffusize..0x1fff))); - /// assert!(!range.contained_in(AddrRange::from(0x1001usize..0x2001))); - /// assert!(range.contained_in(AddrRange::from(0xfffusize..0x2001))); - /// assert!(range.contained_in(AddrRange::from(0x1000usize..0x2000))); + /// assert!(!range.contained_in(addr_range!(0xfffusize..0x1fff))); + /// assert!(!range.contained_in(addr_range!(0x1001usize..0x2001))); + /// assert!(range.contained_in(addr_range!(0xfffusize..0x2001))); + /// assert!(range.contained_in(addr_range!(0x1000usize..0x2000))); /// ``` #[inline] pub fn contained_in(self, other: Self) -> bool { @@ -162,65 +286,53 @@ where /// # Example /// /// ``` - /// use memory_addr::AddrRange; + /// use memory_addr::{addr_range, AddrRange}; /// /// let range = AddrRange::new(0x1000usize, 0x2000usize); - /// assert!(!range.overlaps(AddrRange::from(0xfffusize..0xfff))); - /// assert!(!range.overlaps(AddrRange::from(0x2000usize..0x2000))); - /// assert!(!range.overlaps(AddrRange::from(0xfffusize..0x1000))); - /// assert!(range.overlaps(AddrRange::from(0xfffusize..0x1001))); - /// assert!(range.overlaps(AddrRange::from(0x1fffusize..0x2001))); - /// assert!(range.overlaps(AddrRange::from(0xfffusize..0x2001))); + /// assert!(!range.overlaps(addr_range!(0xfffusize..0xfff))); + /// assert!(!range.overlaps(addr_range!(0x2000usize..0x2000))); + /// assert!(!range.overlaps(addr_range!(0xfffusize..0x1000))); + /// assert!(range.overlaps(addr_range!(0xfffusize..0x1001))); + /// assert!(range.overlaps(addr_range!(0x1fffusize..0x2001))); + /// assert!(range.overlaps(addr_range!(0xfffusize..0x2001))); /// ``` #[inline] pub fn overlaps(self, other: Self) -> bool { - usize!(self.start) < usize!(other.end) && usize!(other.start) < usize!(self.end) + self.start < other.end && other.start < self.end } } -/// Implementations of [`Default`] for [`AddrRange`]. -/// -/// The default value is an empty range `0..0`. -impl Default for AddrRange +/// Conversion from [`Range`] to [`AddrRange`], provided that the type of the +/// endpoints can be converted to the address type `A`. +impl TryFrom> for AddrRange where - A: MemoryAddr, + A: MemoryAddr + From, { - #[inline] - fn default() -> Self { - Self::new(0.into(), 0.into()) - } -} + type Error = (); -/// Implementations of [`PartialEq`] for [`AddrRange`]. -/// -/// Two ranges are equal iff their start and end addresses are equal. -impl PartialEq for AddrRange -where - A: MemoryAddr, -{ #[inline] - fn eq(&self, other: &Self) -> bool { - usize!(self.start) == usize!(other.start) && usize!(self.end) == usize!(other.end) + fn try_from(range: Range) -> Result { + Self::try_new(range.start.into(), range.end.into()).ok_or(()) } } -/// Implementations of [`Eq`] for [`AddrRange`]. -impl Eq for AddrRange where A: MemoryAddr {} - -/// Implementations of [`From`] for [`AddrRange`] and [`Range`]. +/// Implementations of [`Default`] for [`AddrRange`]. /// -/// Converts a range into an address range. -impl From> for AddrRange +/// The default value is an empty range `Range { start: 0, end: 0 }`. 
+impl<A> Default for AddrRange<A>
 where
-    A: MemoryAddr + From<T>,
+    A: MemoryAddr,
 {
     #[inline]
-    fn from(range: Range<T>) -> Self {
-        Self::new(range.start.into(), range.end.into())
+    fn default() -> Self {
+        Self {
+            start: 0.into(),
+            end: 0.into(),
+        }
     }
 }
 
-/// Implementations of [`fmt::Debug`] for [`AddrRange`].
+/// Implementations of [`Debug`](fmt::Debug) for [`AddrRange`].
 impl<A> fmt::Debug for AddrRange<A>
 where
     A: MemoryAddr + fmt::Debug,
@@ -230,7 +342,7 @@ where
     }
 }
 
-/// Implementations of [`fmt::LowerHex`] for [`AddrRange`].
+/// Implementations of [`LowerHex`](fmt::LowerHex) for [`AddrRange`].
 impl<A> fmt::LowerHex for AddrRange<A>
 where
     A: MemoryAddr + fmt::LowerHex,
@@ -240,7 +352,7 @@ where
     }
 }
 
-/// Implementations of [`fmt::UpperHex`] for [`AddrRange`].
+/// Implementations of [`UpperHex`](fmt::UpperHex) for [`AddrRange`].
 impl<A> fmt::UpperHex for AddrRange<A>
 where
     A: MemoryAddr + fmt::UpperHex,
@@ -250,12 +362,41 @@ where
     }
 }
 
-/// A range of virtual addresses.
+/// A range of virtual addresses [`VirtAddr`].
 pub type VirtAddrRange = AddrRange<VirtAddr>;
-/// A range of physical addresses.
+/// A range of physical addresses [`PhysAddr`].
 pub type PhysAddrRange = AddrRange<PhysAddr>;
 
-/// Converts the given range expression into [`VirtAddrRange`].
+/// Converts the given range expression into [`AddrRange`]. Panics if the range
+/// is invalid.
+///
+/// The concrete address type is inferred from the context.
+///
+/// # Example
+///
+/// ```
+/// use memory_addr::{addr_range, AddrRange};
+///
+/// let range: AddrRange<usize> = addr_range!(0x1000usize..0x2000);
+/// assert_eq!(range.start, 0x1000usize);
+/// assert_eq!(range.end, 0x2000usize);
+/// ```
+///
+/// And this will panic:
+///
+/// ```should_panic
+/// # use memory_addr::{addr_range, AddrRange};
+/// let _: AddrRange<usize> = addr_range!(0x2000usize..0x1000);
+/// ```
+#[macro_export]
+macro_rules! addr_range {
+    ($range:expr) => {
+        $crate::AddrRange::try_from($range).unwrap()
+    };
+}
+
+/// Converts the given range expression into [`VirtAddrRange`]. Panics if the
+/// range is invalid.
 ///
 /// # Example
 ///
@@ -266,14 +407,22 @@ pub type PhysAddrRange = AddrRange<PhysAddr>;
 /// assert_eq!(range.start, 0x1000.into());
 /// assert_eq!(range.end, 0x2000.into());
 /// ```
+///
+/// And this will panic:
+///
+/// ```should_panic
+/// # use memory_addr::va_range;
+/// let _ = va_range!(0x2000..0x1000);
+/// ```
 #[macro_export]
 macro_rules! va_range {
     ($range:expr) => {
-        $crate::VirtAddrRange::from($range)
+        $crate::VirtAddrRange::try_from($range).unwrap()
     };
 }
 
-/// Converts the given range expression into [`PhysAddrRange`].
+/// Converts the given range expression into [`PhysAddrRange`]. Panics if the
+/// range is invalid.
 ///
 /// # Example
 ///
@@ -284,9 +433,80 @@ macro_rules! va_range {
 /// assert_eq!(range.start, 0x1000.into());
 /// assert_eq!(range.end, 0x2000.into());
 /// ```
+///
+/// And this will panic:
+///
+/// ```should_panic
+/// # use memory_addr::pa_range;
+/// let _ = pa_range!(0x2000..0x1000);
+/// ```
 #[macro_export]
 macro_rules! pa_range {
     ($range:expr) => {
-        $crate::PhysAddrRange::from($range)
+        $crate::PhysAddrRange::try_from($range).unwrap()
     };
 }
+
+#[cfg(test)]
+mod test {
+    use crate::{va, va_range, VirtAddrRange};
+
+    #[test]
+    fn test_range_format() {
+        let range = va_range!(0xfec000..0xfff000usize);
+
+        assert_eq!(format!("{:?}", range), "VA:0xfec000..VA:0xfff000");
+        assert_eq!(format!("{:x}", range), "VA:0xfec000..VA:0xfff000");
+        assert_eq!(format!("{:X}", range), "VA:0xFEC000..VA:0xFFF000");
+    }
+
+    #[test]
+    fn test_range() {
+        let start = va!(0x1000);
+        let end = va!(0x2000);
+        let range = va_range!(start..end);
+
+        println!("range: {:?}", range);
+
+        assert!((0x1000..0x1000).is_empty());
+        assert!((0x1000..0xfff).is_empty());
+        assert!(!range.is_empty());
+
+        assert_eq!(range.start, start);
+        assert_eq!(range.end, end);
+        assert_eq!(range.size(), 0x1000);
+
+        assert!(range.contains(va!(0x1000)));
+        assert!(range.contains(va!(0x1080)));
+        assert!(!range.contains(va!(0x2000)));
+
+        assert!(!range.contains_range(addr_range!(0xfff..0x1fff)));
+        assert!(!range.contains_range(addr_range!(0xfff..0x2000)));
+        assert!(!range.contains_range(va_range!(0xfff..0x2001))); // test both `va_range!` and `addr_range!`
+        assert!(range.contains_range(va_range!(0x1000..0x1fff)));
+        assert!(range.contains_range(addr_range!(0x1000..0x2000)));
+        assert!(!range.contains_range(addr_range!(0x1000..0x2001)));
+        assert!(range.contains_range(va_range!(0x1001..0x1fff)));
+        assert!(range.contains_range(va_range!(0x1001..0x2000)));
+        assert!(!range.contains_range(va_range!(0x1001..0x2001)));
+        assert!(!range.contains_range(VirtAddrRange::from_start_size(0xfff.into(), 0x1)));
+        assert!(!range.contains_range(VirtAddrRange::from_start_size(0x2000.into(), 0x1)));
+
+        assert!(range.contained_in(addr_range!(0xfff..0x2000)));
+        assert!(range.contained_in(addr_range!(0x1000..0x2000)));
+        assert!(range.contained_in(va_range!(0x1000..0x2001)));
+
+        assert!(!range.overlaps(addr_range!(0x800..0x1000)));
+        assert!(range.overlaps(addr_range!(0x800..0x1001)));
+        assert!(range.overlaps(addr_range!(0x1800..0x2000)));
+        assert!(range.overlaps(va_range!(0x1800..0x2001)));
+        assert!(!range.overlaps(va_range!(0x2000..0x2800)));
+        assert!(range.overlaps(va_range!(0xfff..0x2001)));
+
+        let default_range: VirtAddrRange = Default::default();
+        assert!(default_range.is_empty());
+        assert_eq!(default_range.size(), 0);
+        assert_eq!(default_range.start, va!(0));
+        assert_eq!(default_range.end, va!(0));
+    }
+}
diff --git a/memory_set/src/area.rs b/memory_set/src/area.rs
index b439bee..fba1288 100644
--- a/memory_set/src/area.rs
+++ b/memory_set/src/area.rs
@@ -1,6 +1,6 @@
 use core::fmt;
 
-use memory_addr::AddrRange;
+use memory_addr::{AddrRange, MemoryAddr};
 
 use crate::{MappingBackend, MappingError, MappingResult};
 
@@ -107,7 +107,7 @@ impl<B: MappingBackend> MemoryArea<B> {
         if !self.backend.unmap(self.start(), unmap_size, page_table) {
             return Err(MappingError::BadState);
         }
-        self.va_range.start = (self.va_range.start.into() + unmap_size).into();
+        self.va_range.start = self.va_range.start.add(unmap_size);
         Ok(())
     }
 
@@ -121,14 +121,13 @@ impl<B: MappingBackend> MemoryArea<B> {
         page_table: &mut B::PageTable,
     ) -> MappingResult {
         let unmap_size = self.size() - new_size;
-        if !self.backend.unmap(
-            (self.start().into() + new_size).into(),
-            unmap_size,
-            page_table,
-        ) {
+        if !self
+            .backend
+            .unmap(self.start().add(new_size), unmap_size, page_table)
+        {
             return Err(MappingError::BadState);
         }
-        self.va_range.end = (self.va_range.end.into() - unmap_size).into();
+        self.va_range.end = self.va_range.end.sub(unmap_size);
         Ok(())
     }
 
@@ -140,14 +139,14 @@ impl<B: MappingBackend> MemoryArea<B> {
     /// Returns `None` if the given position is not in the memory area, or one
     /// of the parts is empty after splitting.
     pub(crate) fn split(&mut self, pos: B::Addr) -> Option<Self> {
-        let pos: usize = pos.into();
-
-        let start: usize = self.start().into();
-        let end: usize = self.end().into();
-        // todo: is it a bug when `pos == end - 1`?
-        if start < pos && pos < end {
-            let new_area = Self::new(pos.into(), end - pos, self.flags, self.backend.clone());
-            self.va_range.end = pos.into();
+        if self.start() < pos && pos < self.end() {
+            let new_area = Self::new(
+                pos,
+                self.end().offset_from(pos) as usize,
+                self.flags,
+                self.backend.clone(),
+            );
+            self.va_range.end = pos;
             Some(new_area)
         } else {
             None
diff --git a/memory_set/src/backend.rs b/memory_set/src/backend.rs
index e4a9d1f..3708547 100644
--- a/memory_set/src/backend.rs
+++ b/memory_set/src/backend.rs
@@ -5,8 +5,8 @@ use memory_addr::MemoryAddr;
 ///
 /// The backend can be different for different memory areas. e.g., for linear
 /// mappings, the target physical address is known when it is added to the page
-/// table. For lazy mappings, an empty mapping needs to be added to the page table
-/// to trigger a page fault.
+/// table. For lazy mappings, an empty mapping needs to be added to the page
+/// table to trigger a page fault.
 pub trait MappingBackend: Clone {
     /// The address type used in the memory area.
     type Addr: MemoryAddr;
diff --git a/memory_set/src/set.rs b/memory_set/src/set.rs
index a6ae04a..28faf04 100644
--- a/memory_set/src/set.rs
+++ b/memory_set/src/set.rs
@@ -3,13 +3,13 @@ use alloc::collections::BTreeMap;
 use alloc::vec::Vec;
 use core::fmt;
 
-use memory_addr::AddrRange;
+use memory_addr::{AddrRange, MemoryAddr};
 
 use crate::{MappingBackend, MappingError, MappingResult, MemoryArea};
 
 /// A container that maintains memory mappings ([`MemoryArea`]).
 pub struct MemorySet<B: MappingBackend> {
-    areas: BTreeMap<usize, MemoryArea<B>>,
+    areas: BTreeMap<B::Addr, MemoryArea<B>>,
 }
 
 impl<B: MappingBackend> MemorySet<B> {
@@ -37,12 +37,12 @@ impl<B: MappingBackend> MemorySet<B> {
 
     /// Returns whether the given address range overlaps with any existing area.
     pub fn overlaps(&self, range: AddrRange<B::Addr>) -> bool {
-        if let Some((_, before)) = self.areas.range(..range.start.into()).last() {
+        if let Some((_, before)) = self.areas.range(..range.start).last() {
             if before.va_range().overlaps(range) {
                 return true;
             }
         }
-        if let Some((_, after)) = self.areas.range(range.start.into()..).next() {
+        if let Some((_, after)) = self.areas.range(range.start..).next() {
             if after.va_range().overlaps(range) {
                 return true;
             }
@@ -52,7 +52,7 @@ impl<B: MappingBackend> MemorySet<B> {
 
     /// Finds the memory area that contains the given address.
     pub fn find(&self, addr: B::Addr) -> Option<&MemoryArea<B>> {
-        let candidate = self.areas.range(..=addr.into()).last().map(|(_, a)| a);
+        let candidate = self.areas.range(..=addr).last().map(|(_, a)| a);
         candidate.filter(|a| a.va_range().contains(addr))
     }
 
@@ -70,16 +70,15 @@ impl<B: MappingBackend> MemorySet<B> {
         limit: AddrRange<B::Addr>,
     ) -> Option<B::Addr> {
         // brute force: try each area's end address as the start.
-        let hint: usize = hint.into();
-        let mut last_end = hint.max(limit.start.into());
-        for (addr, area) in self.areas.iter() {
-            if last_end + size <= *addr {
-                return Some(last_end.into());
+        let mut last_end = hint.max(limit.start);
+        for (&addr, area) in self.areas.iter() {
+            if last_end.add(size) <= addr {
+                return Some(last_end);
             }
-            last_end = area.end().into();
+            last_end = area.end();
         }
-        if last_end + size <= limit.end.into() {
-            Some(last_end.into())
+        if last_end.add(size) <= limit.end {
+            Some(last_end)
         } else {
             None
         }
     }
@@ -112,7 +111,7 @@ impl<B: MappingBackend> MemorySet<B> {
         }
 
         area.map_area(page_table)?;
-        assert!(self.areas.insert(area.start().into(), area).is_none());
+        assert!(self.areas.insert(area.start(), area).is_none());
         Ok(())
     }
 
@@ -133,8 +132,7 @@ impl<B: MappingBackend> MemorySet<B> {
             return Ok(());
         }
 
-        let start: usize = start.into();
-        let end = range.end.into();
+        let end = range.end;
 
         // Unmap entire areas that are contained by the range.
         self.areas.retain(|_, area| {
@@ -147,17 +145,17 @@ impl<B: MappingBackend> MemorySet<B> {
         });
 
         // Shrink right if the area intersects with the left boundary.
-        if let Some((before_start, before)) = self.areas.range_mut(..start).last() {
-            let before_end = before.end().into();
+        if let Some((&before_start, before)) = self.areas.range_mut(..start).last() {
+            let before_end = before.end();
             if before_end > start {
                 if before_end <= end {
                     // the unmapped area is at the end of `before`.
-                    before.shrink_right(start - before_start, page_table)?;
+                    before.shrink_right(start.offset_from(before_start) as usize, page_table)?;
                 } else {
                     // the unmapped area is in the middle of `before`, need to split.
-                    let right_part = before.split(end.into()).unwrap();
-                    before.shrink_right(start - before_start, page_table)?;
-                    assert_eq!(right_part.start().into(), end);
+                    let right_part = before.split(end).unwrap();
+                    before.shrink_right(start.offset_from(before_start) as usize, page_table)?;
+                    assert_eq!(right_part.start().into(), Into::<usize>::into(end));
                     self.areas.insert(end, right_part);
                 }
             }
@@ -165,12 +163,12 @@ impl<B: MappingBackend> MemorySet<B> {
 
         // Shrink left if the area intersects with the right boundary.
         if let Some((&after_start, after)) = self.areas.range_mut(start..).next() {
-            let after_end = after.end().into();
+            let after_end = after.end();
             if after_start < end {
                 // the unmapped area is at the start of `after`.
                 let mut new_area = self.areas.remove(&after_start).unwrap();
-                new_area.shrink_left(after_end - end, page_table)?;
-                assert_eq!(new_area.start().into(), end);
+                new_area.shrink_left(after_end.offset_from(end) as usize, page_table)?;
+                assert_eq!(new_area.start().into(), Into::<usize>::into(end));
                 self.areas.insert(end, new_area);
             }
         }
@@ -194,8 +192,8 @@ impl<B: MappingBackend> MemorySet<B> {
     /// It returns [`None`] if there is no bit to change.
     ///
     /// Memory areas will be skipped according to `update_flags`. Memory areas
-    /// that are fully contained in the range or contains the range or intersects
-    /// with the boundary will be handled similarly to `munmap`.
+    /// that are fully contained in the range, contain the range, or intersect
+    /// with the boundary will be handled similarly to `munmap`.
     pub fn protect(
         &mut self,
         start: B::Addr,
@@ -203,12 +201,11 @@ impl<B: MappingBackend> MemorySet<B> {
         update_flags: impl Fn(B::Flags) -> Option<B::Flags>,
         page_table: &mut B::PageTable,
     ) -> MappingResult {
-        let start = start.into();
-        let end = start + size;
+        let end = start.add(size);
         let mut to_insert = Vec::new();
         for (area_start, area) in self.areas.iter_mut() {
             let area_start = *area_start;
-            let area_end = area.end().into();
+            let area_end = area.end();
 
             if let Some(new_flags) = update_flags(area.flags()) {
                 if area_start >= end {
@@ -227,32 +224,32 @@ impl<B: MappingBackend> MemorySet<B> {
                 } else if area_start < start && area_end > end {
                     // [ prot ]
                     // [ left | area | right ]
-                    let right_part = area.split(end.into()).unwrap();
-                    area.set_end(start.into());
+                    let right_part = area.split(end).unwrap();
+                    area.set_end(start);
 
                     let mut middle_part =
-                        MemoryArea::new(start.into(), size, area.flags(), area.backend().clone());
+                        MemoryArea::new(start, size, area.flags(), area.backend().clone());
                     middle_part.protect_area(new_flags, page_table)?;
                     middle_part.set_flags(new_flags);
 
-                    to_insert.push((right_part.start().into(), right_part));
-                    to_insert.push((middle_part.start().into(), middle_part));
+                    to_insert.push((right_part.start(), right_part));
+                    to_insert.push((middle_part.start(), middle_part));
                 } else if area_end > end {
                     // [ prot ]
                     // [ area | right ]
-                    let right_part = area.split(end.into()).unwrap();
+                    let right_part = area.split(end).unwrap();
                     area.protect_area(new_flags, page_table)?;
                     area.set_flags(new_flags);
 
-                    to_insert.push((right_part.start().into(), right_part));
+                    to_insert.push((right_part.start(), right_part));
                 } else {
                     // [ prot ]
                     // [ left | area ]
-                    let mut right_part = area.split(start.into()).unwrap();
+                    let mut right_part = area.split(start).unwrap();
                     right_part.protect_area(new_flags, page_table)?;
                     right_part.set_flags(new_flags);
 
-                    to_insert.push((right_part.start().into(), right_part));
+                    to_insert.push((right_part.start(), right_part));
                 }
             }
         }
diff --git a/memory_set/src/tests.rs b/memory_set/src/tests.rs
index 0b1fe00..d3ce704 100644
--- a/memory_set/src/tests.rs
+++ b/memory_set/src/tests.rs
@@ -1,4 +1,4 @@
-use memory_addr::VirtAddr;
+use memory_addr::{MemoryAddr, VirtAddr};
 
 use crate::{MappingBackend, MappingError, MemoryArea, MemorySet};
 
diff --git a/rustfmt.toml b/rustfmt.toml
new file mode 100644
index 0000000..b2715b2
--- /dev/null
+++ b/rustfmt.toml
@@ -0,0 +1 @@
+wrap_comments = true

From 4d356460e10ce00e22159fa5ba7501b936e6f060 Mon Sep 17 00:00:00 2001
From: aarkegz
Date: Mon, 26 Aug 2024 17:39:41 +0800
Subject: [PATCH 4/8] add `sub_addr` series to `MemoryAddr`, improve panic
 messages in `MemoryAddr` and `AddrRange`

---
 memory_addr/src/addr.rs  | 91 ++++++++++++++++++++++++++++++++++++----
 memory_addr/src/range.rs |  8 ++--
 2 files changed, 86 insertions(+), 13 deletions(-)

diff --git a/memory_addr/src/addr.rs b/memory_addr/src/addr.rs
index 4054771..b846572 100644
--- a/memory_addr/src/addr.rs
+++ b/memory_addr/src/addr.rs
@@ -98,12 +98,14 @@ pub trait MemoryAddr:
 
     /// Adds a given offset to the address to get a new address.
     ///
-    /// This method will panic on overflow.
+    /// # Panics
+    ///
+    /// Panics if the result overflows.
     #[inline]
     #[must_use = "this returns a new address, without modifying the original"]
     fn offset(self, offset: isize) -> Self {
         // todo: use `strict_add_signed` when it's stable.
-        Self::from(usize::checked_add_signed(self.into(), offset).unwrap())
+        Self::from(usize::checked_add_signed(self.into(), offset).expect("overflow in `MemoryAddr::offset`"))
     }
 
     /// Adds a given offset to the address to get a new address.
@@ -117,7 +119,9 @@
 
     /// Gets the distance between two addresses.
     ///
-    /// This method will panic if the result is not representable by `isize`.
+    /// # Panics
+    ///
+    /// Panics if the result is not representable by `isize`.
     #[inline]
     #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
     fn offset_from(self, base: Self) -> isize {
@@ -132,12 +136,15 @@
 
     /// Adds a given **unsigned** offset to the address to get a new address.
     ///
-    /// This method is similar to `offset`, but it takes an unsigned offset. This method
-    /// will also panic on overflow.
+    /// This method is similar to `offset`, but it takes an unsigned offset.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the result overflows.
     #[inline]
     #[must_use = "this returns a new address, without modifying the original"]
     fn add(self, rhs: usize) -> Self {
-        Self::from(usize::checked_add(self.into(), rhs).unwrap())
+        Self::from(usize::checked_add(self.into(), rhs).expect("overflow in `MemoryAddr::add`"))
     }
 
     /// Adds a given **unsigned** offset to the address to get a new address.
@@ -171,12 +178,15 @@
 
     /// Subtracts a given **unsigned** offset from the address to get a new address.
     ///
-    /// This method is similar to `offset(-rhs)`, but it takes an unsigned offset. This method
-    /// will also panic on overflowed.
+    /// This method is similar to `offset(-rhs)`, but it takes an unsigned offset.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the result overflows.
     #[inline]
     #[must_use = "this returns a new address, without modifying the original"]
     fn sub(self, rhs: usize) -> Self {
-        Self::from(usize::checked_sub(self.into(), rhs).unwrap())
+        Self::from(usize::checked_sub(self.into(), rhs).expect("overflow in `MemoryAddr::sub`"))
     }
 
     /// Subtracts a given **unsigned** offset from the address to get a new address.
@@ -207,6 +217,45 @@
     fn checked_sub(self, rhs: usize) -> Option<Self> {
         usize::checked_sub(self.into(), rhs).map(Self::from)
     }
+
+    /// Subtracts another address from the address to get the offset between them.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the result overflows.
+    #[inline]
+    #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+    fn sub_addr(self, rhs: Self) -> usize {
+        usize::checked_sub(self.into(), rhs.into()).expect("overflow in `MemoryAddr::sub_addr`")
+    }
+
+    /// Subtracts another address from the address to get the offset between them.
+    ///
+    /// Unlike `sub_addr`, this method always wraps around on overflow.
+    #[inline]
+    #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+    fn wrapping_sub_addr(self, rhs: Self) -> usize {
+        usize::wrapping_sub(self.into(), rhs.into())
+    }
+
+    /// Subtracts another address from the address to get the offset between them.
+    ///
+    /// Unlike `sub_addr`, this method returns a tuple of the offset and a boolean indicating
+    /// whether the subtraction has overflowed.
+    #[inline]
+    #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+    fn overflowing_sub_addr(self, rhs: Self) -> (usize, bool) {
+        usize::overflowing_sub(self.into(), rhs.into())
+    }
+
+    /// Subtracts another address from the address to get the offset between them.
+    ///
+    /// Unlike `sub_addr`, this method returns `None` on overflow.
+    #[inline]
+    #[must_use = "this function has no side effects, so it can be removed if the return value is not used"]
+    fn checked_sub_addr(self, rhs: Self) -> Option<usize> {
+        usize::checked_sub(self.into(), rhs.into())
+    }
 }
 
 /// Implement the `MemoryAddr` trait for any type that is `Copy`, `From<usize>`,
@@ -576,6 +625,8 @@
         assert_eq!(addr.wrapping_add(offset), offset_addr);
         assert_eq!(offset_addr.sub(offset), addr);
         assert_eq!(offset_addr.wrapping_sub(offset), addr);
+        assert_eq!(offset_addr.sub_addr(addr), offset);
+        assert_eq!(offset_addr.wrapping_sub_addr(addr), offset);
 
         assert_eq!(addr + offset, offset_addr);
         assert_eq!(offset_addr - offset, addr);
@@ -595,6 +646,7 @@
         assert_eq!(offset_addr.wrapping_offset(-(offset as isize)), addr);
         assert_eq!(addr.wrapping_add(offset), offset_addr);
         assert_eq!(offset_addr.wrapping_sub(offset), addr);
+        assert_eq!(offset_addr.wrapping_sub_addr(addr), offset);
     }
 
     #[test]
@@ -614,6 +666,12 @@
             Some(high_addr.wrapping_add(small_offset))
         );
         assert_eq!(high_addr.checked_add(large_offset), None);
+
+        assert_eq!(
+            high_addr.checked_sub_addr(low_addr),
+            Some(usize::MAX - 0x200usize)
+        );
+        assert_eq!(low_addr.checked_sub_addr(high_addr), None);
     }
 
     #[test]
@@ -639,6 +697,14 @@
             high_addr.overflowing_add(large_offset),
             (high_addr.wrapping_add(large_offset), true)
        );
+        assert_eq!(
+            high_addr.overflowing_sub_addr(low_addr),
+            (high_addr.wrapping_sub_addr(low_addr), false)
+        );
+        assert_eq!(
+            low_addr.overflowing_sub_addr(high_addr),
+            (low_addr.wrapping_sub_addr(high_addr), true)
+        );
     }
 
     #[test]
@@ -676,6 +742,13 @@
         let _ = addr.sub(1);
     }
 
+    #[test]
+    #[should_panic]
+    pub fn test_addr_sub_addr_overflow() {
+        let addr = ExampleAddr::from_usize(0);
+        let _ = addr.sub_addr(ExampleAddr::from_usize(1));
+    }
+
     #[test]
     pub fn test_virt_addr_ptr() {
         let a: [usize; 4] = [0x1234, 0x5678, 0x9abc, 0xdef0];
diff --git a/memory_addr/src/range.rs b/memory_addr/src/range.rs
index 1351944..5c0f670 100644
--- a/memory_addr/src/range.rs
+++ b/memory_addr/src/range.rs
@@ -222,7 +222,7 @@ where
     /// ```
     #[inline]
     pub fn size(self) -> usize {
-        usize::wrapping_sub(self.end.into(), self.start.into())
+        self.end.wrapping_sub_addr(self.start)
     }
 
     /// Checks if the range contains the given address.
@@ -391,7 +391,7 @@ pub type PhysAddrRange = AddrRange<PhysAddr>;
 #[macro_export]
 macro_rules! addr_range {
     ($range:expr) => {
-        $crate::AddrRange::try_from($range).unwrap()
+        $crate::AddrRange::try_from($range).expect("invalid address range in `addr_range!`")
     };
 }
 
@@ -417,7 +417,7 @@ macro_rules! addr_range {
 #[macro_export]
 macro_rules! va_range {
     ($range:expr) => {
-        $crate::VirtAddrRange::try_from($range).unwrap()
+        $crate::VirtAddrRange::try_from($range).expect("invalid address range in `va_range!`")
     };
 }
 
@@ -443,7 +443,7 @@ macro_rules! va_range {
 #[macro_export]
 macro_rules! pa_range {
     ($range:expr) => {
-        $crate::PhysAddrRange::try_from($range).unwrap()
+        $crate::PhysAddrRange::try_from($range).expect("invalid address range in `pa_range!`")
     };
 }

From 10c66719af052f7701385950354b1e95e1948ab5 Mon Sep 17 00:00:00 2001
From: aarkegz
Date: Mon, 26 Aug 2024 21:54:43 +0800
Subject: [PATCH 5/8] some small code improvements

---
 memory_set/src/backend.rs | 2 ++
 memory_set/src/set.rs     | 5 ++---
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/memory_set/src/backend.rs b/memory_set/src/backend.rs
index 3708547..5df5c15 100644
--- a/memory_set/src/backend.rs
+++ b/memory_set/src/backend.rs
@@ -23,8 +23,10 @@ pub trait MappingBackend: Clone {
         flags: Self::Flags,
         page_table: &mut Self::PageTable,
     ) -> bool;
 
+    /// What to do when unmapping a memory region within the area.
     fn unmap(&self, start: Self::Addr, size: usize, page_table: &mut Self::PageTable) -> bool;
 
+    /// What to do when changing access flags.
     fn protect(
         &self,
diff --git a/memory_set/src/set.rs b/memory_set/src/set.rs
index 28faf04..91d3e5f 100644
--- a/memory_set/src/set.rs
+++ b/memory_set/src/set.rs
@@ -72,7 +72,7 @@ impl<B: MappingBackend> MemorySet<B> {
         // brute force: try each area's end address as the start.
         let mut last_end = hint.max(limit.start);
         for (&addr, area) in self.areas.iter() {
-            if last_end.add(size) <= addr {
+            if last_end.checked_add(size).is_some_and(|end| end <= addr) {
                 return Some(last_end);
             }
             last_end = area.end();
@@ -203,8 +203,7 @@ impl<B: MappingBackend> MemorySet<B> {
     ) -> MappingResult {
         let end = start.add(size);
         let mut to_insert = Vec::new();
-        for (area_start, area) in self.areas.iter_mut() {
-            let area_start = *area_start;
+        for (&area_start, area) in self.areas.iter_mut() {
             let area_end = area.end();
 
             if let Some(new_flags) = update_flags(area.flags()) {

From ce66134e3d32697ffa5df6aebc0307d08d14747d Mon Sep 17 00:00:00 2001
From: aarkegz
Date: Mon, 26 Aug 2024 21:55:44 +0800
Subject: [PATCH 6/8] bump version to 0.3.0

---
 Cargo.toml            | 2 +-
 memory_set/Cargo.toml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 9914b1d..f238d03 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -7,7 +7,7 @@ members = [
 ]
 
 [workspace.package]
-version = "0.3.0-dev" # dev here to avoid publishing it mistakenly, as `memory_set` is not updated yet
+version = "0.3.0"
 edition = "2021"
 authors = ["Yuekai Jia ", "aarkegz "]
 license = "GPL-3.0-or-later OR Apache-2.0 OR MulanPSL-2.0"
diff --git a/memory_set/Cargo.toml b/memory_set/Cargo.toml
index e380e30..7dee977 100644
--- a/memory_set/Cargo.toml
+++ b/memory_set/Cargo.toml
@@ -13,4 +13,4 @@ repository.workspace = true
 categories.workspace = true
 
 [dependencies]
-memory_addr = { path = "../memory_addr", version = "0.3.0-dev" }
+memory_addr = { path = "../memory_addr", version = "0.3.0" }

From 27bc90380952e799d00dc07555990f15929ab705 Mon Sep 17 00:00:00 2001
From: aarkegz
Date: Mon, 26 Aug 2024 22:34:32 +0800
Subject: [PATCH 7/8] update function calls about address arithmetic
 operations in `MemoryArea`s and `MemorySet`s

---
 memory_set/src/area.rs | 46 +++++++++++++++++++++++++++++++++---------
 memory_set/src/set.rs  | 16 +++++++++------
 2 files changed, 47 insertions(+), 15 deletions(-)

diff --git a/memory_set/src/area.rs b/memory_set/src/area.rs
index fba1288..689aa02 100644
--- a/memory_set/src/area.rs
+++ b/memory_set/src/area.rs
@@ -17,6 +17,10 @@ pub struct MemoryArea<B: MappingBackend> {
 
 impl<B: MappingBackend> MemoryArea<B> {
     /// Creates a new memory area.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `start + size` overflows.
     pub fn new(start: B::Addr, size: usize, flags: B::Flags, backend: B) -> Self {
         Self {
             va_range: AddrRange::from_start_size(start, size),
             flags,
             backend,
@@ -98,16 +102,27 @@ impl<B: MappingBackend> MemoryArea<B> {
     ///
     /// The start address of the memory area is increased by `new_size`. The
     /// shrunk part is unmapped.
+    ///
+    /// `new_size` must be greater than 0 and less than the current size,
+    /// otherwise this function returns an error.
     pub(crate) fn shrink_left(
         &mut self,
         new_size: usize,
         page_table: &mut B::PageTable,
     ) -> MappingResult {
-        let unmap_size = self.size() - new_size;
+        let old_size = self.size();
+        let unmap_size = old_size
+            .checked_sub(new_size)
+            .filter(|&s| s > 0 && s < old_size)
+            .ok_or(MappingError::InvalidParam)?;
+
         if !self.backend.unmap(self.start(), unmap_size, page_table) {
             return Err(MappingError::BadState);
         }
-        self.va_range.start = self.va_range.start.add(unmap_size);
+        // Use wrapping_add to avoid overflow check.
+        // Safety: `unmap_size` is less than the current size, so it will never
+        // overflow.
+        self.va_range.start = self.va_range.start.wrapping_add(unmap_size);
         Ok(())
     }
 
@@ -115,19 +130,30 @@ impl<B: MappingBackend> MemoryArea<B> {
     ///
     /// The end address of the memory area is decreased by `new_size`. The
     /// shrunk part is unmapped.
+    ///
+    /// `new_size` must be greater than 0 and less than the current size,
+    /// otherwise this function returns an error.
     pub(crate) fn shrink_right(
         &mut self,
         new_size: usize,
         page_table: &mut B::PageTable,
     ) -> MappingResult {
-        let unmap_size = self.size() - new_size;
+        let old_size = self.size();
+        let unmap_size = old_size
+            .checked_sub(new_size)
+            .filter(|&s| s > 0 && s < old_size)
+            .ok_or(MappingError::InvalidParam)?;
+
+        // Use wrapping_add to avoid overflow check.
+        // Safety: `new_size` is less than the current size, so it will never overflow.
+        let unmap_start = self.start().wrapping_add(new_size);
+
+        if !self.backend.unmap(unmap_start, unmap_size, page_table) {
-        if !self
-            .backend
-            .unmap(self.start().add(new_size), unmap_size, page_table)
-        {
             return Err(MappingError::BadState);
         }
-        self.va_range.end = self.va_range.end.sub(unmap_size);
+
+        // Use wrapping_sub to avoid overflow check, same as above.
+        self.va_range.end = self.va_range.end.wrapping_sub(unmap_size);
         Ok(())
     }
 
@@ -142,7 +168,9 @@ impl<B: MappingBackend> MemoryArea<B> {
         if self.start() < pos && pos < self.end() {
             let new_area = Self::new(
                 pos,
-                self.end().offset_from(pos) as usize,
+                // Use wrapping_sub_addr to avoid overflow check. It is safe because
+                // `pos` is within the memory area.
+                self.end().wrapping_sub_addr(pos),
                 self.flags,
                 self.backend.clone(),
             );
diff --git a/memory_set/src/set.rs b/memory_set/src/set.rs
index 91d3e5f..7ac8340 100644
--- a/memory_set/src/set.rs
+++ b/memory_set/src/set.rs
@@ -77,7 +77,10 @@ impl<B: MappingBackend> MemorySet<B> {
             }
             last_end = area.end();
         }
-        if last_end.add(size) <= limit.end {
+        if last_end
+            .checked_add(size)
+            .is_some_and(|end| end <= limit.end)
+        {
             Some(last_end)
         } else {
             None
@@ -127,7 +130,8 @@ impl<B: MappingBackend> MemorySet<B> {
         size: usize,
         page_table: &mut B::PageTable,
     ) -> MappingResult {
-        let range = AddrRange::from_start_size(start, size);
+        let range =
+            AddrRange::try_from_start_size(start, size).ok_or(MappingError::InvalidParam)?;
         if range.is_empty() {
             return Ok(());
         }
@@ -150,11 +154,11 @@ impl<B: MappingBackend> MemorySet<B> {
             if before_end > start {
                 if before_end <= end {
                     // the unmapped area is at the end of `before`.
-                    before.shrink_right(start.offset_from(before_start) as usize, page_table)?;
+                    before.shrink_right(start.sub_addr(before_start), page_table)?;
                 } else {
                     // the unmapped area is in the middle of `before`, need to split.
                     let right_part = before.split(end).unwrap();
-                    before.shrink_right(start.offset_from(before_start) as usize, page_table)?;
+                    before.shrink_right(start.sub_addr(before_start), page_table)?;
                     assert_eq!(right_part.start().into(), Into::<usize>::into(end));
                     self.areas.insert(end, right_part);
                 }
@@ -167,7 +171,7 @@ impl<B: MappingBackend> MemorySet<B> {
             if after_start < end {
                 // the unmapped area is at the start of `after`.
                 let mut new_area = self.areas.remove(&after_start).unwrap();
-                new_area.shrink_left(after_end.offset_from(end) as usize, page_table)?;
+                new_area.shrink_left(after_end.sub_addr(end), page_table)?;
                 assert_eq!(new_area.start().into(), Into::<usize>::into(end));
                 self.areas.insert(end, new_area);
             }
@@ -201,7 +205,7 @@ impl<B: MappingBackend> MemorySet<B> {
         update_flags: impl Fn(B::Flags) -> Option<B::Flags>,
         page_table: &mut B::PageTable,
     ) -> MappingResult {
-        let end = start.add(size);
+        let end = start.checked_add(size).ok_or(MappingError::InvalidParam)?;
         let mut to_insert = Vec::new();
         for (&area_start, area) in self.areas.iter_mut() {
             let area_end = area.end();

From 6e7ca9401698189ad1f3d503c7503413ad8e4923 Mon Sep 17 00:00:00 2001
From: aarkegz
Date: Tue, 27 Aug 2024 00:07:17 +0800
Subject: [PATCH 8/8] loosen memory area shrink check

---
 memory_set/src/area.rs | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/memory_set/src/area.rs b/memory_set/src/area.rs
index 689aa02..fed6526 100644
--- a/memory_set/src/area.rs
+++ b/memory_set/src/area.rs
@@ -103,18 +103,16 @@ impl<B: MappingBackend> MemoryArea<B> {
     /// The start address of the memory area is increased by `new_size`. The
     /// shrunk part is unmapped.
     ///
-    /// `new_size` must be greater than 0 and less than the current size,
-    /// otherwise this function returns an error.
+    /// `new_size` must be greater than 0 and less than the current size.
     pub(crate) fn shrink_left(
         &mut self,
         new_size: usize,
         page_table: &mut B::PageTable,
     ) -> MappingResult {
+        assert!(new_size > 0 && new_size < self.size());
+
         let old_size = self.size();
-        let unmap_size = old_size
-            .checked_sub(new_size)
-            .filter(|&s| s > 0 && s < old_size)
-            .ok_or(MappingError::InvalidParam)?;
+        let unmap_size = old_size - new_size;
 
         if !self.backend.unmap(self.start(), unmap_size, page_table) {
             return Err(MappingError::BadState);
@@ -131,18 +129,15 @@ impl<B: MappingBackend> MemoryArea<B> {
     /// The end address of the memory area is decreased by `new_size`. The
     /// shrunk part is unmapped.
     ///
-    /// `new_size` must be greater than 0 and less than the current size,
-    /// otherwise this function returns an error.
+    /// `new_size` must be greater than 0 and less than the current size.
     pub(crate) fn shrink_right(
         &mut self,
         new_size: usize,
         page_table: &mut B::PageTable,
     ) -> MappingResult {
+        assert!(new_size > 0 && new_size < self.size());
         let old_size = self.size();
-        let unmap_size = old_size
-            .checked_sub(new_size)
-            .filter(|&s| s > 0 && s < old_size)
-            .ok_or(MappingError::InvalidParam)?;
+        let unmap_size = old_size - new_size;
 
         // Use wrapping_add to avoid overflow check.
         // Safety: `new_size` is less than the current size, so it will never overflow.