From 368a7e9a8483db928c58461d612044dc51f6be17 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 23 Apr 2025 00:15:06 +0000 Subject: [PATCH 1/4] Squashed commit of the following: commit 4472263a6e96104bc7d471caff262c6bb6fb4d12 Author: Yi Lin Date: Thu Apr 17 04:32:13 2025 +0000 Remove an outdated assertion commit abbde8bc091000f61d068de3ab733a505117da20 Author: Yi Lin Date: Thu Apr 17 03:39:41 2025 +0000 Remove ALLOC_TABLE from local specs. Fix build for 1.74.1 commit 1c64dcb7e844f14583d0809bce8f866295d24dd8 Author: Yi Lin Date: Thu Apr 17 01:21:45 2025 +0000 Make chunk map as global side metadata commit b7b5988a6e65aa977ee134084bc09aeeacd8246b Author: Yi Lin Date: Tue Apr 15 04:33:21 2025 +0000 Refactor ChunkState to encode space index --- src/policy/immix/immixspace.rs | 11 +- src/policy/marksweepspace/native_ms/global.rs | 16 +-- src/util/heap/chunk_map.rs | 133 +++++++++++++----- src/util/metadata/side_metadata/global.rs | 5 + src/util/metadata/side_metadata/spec_defs.rs | 4 +- src/util/object_enum.rs | 15 +- .../mock_test_allocate_nonmoving.rs | 37 +++++ src/vm/tests/mock_tests/mod.rs | 1 + 8 files changed, 163 insertions(+), 59 deletions(-) create mode 100644 src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index b932808d8f..49b5691cd5 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -252,7 +252,6 @@ impl ImmixSpace { vec![ MetadataSpec::OnSide(Block::DEFRAG_STATE_TABLE), MetadataSpec::OnSide(Block::MARK_TABLE), - MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE), *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC, *VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC, *VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC, @@ -264,7 +263,6 @@ impl ImmixSpace { MetadataSpec::OnSide(Line::MARK_TABLE), MetadataSpec::OnSide(Block::DEFRAG_STATE_TABLE), MetadataSpec::OnSide(Block::MARK_TABLE), - MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE), *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC, *VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC, *VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC, @@ -299,6 +297,7 @@ impl ImmixSpace { let scheduler = args.scheduler.clone(); let common = CommonSpace::new(args.into_policy_args(true, false, Self::side_metadata_specs())); + let space_index = common.descriptor.get_index(); ImmixSpace { pr: if common.vmrequest.is_discontiguous() { BlockPageResource::new_discontiguous( @@ -316,7 +315,7 @@ impl ImmixSpace { ) }, common, - chunk_map: ChunkMap::new(), + chunk_map: ChunkMap::new(space_index), line_mark_state: AtomicU8::new(Line::RESET_MARK_STATE), line_unavail_state: AtomicU8::new(Line::RESET_MARK_STATE), lines_consumed: AtomicUsize::new(0), @@ -524,7 +523,7 @@ impl ImmixSpace { self.defrag.notify_new_clean_block(copy); let block = Block::from_aligned_address(block_address); block.init(copy); - self.chunk_map.set(block.chunk(), ChunkState::Allocated); + self.chunk_map.set_allocated(block.chunk(), true); self.lines_consumed .fetch_add(Block::LINES, Ordering::SeqCst); Some(block) @@ -899,7 +898,7 @@ struct SweepChunk { impl GCWork for SweepChunk { fn do_work(&mut self, _worker: &mut GCWorker, mmtk: &'static MMTK) { - assert_eq!(self.space.chunk_map.get(self.chunk), ChunkState::Allocated); + assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated()); let mut histogram = self.space.defrag.new_histogram(); let line_mark_state = if super::BLOCK_ONLY { @@ -950,7 +949,7 @@ impl GCWork for SweepChunk { probe!(mmtk, sweep_chunk, allocated_blocks); // Set this chunk as free if there 
is not live blocks. if allocated_blocks == 0 { - self.space.chunk_map.set(self.chunk, ChunkState::Free) + self.space.chunk_map.set_allocated(self.chunk, false) } self.space.defrag.add_completed_mark_histogram(histogram); self.epilogue.finish_one_work_packet(); diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index 0b349c4508..26570e41ba 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -288,7 +288,7 @@ impl MarkSweepSpace { let vm_map = args.vm_map; let is_discontiguous = args.vmrequest.is_discontiguous(); let local_specs = { - metadata::extract_side_metadata(&vec![ + metadata::extract_side_metadata(&[ MetadataSpec::OnSide(Block::NEXT_BLOCK_TABLE), MetadataSpec::OnSide(Block::PREV_BLOCK_TABLE), MetadataSpec::OnSide(Block::FREE_LIST_TABLE), @@ -300,11 +300,11 @@ impl MarkSweepSpace { MetadataSpec::OnSide(Block::BLOCK_LIST_TABLE), MetadataSpec::OnSide(Block::TLS_TABLE), MetadataSpec::OnSide(Block::MARK_TABLE), - MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE), *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC, ]) }; let common = CommonSpace::new(args.into_policy_args(false, false, local_specs)); + let space_index = common.descriptor.get_index(); MarkSweepSpace { pr: if is_discontiguous { BlockPageResource::new_discontiguous( @@ -322,7 +322,7 @@ impl MarkSweepSpace { ) }, common, - chunk_map: ChunkMap::new(), + chunk_map: ChunkMap::new(space_index), scheduler, abandoned: Mutex::new(AbandonedBlockLists::new()), abandoned_in_gc: Mutex::new(AbandonedBlockLists::new()), @@ -402,7 +402,7 @@ impl MarkSweepSpace { pub fn record_new_block(&self, block: Block) { block.init(); - self.chunk_map.set(block.chunk(), ChunkState::Allocated); + self.chunk_map.set_allocated(block.chunk(), true); } pub fn prepare(&mut self, full_heap: bool) { @@ -567,7 +567,7 @@ struct PrepareChunkMap { impl GCWork for PrepareChunkMap { fn do_work(&mut self, _worker: &mut GCWorker, _mmtk: &'static MMTK) { - debug_assert!(self.space.chunk_map.get(self.chunk) == ChunkState::Allocated); + debug_assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated()); // number of allocated blocks. let mut n_occupied_blocks = 0; self.chunk @@ -581,7 +581,7 @@ impl GCWork for PrepareChunkMap { }); if n_occupied_blocks == 0 { // Set this chunk as free if there is no live blocks. - self.space.chunk_map.set(self.chunk, ChunkState::Free) + self.space.chunk_map.set_allocated(self.chunk, false) } else { // Otherwise this chunk is occupied, and we reset the mark bit if it is on the side. if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC { @@ -617,7 +617,7 @@ struct SweepChunk { impl GCWork for SweepChunk { fn do_work(&mut self, _worker: &mut GCWorker, _mmtk: &'static MMTK) { - assert_eq!(self.space.chunk_map.get(self.chunk), ChunkState::Allocated); + assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated()); // number of allocated blocks. let mut allocated_blocks = 0; @@ -636,7 +636,7 @@ impl GCWork for SweepChunk { probe!(mmtk, sweep_chunk, allocated_blocks); // Set this chunk as free if there is not live blocks. 
if allocated_blocks == 0 { - self.space.chunk_map.set(self.chunk, ChunkState::Free) + self.space.chunk_map.set_allocated(self.chunk, false); } self.epilogue.finish_one_work_packet(); } diff --git a/src/util/heap/chunk_map.rs b/src/util/heap/chunk_map.rs index 26912b3f89..d07601c1d7 100644 --- a/src/util/heap/chunk_map.rs +++ b/src/util/heap/chunk_map.rs @@ -44,20 +44,68 @@ impl Chunk { } } -/// Chunk allocation state -#[repr(u8)] -#[derive(Debug, PartialEq, Clone, Copy)] -pub enum ChunkState { - /// The chunk is not allocated. - Free = 0, - /// The chunk is allocated. - Allocated = 1, +/// The allocation state for a chunk in the chunk map. It includes whether each chunk is allocated or free, and the space the chunk belongs to. +/// Highest bit: 0 = free, 1 = allocated +/// Lower 4 bits: Space index (0-15) +#[repr(transparent)] +#[derive(PartialEq, Clone, Copy)] +pub struct ChunkState(u8); + +impl ChunkState { + const ALLOC_BIT_MASK: u8 = 0x80; + const SPACE_INDEX_MASK: u8 = 0x0F; + + /// Create a new ChunkState that represents being allocated in the given space + pub fn allocated(space_index: usize) -> ChunkState { + debug_assert!(space_index < crate::util::heap::layout::heap_parameters::MAX_SPACES); + let mut encode = space_index as u8; + encode |= Self::ALLOC_BIT_MASK; + ChunkState(encode) + } + /// Create a new ChunkState that represents being free in the given space + pub fn free(space_index: usize) -> ChunkState { + debug_assert!(space_index < crate::util::heap::layout::heap_parameters::MAX_SPACES); + ChunkState(space_index as u8) + } + /// Is the chunk free? + pub fn is_free(&self) -> bool { + self.0 & Self::ALLOC_BIT_MASK == 0 + } + /// Is the chunk allocated? + pub fn is_allocated(&self) -> bool { + !self.is_free() + } + /// Get the space index of the chunk + pub fn get_space_index(&self) -> usize { + let index = (self.0 & Self::SPACE_INDEX_MASK) as usize; + debug_assert!(index < crate::util::heap::layout::heap_parameters::MAX_SPACES); + index + } +} + +impl std::fmt::Debug for ChunkState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if self.is_free() { + write!(f, "Free({})", self.get_space_index()) + } else { + write!(f, "Allocated({})", self.get_space_index()) + } + } } /// A byte-map to record all the allocated chunks. /// A plan can use this to maintain records for the chunks that they used, and the states of the chunks. -/// Any plan that uses the chunk map should include the `ALLOC_TABLE` spec in their local sidemetadata specs +/// Any plan that uses the chunk map should include the `ALLOC_TABLE` spec in their local sidemetadata specs. +/// +/// A chunk map is created for a space (identified by the space index), and will only update or list chunks for that space. pub struct ChunkMap { + /// The space that uses this chunk map. + space_index: usize, + /// The range of chunks that are used by the space. The range only records the lowest chunk and the highest chunk. + /// All the chunks that are used for the space are within the range, but not necessarily that all the chunks in the range + /// are used for the space. Spaces may be discontiguous, thus the range may include chunks that do not belong to the space. + /// We need to use the space index in the chunk map and the space index encoded with the chunk state to know if + /// the chunk belongs to the current space. 
chunk_range: Mutex>, } @@ -66,22 +114,40 @@ impl ChunkMap { pub const ALLOC_TABLE: SideMetadataSpec = crate::util::metadata::side_metadata::spec_defs::CHUNK_MARK; - pub fn new() -> Self { + pub fn new(space_index: usize) -> Self { Self { + space_index, chunk_range: Mutex::new(Chunk::ZERO..Chunk::ZERO), } } - /// Set chunk state - pub fn set(&self, chunk: Chunk, state: ChunkState) { + /// Set a chunk as allocated, or as free. + pub fn set_allocated(&self, chunk: Chunk, allocated: bool) { + let state = if allocated { + ChunkState::allocated(self.space_index) + } else { + ChunkState::free(self.space_index) + }; // Do nothing if the chunk is already in the expected state. - if self.get(chunk) == state { + if self.get_any(chunk) == state { return; } + #[cfg(debug_assertions)] + { + let old_state = self.get_any(chunk); + // If a chunk is free, any space may use it. If a chunk is not free, only the current space may update its state. + assert!( + old_state.is_free() || old_state.get_space_index() == state.get_space_index(), + "Chunk {:?}: old state {:?}, new state {:?}. Cannot set to new state.", + chunk, + old_state, + state + ); + } // Update alloc byte - unsafe { Self::ALLOC_TABLE.store::(chunk.start(), state as u8) }; + unsafe { Self::ALLOC_TABLE.store::(chunk.start(), state.0) }; // If this is a newly allcoated chunk, then expand the chunk range. - if state == ChunkState::Allocated { + if state.is_allocated() { debug_assert!(!chunk.start().is_zero()); let mut range = self.chunk_range.lock(); if range.start == Chunk::ZERO { @@ -96,20 +162,30 @@ impl ChunkMap { } } - /// Get chunk state - pub fn get(&self, chunk: Chunk) -> ChunkState { + /// Get chunk state. Return None if the chunk does not belong to the space. + pub fn get(&self, chunk: Chunk) -> Option { + let state = self.get_any(chunk); + (state.get_space_index() == self.space_index).then_some(state) + } + + /// Get chunk state, regardless of the space. This should always be private. + fn get_any(&self, chunk: Chunk) -> ChunkState { let byte = unsafe { Self::ALLOC_TABLE.load::(chunk.start()) }; - match byte { - 0 => ChunkState::Free, - 1 => ChunkState::Allocated, - _ => unreachable!(), - } + ChunkState(byte) + } + + /// A range of all chunks in the heap. + pub fn all_chunks(&self) -> impl Iterator + '_ { + let chunk_range = self.chunk_range.lock(); + RegionIterator::::new(chunk_range.start, chunk_range.end) + .filter(|c| self.get(*c).is_some()) } /// A range of all chunks in the heap. - pub fn all_chunks(&self) -> RegionIterator { + pub fn all_allocated_chunks(&self) -> impl Iterator + '_ { let chunk_range = self.chunk_range.lock(); RegionIterator::::new(chunk_range.start, chunk_range.end) + .filter(|c| self.get(*c).is_some_and(|state| state.is_allocated())) } /// Helper function to create per-chunk processing work packets for each allocated chunks. 
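// --- Illustrative sketch, not part of the patch: a standalone model of the ChunkState byte
// encoding introduced above (highest bit = allocated, low 4 bits = space index). The masks
// mirror ChunkState::ALLOC_BIT_MASK and ChunkState::SPACE_INDEX_MASK; the cap of 16 spaces
// is an assumption that matches the 4-bit index field.
const ALLOC_BIT_MASK: u8 = 0x80;
const SPACE_INDEX_MASK: u8 = 0x0F;

fn encode(space_index: u8, allocated: bool) -> u8 {
    assert!(space_index <= SPACE_INDEX_MASK, "space index must fit in 4 bits");
    if allocated { space_index | ALLOC_BIT_MASK } else { space_index }
}

fn main() {
    let state = encode(5, true);
    assert_ne!(state & ALLOC_BIT_MASK, 0);   // allocated
    assert_eq!(state & SPACE_INDEX_MASK, 5); // space index is preserved
    let freed = encode(5, false);
    assert_eq!(freed & ALLOC_BIT_MASK, 0);   // free, same space index
    assert_eq!(freed & SPACE_INDEX_MASK, 5);
}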
@@ -118,18 +194,9 @@ impl ChunkMap { func: impl Fn(Chunk) -> Box>, ) -> Vec>> { let mut work_packets: Vec>> = vec![]; - for chunk in self - .all_chunks() - .filter(|c| self.get(*c) == ChunkState::Allocated) - { + for chunk in self.all_allocated_chunks() { work_packets.push(func(chunk)); } work_packets } } - -impl Default for ChunkMap { - fn default() -> Self { - Self::new() - } -} diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs index 8730602a4b..387d2f1d75 100644 --- a/src/util/metadata/side_metadata/global.rs +++ b/src/util/metadata/side_metadata/global.rs @@ -1345,6 +1345,11 @@ impl SideMetadataContext { } } + // Any plan that uses the chunk map needs to reserve the chunk map table. + // As we use either the mark sweep or (non moving) immix as the non moving space, + // and both policies use the chunk map, we just add the chunk map table globally. + ret.push(crate::util::heap::chunk_map::ChunkMap::ALLOC_TABLE); + ret.extend_from_slice(specs); ret } diff --git a/src/util/metadata/side_metadata/spec_defs.rs b/src/util/metadata/side_metadata/spec_defs.rs index be7a7730d3..621f11a3cf 100644 --- a/src/util/metadata/side_metadata/spec_defs.rs +++ b/src/util/metadata/side_metadata/spec_defs.rs @@ -60,6 +60,8 @@ define_side_metadata_specs!( MS_ACTIVE_CHUNK = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK), // Track the index in SFT map for a chunk (only used for SFT sparse chunk map) SFT_DENSE_CHUNK_MAP_INDEX = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK), + // Mark chunks (any plan that uses the chunk map should include this spec in their local sidemetadata specs) + CHUNK_MARK = (global: true, log_num_of_bits: 3, log_bytes_in_region: crate::util::heap::chunk_map::Chunk::LOG_BYTES), ); // This defines all LOCAL side metadata used by mmtk-core. 
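// --- Illustrative sketch, not part of the patch: the CHUNK_MARK spec above uses
// log_num_of_bits = 3 (one metadata byte per chunk) and one region per chunk
// (log_bytes_in_region = Chunk::LOG_BYTES), so a chunk's byte in the side-metadata table
// is found by shifting the address by the chunk size. LOG_BYTES_IN_CHUNK = 22 (4 MiB
// chunks) is an assumption here; the real constant lives in mmtk's heap parameters.
const LOG_BYTES_IN_CHUNK: usize = 22;

fn chunk_mark_index(addr: usize) -> usize {
    addr >> LOG_BYTES_IN_CHUNK
}

fn main() {
    let a = 0x4000_0000usize;              // some chunk-aligned address
    let b = a + (1 << LOG_BYTES_IN_CHUNK); // the next chunk
    assert_eq!(chunk_mark_index(b), chunk_mark_index(a) + 1);
}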
@@ -75,8 +77,6 @@ define_side_metadata_specs!( IX_BLOCK_DEFRAG = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::immix::block::Block::LOG_BYTES), // Mark blocks by immix IX_BLOCK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::immix::block::Block::LOG_BYTES), - // Mark chunks (any plan that uses the chunk map should include this spec in their local sidemetadata specs) - CHUNK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::util::heap::chunk_map::Chunk::LOG_BYTES), // Mark blocks by (native mimalloc) marksweep MS_BLOCK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::marksweepspace::native_ms::Block::LOG_BYTES), // Next block in list for native mimalloc diff --git a/src/util/object_enum.rs b/src/util/object_enum.rs index 18508f088b..776885e11b 100644 --- a/src/util/object_enum.rs +++ b/src/util/object_enum.rs @@ -5,10 +5,7 @@ use std::marker::PhantomData; use crate::vm::VMBinding; use super::{ - heap::{ - chunk_map::{ChunkMap, ChunkState}, - MonotonePageResource, - }, + heap::{chunk_map::ChunkMap, MonotonePageResource}, linear_scan::Region, metadata::{side_metadata::spec_defs::VO_BIT, vo_bit}, Address, ObjectReference, @@ -84,12 +81,10 @@ pub(crate) fn enumerate_blocks_from_chunk_map( ) where B: BlockMayHaveObjects, { - for chunk in chunk_map.all_chunks() { - if chunk_map.get(chunk) == ChunkState::Allocated { - for block in chunk.iter_region::() { - if block.may_have_objects() { - enumerator.visit_address_range(block.start(), block.end()); - } + for chunk in chunk_map.all_allocated_chunks() { + for block in chunk.iter_region::() { + if block.may_have_objects() { + enumerator.visit_address_range(block.start(), block.end()); } } } diff --git a/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs new file mode 100644 index 0000000000..82d1c5e958 --- /dev/null +++ b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs @@ -0,0 +1,37 @@ +// GITHUB-CI: MMTK_PLAN=all + +use super::mock_test_prelude::*; +use crate::plan::AllocationSemantics; + +#[test] +pub fn allocate_nonmoving() { + with_mockvm( + || -> MockVM { + MockVM { + is_collection_enabled: MockMethod::new_fixed(Box::new(|_| false)), + ..MockVM::default() + } + }, + || { + // 1MB heap + const MB: usize = 1024 * 1024; + let mut fixture = MutatorFixture::create_with_heapsize(MB); + + // Normal alloc + let addr = + memory_manager::alloc(&mut fixture.mutator, 16, 8, 0, AllocationSemantics::Default); + assert!(!addr.is_zero()); + + // Non moving alloc + let addr = memory_manager::alloc( + &mut fixture.mutator, + 16, + 8, + 0, + AllocationSemantics::NonMoving, + ); + assert!(!addr.is_zero()); + }, + no_cleanup, + ) +} diff --git a/src/vm/tests/mock_tests/mod.rs b/src/vm/tests/mock_tests/mod.rs index aab9aafd8b..55b08b3b12 100644 --- a/src/vm/tests/mock_tests/mod.rs +++ b/src/vm/tests/mock_tests/mod.rs @@ -24,6 +24,7 @@ pub(crate) mod mock_test_prelude { } mod mock_test_allocate_align_offset; +mod mock_test_allocate_nonmoving; mod mock_test_allocate_with_disable_collection; mod mock_test_allocate_with_initialize_collection; mod mock_test_allocate_with_re_enable_collection; From e885f63b4245a0eccbb3c63a625151e7c1935e81 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 23 Apr 2025 00:15:24 +0000 Subject: [PATCH 2/4] Squashed commit of the following: commit 530d80c6b03d1006750cf50a01c2933b4d3c6b5f Author: Yi Lin Date: Tue Apr 22 04:03:27 2025 +0000 Fix commit 
836cac59d5fd3030fb35054aa8f0df894d050ba0 Author: Yi Lin Date: Thu Apr 17 04:27:11 2025 +0000 Allow configuring each Immix space to be non moving --- src/plan/generational/immix/global.rs | 7 ++-- src/plan/immix/global.rs | 6 ++- src/plan/sticky/immix/global.rs | 8 ++-- src/policy/immix/defrag.rs | 9 ++--- src/policy/immix/immixspace.rs | 57 ++++++++++++++++++++------- src/policy/immix/mod.rs | 25 ------------ 6 files changed, 58 insertions(+), 54 deletions(-) diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index 80d2fbc2f5..09a53e26cf 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -87,9 +87,9 @@ impl Plan for GenImmix { fn last_collection_was_exhaustive(&self) -> bool { self.last_gc_was_full_heap.load(Ordering::Relaxed) - && ImmixSpace::::is_last_gc_exhaustive( - self.last_gc_was_defrag.load(Ordering::Relaxed), - ) + && self + .immix_space + .is_last_gc_exhaustive(self.last_gc_was_defrag.load(Ordering::Relaxed)) } fn collection_required(&self, space_full: bool, space: Option>) -> bool @@ -254,6 +254,7 @@ impl GenImmix { // In GenImmix, young objects are not allocated in ImmixSpace directly. #[cfg(feature = "vo_bit")] mixed_age: false, + never_move_objects: cfg!(feature = "immix_non_moving"), }, ); diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 2e0e665f78..2be8b3d3a5 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -38,7 +38,7 @@ pub struct Immix { /// The plan constraints for the immix plan. pub const IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { - moves_objects: crate::policy::immix::DEFRAG, + moves_objects: true, // Max immix object size is half of a block. max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE, needs_prepare_mutator: false, @@ -51,7 +51,8 @@ impl Plan for Immix { } fn last_collection_was_exhaustive(&self) -> bool { - ImmixSpace::::is_last_gc_exhaustive(self.last_gc_was_defrag.load(Ordering::Relaxed)) + self.immix_space + .is_last_gc_exhaustive(self.last_gc_was_defrag.load(Ordering::Relaxed)) } fn constraints(&self) -> &'static PlanConstraints { @@ -139,6 +140,7 @@ impl Immix { unlog_object_when_traced: false, #[cfg(feature = "vo_bit")] mixed_age: false, + never_move_objects: cfg!(feature = "immix_non_moving"), }, ) } diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs index 7e78347477..3d2ee99f6b 100644 --- a/src/plan/sticky/immix/global.rs +++ b/src/plan/sticky/immix/global.rs @@ -7,7 +7,6 @@ use crate::plan::PlanConstraints; use crate::policy::gc_work::TraceKind; use crate::policy::gc_work::TRACE_KIND_TRANSITIVE_PIN; use crate::policy::immix::ImmixSpace; -use crate::policy::immix::PREFER_COPY_ON_NURSERY_GC; use crate::policy::immix::TRACE_KIND_FAST; use crate::policy::sft::SFT; use crate::policy::space::Space; @@ -41,7 +40,7 @@ pub struct StickyImmix { /// The plan constraints for the sticky immix plan. pub const STICKY_IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { - moves_objects: crate::policy::immix::DEFRAG || crate::policy::immix::PREFER_COPY_ON_NURSERY_GC, + moves_objects: true, needs_log_bit: true, barrier: crate::plan::BarrierSelector::ObjectBarrier, // We may trace duplicate edges in sticky immix (or any plan that uses object remembering barrier). See https://github.com/mmtk/mmtk-core/issues/743. 
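// --- Illustrative sketch, not part of the patch: the per-space `never_move_objects` flag
// added to ImmixSpaceArgs replaces the old crate-wide DEFRAG / PREFER_COPY_ON_NURSERY_GC /
// NEVER_MOVE_OBJECTS constants. The types below are simplified stand-ins for ImmixSpaceArgs
// and ImmixSpace, showing how the run-time flag drives the derived queries in the patch.
struct SpaceArgs { never_move_objects: bool }
struct Space { space_args: SpaceArgs }

impl Space {
    fn is_movable(&self) -> bool {
        !self.space_args.never_move_objects
    }
    fn is_defrag_enabled(&self) -> bool {
        !self.space_args.never_move_objects
    }
}

fn main() {
    // A normally configured ImmixSpace keeps moving/defrag enabled...
    let default_space = Space { space_args: SpaceArgs { never_move_objects: false } };
    assert!(default_space.is_movable() && default_space.is_defrag_enabled());
    // ...while a space created with never_move_objects: true (e.g. the nonmoving space)
    // reports itself as non-movable and never defrags.
    let nonmoving = Space { space_args: SpaceArgs { never_move_objects: true } };
    assert!(!nonmoving.is_movable() && !nonmoving.is_defrag_enabled());
}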
@@ -164,7 +163,7 @@ impl Plan for StickyImmix { fn current_gc_may_move_object(&self) -> bool { if self.is_current_gc_nursery() { - PREFER_COPY_ON_NURSERY_GC + self.get_immix_space().prefer_copy_on_nursery_gc() } else { self.get_immix_space().in_defrag() } @@ -263,7 +262,7 @@ impl crate::plan::generational::global::GenerationalPlanExt f self.immix .immix_space .trace_object_without_moving(queue, object) - } else if crate::policy::immix::PREFER_COPY_ON_NURSERY_GC { + } else if self.immix.immix_space.prefer_copy_on_nursery_gc() { let ret = self.immix.immix_space.trace_object_with_opportunistic_copy( queue, object, @@ -330,6 +329,7 @@ impl StickyImmix { // In StickyImmix, both young and old objects are allocated in the ImmixSpace. #[cfg(feature = "vo_bit")] mixed_age: true, + never_move_objects: cfg!(feature = "immix_non_moving"), }, ); Self { diff --git a/src/policy/immix/defrag.rs b/src/policy/immix/defrag.rs index 8861e30bdc..fb1fe9ea0e 100644 --- a/src/policy/immix/defrag.rs +++ b/src/policy/immix/defrag.rs @@ -63,8 +63,10 @@ impl Defrag { } /// Determine whether the current GC should do defragmentation. + #[allow(clippy::too_many_arguments)] pub fn decide_whether_to_defrag( &self, + defrag_enabled: bool, emergency_collection: bool, collect_whole_heap: bool, collection_attempts: usize, @@ -72,7 +74,7 @@ impl Defrag { exhausted_reusable_space: bool, full_heap_system_gc: bool, ) { - let in_defrag = super::DEFRAG + let in_defrag = defrag_enabled && (emergency_collection || (collection_attempts > 1) || !exhausted_reusable_space @@ -116,9 +118,8 @@ impl Defrag { } /// Prepare work. Should be called in ImmixSpace::prepare. - #[allow(clippy::assertions_on_constants)] pub fn prepare(&self, space: &ImmixSpace, plan_stats: StatsForDefrag) { - debug_assert!(super::DEFRAG); + debug_assert!(space.is_defrag_enabled()); self.defrag_space_exhausted.store(false, Ordering::Release); // Calculate available free space for defragmentation. @@ -207,9 +208,7 @@ impl Defrag { } /// Reset the in-defrag state. - #[allow(clippy::assertions_on_constants)] pub fn reset_in_defrag(&self) { - debug_assert!(super::DEFRAG); self.in_defrag_collection.store(false, Ordering::Release); } } diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 49b5691cd5..5e55fc194a 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -73,6 +73,8 @@ pub struct ImmixSpaceArgs { // Currently only used when "vo_bit" is enabled. Using #[cfg(...)] to eliminate dead code warning. #[cfg(feature = "vo_bit")] pub mixed_age: bool, + /// Disable copying for this Immix space. + pub never_move_objects: bool, } unsafe impl Sync for ImmixSpace {} @@ -84,7 +86,7 @@ impl SFT for ImmixSpace { fn get_forwarded_object(&self, object: ObjectReference) -> Option { // If we never move objects, look no further. - if super::NEVER_MOVE_OBJECTS { + if !self.is_movable() { return None; } @@ -102,7 +104,7 @@ impl SFT for ImmixSpace { } // If we never move objects, look no further. - if super::NEVER_MOVE_OBJECTS { + if !self.is_movable() { return false; } @@ -122,7 +124,7 @@ impl SFT for ImmixSpace { VM::VMObjectModel::LOCAL_PINNING_BIT_SPEC.is_object_pinned::(object) } fn is_movable(&self) -> bool { - !super::NEVER_MOVE_OBJECTS + !self.space_args.never_move_objects } #[cfg(feature = "sanity")] @@ -276,12 +278,13 @@ impl ImmixSpace { args: crate::policy::space::PlanCreateSpaceArgs, space_args: ImmixSpaceArgs, ) -> Self { - #[cfg(feature = "immix_non_moving")] - info!( - "Creating non-moving ImmixSpace: {}. 
Block size: 2^{}", - args.name, - Block::LOG_BYTES - ); + if space_args.never_move_objects { + info!( + "Creating non-moving ImmixSpace: {}. Block size: 2^{}", + args.name, + Block::LOG_BYTES + ); + } if space_args.unlog_object_when_traced { assert!( @@ -290,7 +293,18 @@ impl ImmixSpace { ); } - super::validate_features(); + // validate features + if super::BLOCK_ONLY { + assert!( + space_args.never_move_objects, + "Block-only immix must not move objects" + ); + } + assert!( + Block::LINES / 2 <= u8::MAX as usize - 2, + "Number of lines in a block should not exceed BlockState::MARK_MARKED" + ); + #[cfg(feature = "vo_bit")] vo_bit::helper::validate_config::(); let vm_map = args.vm_map; @@ -355,6 +369,7 @@ impl ImmixSpace { full_heap_system_gc: bool, ) -> bool { self.defrag.decide_whether_to_defrag( + self.is_defrag_enabled(), emergency_collection, collect_whole_heap, collection_attempts, @@ -389,7 +404,7 @@ impl ImmixSpace { } // Prepare defrag info - if super::DEFRAG { + if self.is_defrag_enabled() { self.defrag.prepare(self, plan_stats); } @@ -482,7 +497,7 @@ impl ImmixSpace { /// Return whether this GC was a defrag GC, as a plan may want to know this. pub fn end_of_gc(&mut self) -> bool { let did_defrag = self.defrag.in_defrag(); - if super::DEFRAG { + if self.is_defrag_enabled() { self.defrag.reset_in_defrag(); } did_defrag @@ -805,8 +820,8 @@ impl ImmixSpace { Some((start, end)) } - pub fn is_last_gc_exhaustive(did_defrag_for_last_gc: bool) -> bool { - if super::DEFRAG { + pub fn is_last_gc_exhaustive(&self, did_defrag_for_last_gc: bool) -> bool { + if self.is_defrag_enabled() { did_defrag_for_last_gc } else { // If defrag is disabled, every GC is exhaustive. @@ -832,6 +847,18 @@ impl ImmixSpace { self.mark_lines(object); } } + + pub(crate) fn prefer_copy_on_nursery_gc(&self) -> bool { + self.is_nursery_copy_enabled() + } + + pub(crate) fn is_nursery_copy_enabled(&self) -> bool { + !self.space_args.never_move_objects && !cfg!(feature = "sticky_immix_non_moving_nursery") + } + + pub(crate) fn is_defrag_enabled(&self) -> bool { + !self.space_args.never_move_objects + } } /// A work packet to prepare each block for a major GC. @@ -866,7 +893,7 @@ impl GCWork for PrepareBlockState { continue; } // Check if this block needs to be defragmented. - let is_defrag_source = if !super::DEFRAG { + let is_defrag_source = if !self.space.is_defrag_enabled() { // Do not set any block as defrag source if defrag is disabled. false } else if super::DEFRAG_EVERY_BLOCK { diff --git a/src/policy/immix/mod.rs b/src/policy/immix/mod.rs index 5e34caaf52..13e1637297 100644 --- a/src/policy/immix/mod.rs +++ b/src/policy/immix/mod.rs @@ -14,9 +14,6 @@ pub const MAX_IMMIX_OBJECT_SIZE: usize = Block::BYTES >> 1; /// Mark/sweep memory for block-level only pub const BLOCK_ONLY: bool = false; -/// Do we allow Immix to do defragmentation? -pub const DEFRAG: bool = !cfg!(feature = "immix_non_moving"); // defrag if we are allowed to move. - // STRESS COPYING: Set the feature 'immix_stress_copying' so that Immix will copy as many objects as possible. // Useful for debugging copying GC if you cannot use SemiSpace. // @@ -46,28 +43,6 @@ pub const DEFRAG_HEADROOM_PERCENT: usize = if cfg!(feature = "immix_stress_copyi 2 }; -/// If Immix is used as a nursery space, do we prefer copy? -pub const PREFER_COPY_ON_NURSERY_GC: bool = - !cfg!(feature = "immix_non_moving") && !cfg!(feature = "sticky_immix_non_moving_nursery"); // copy nursery objects if we are allowed to move. 
- -/// In some cases/settings, Immix may never move objects. -/// Currently we only have two cases where we move objects: 1. defrag, 2. nursery copy. -/// If we do neither, we will not move objects. -/// If we have other reasons to move objects, we need to add them here. -pub const NEVER_MOVE_OBJECTS: bool = !DEFRAG && !PREFER_COPY_ON_NURSERY_GC; - /// Mark lines when scanning objects. /// Otherwise, do it at mark time. pub const MARK_LINE_AT_SCAN_TIME: bool = true; - -macro_rules! validate { - ($x: expr) => { assert!($x, stringify!($x)) }; - ($x: expr => $y: expr) => { if $x { assert!($y, stringify!($x implies $y)) } }; -} - -fn validate_features() { - // Block-only immix cannot do defragmentation - validate!(DEFRAG => !BLOCK_ONLY); - // Number of lines in a block should not exceed BlockState::MARK_MARKED - assert!(Block::LINES / 2 <= u8::MAX as usize - 2); -} From de4d5f812ad881fdc88ae260ab25d53732bb1537 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 17 Apr 2025 00:19:12 +0000 Subject: [PATCH 3/4] Use Immix as non moving space --- src/plan/generational/immix/global.rs | 2 +- src/plan/global.rs | 19 +++++--- src/plan/immix/global.rs | 2 +- src/plan/mutator_context.rs | 46 ++++++++++++++++--- src/plan/sticky/immix/global.rs | 2 +- src/policy/immix/immixspace.rs | 4 +- src/util/alloc/allocators.rs | 2 +- src/util/heap/chunk_map.rs | 6 +++ .../mock_test_allocate_nonmoving.rs | 2 + 9 files changed, 66 insertions(+), 19 deletions(-) diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index 09a53e26cf..6bc73f6de5 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -131,7 +131,7 @@ impl Plan for GenImmix { if full_heap { self.immix_space.prepare( full_heap, - crate::policy::immix::defrag::StatsForDefrag::new(self), + Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), ); } } diff --git a/src/plan/global.rs b/src/plan/global.rs index 062cdc969b..1dd4c7c383 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -7,6 +7,8 @@ use crate::plan::tracing::ObjectQueue; use crate::plan::Mutator; use crate::policy::immortalspace::ImmortalSpace; use crate::policy::largeobjectspace::LargeObjectSpace; +use crate::policy::marksweepspace::native_ms::MarkSweepSpace; +use crate::policy::immix::ImmixSpace; use crate::policy::space::{PlanCreateSpaceArgs, Space}; #[cfg(feature = "vm_space")] use crate::policy::vmspace::VMSpace; @@ -553,7 +555,7 @@ pub struct CommonPlan { pub los: LargeObjectSpace, // TODO: We should use a marksweep space for nonmoving. 
#[space] - pub nonmoving: ImmortalSpace, + pub nonmoving: ImmixSpace, #[parent] pub base: BasePlan, } @@ -571,12 +573,17 @@ impl CommonPlan { args.get_space_args("los", true, false, VMRequest::discontiguous()), false, ), - nonmoving: ImmortalSpace::new(args.get_space_args( + nonmoving: ImmixSpace::new(args.get_space_args( "nonmoving", true, false, VMRequest::discontiguous(), - )), + ), crate::policy::immix::ImmixSpaceArgs { + unlog_object_when_traced: false, + #[cfg(feature = "vo_bit")] + mixed_age: false, + never_move_objects: true, + }), base: BasePlan::new(args), } } @@ -591,14 +598,14 @@ impl CommonPlan { pub fn prepare(&mut self, tls: VMWorkerThread, full_heap: bool) { self.immortal.prepare(); self.los.prepare(full_heap); - self.nonmoving.prepare(); + self.nonmoving.prepare(full_heap, None); self.base.prepare(tls, full_heap) } pub fn release(&mut self, tls: VMWorkerThread, full_heap: bool) { self.immortal.release(); self.los.release(full_heap); - self.nonmoving.release(); + self.nonmoving.release(full_heap); self.base.release(tls, full_heap) } @@ -610,7 +617,7 @@ impl CommonPlan { &self.los } - pub fn get_nonmoving(&self) -> &ImmortalSpace { + pub fn get_nonmoving(&self) -> &ImmixSpace { &self.nonmoving } } diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 2be8b3d3a5..e6a779b080 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -87,7 +87,7 @@ impl Plan for Immix { self.common.prepare(tls, true); self.immix_space.prepare( true, - crate::policy::immix::defrag::StatsForDefrag::new(self), + Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), ); } diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index c72c44f140..382fa6de03 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -5,7 +5,7 @@ use crate::plan::global::Plan; use crate::plan::AllocationSemantics; use crate::policy::space::Space; use crate::util::alloc::allocators::{AllocatorSelector, Allocators}; -use crate::util::alloc::Allocator; +use crate::util::alloc::{Allocator, FreeListAllocator, ImmixAllocator}; use crate::util::{Address, ObjectReference}; use crate::util::{VMMutatorThread, VMWorkerThread}; use crate::vm::VMBinding; @@ -27,6 +27,19 @@ pub(crate) fn unreachable_prepare_func( unreachable!("`MutatorConfig::prepare_func` must not be called for the current plan.") } +/// An mutator prepare implementation for plans that use [`crate::plan::global::CommonPlan`]. +pub(crate) fn common_prepare_func(mutator: &mut Mutator, _tls: VMWorkerThread) { + // Prepare the free list allocator used for non moving + // unsafe { + // mutator + // .allocators + // .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + // } + // .downcast_mut::>() + // .unwrap() + // .prepare(); +} + /// A place-holder implementation for `MutatorConfig::release_func` that should not be called. /// Currently only used by `NoGC`. pub(crate) fn unreachable_release_func( @@ -36,6 +49,27 @@ pub(crate) fn unreachable_release_func( unreachable!("`MutatorConfig::release_func` must not be called for the current plan.") } +/// An mutator release implementation for plans that use [`crate::plan::global::CommonPlan`]. 
+pub(crate) fn common_release_func(mutator: &mut Mutator, _tls: VMWorkerThread) { + // // Release the free list allocator used for non moving + // unsafe { + // mutator + // .allocators + // .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + // } + // .downcast_mut::>() + // .unwrap() + // .release(); + let immix_allocator = unsafe { + mutator + .allocators + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + } + .downcast_mut::>() + .unwrap(); + immix_allocator.reset(); +} + /// A place-holder implementation for `MutatorConfig::release_func` that does nothing. pub(crate) fn no_op_release_func(_mutator: &mut Mutator, _tls: VMWorkerThread) {} @@ -455,10 +489,8 @@ pub(crate) fn create_allocator_mapping( map[AllocationSemantics::Los] = AllocatorSelector::LargeObject(reserved.n_large_object); reserved.n_large_object += 1; - // TODO: This should be freelist allocator once we use marksweep for nonmoving space. - map[AllocationSemantics::NonMoving] = - AllocatorSelector::BumpPointer(reserved.n_bump_pointer); - reserved.n_bump_pointer += 1; + map[AllocationSemantics::NonMoving] = AllocatorSelector::Immix(reserved.n_immix); + reserved.n_immix += 1; } reserved.validate(); @@ -522,10 +554,10 @@ pub(crate) fn create_space_mapping( reserved.n_large_object += 1; // TODO: This should be freelist allocator once we use marksweep for nonmoving space. vec.push(( - AllocatorSelector::BumpPointer(reserved.n_bump_pointer), + AllocatorSelector::Immix(reserved.n_immix), plan.common().get_nonmoving(), )); - reserved.n_bump_pointer += 1; + reserved.n_immix += 1; } reserved.validate(); diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs index 3d2ee99f6b..2998d96b04 100644 --- a/src/plan/sticky/immix/global.rs +++ b/src/plan/sticky/immix/global.rs @@ -116,7 +116,7 @@ impl Plan for StickyImmix { // Prepare both large object space and immix space self.immix.immix_space.prepare( false, - crate::policy::immix::defrag::StatsForDefrag::new(self), + Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), ); self.immix.common.los.prepare(false); } else { diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 5e55fc194a..6f5758d8c9 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -385,7 +385,7 @@ impl ImmixSpace { &self.scheduler } - pub fn prepare(&mut self, major_gc: bool, plan_stats: StatsForDefrag) { + pub fn prepare(&mut self, major_gc: bool, plan_stats: Option) { if major_gc { // Update mark_state if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.is_on_side() { @@ -405,7 +405,7 @@ impl ImmixSpace { // Prepare defrag info if self.is_defrag_enabled() { - self.defrag.prepare(self, plan_stats); + self.defrag.prepare(self, plan_stats.unwrap()); } // Prepare each block for GC diff --git a/src/util/alloc/allocators.rs b/src/util/alloc/allocators.rs index 58b591b1a8..58dd790ae2 100644 --- a/src/util/alloc/allocators.rs +++ b/src/util/alloc/allocators.rs @@ -22,7 +22,7 @@ use super::MarkCompactAllocator; pub(crate) const MAX_BUMP_ALLOCATORS: usize = 6; pub(crate) const MAX_LARGE_OBJECT_ALLOCATORS: usize = 2; pub(crate) const MAX_MALLOC_ALLOCATORS: usize = 1; -pub(crate) const MAX_IMMIX_ALLOCATORS: usize = 1; +pub(crate) const MAX_IMMIX_ALLOCATORS: usize = 2; pub(crate) const MAX_FREE_LIST_ALLOCATORS: usize = 2; pub(crate) const MAX_MARK_COMPACT_ALLOCATORS: usize = 1; diff --git a/src/util/heap/chunk_map.rs b/src/util/heap/chunk_map.rs index d07601c1d7..2f553e7b34 100644 
--- a/src/util/heap/chunk_map.rs +++ b/src/util/heap/chunk_map.rs @@ -174,6 +174,12 @@ impl ChunkMap { ChunkState(byte) } + fn is_my_chunk(&self, chunk: Chunk) -> bool { + let byte = unsafe { Self::ALLOC_TABLE.load::(chunk.start()) }; + let state = ChunkState(byte); + state.get_space_index() == self.space_index + } + /// A range of all chunks in the heap. pub fn all_chunks(&self) -> impl Iterator + '_ { let chunk_range = self.chunk_range.lock(); diff --git a/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs index 82d1c5e958..7c7df436fe 100644 --- a/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs +++ b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs @@ -21,6 +21,7 @@ pub fn allocate_nonmoving() { let addr = memory_manager::alloc(&mut fixture.mutator, 16, 8, 0, AllocationSemantics::Default); assert!(!addr.is_zero()); + info!("Allocated default at: {:#x}", addr); // Non moving alloc let addr = memory_manager::alloc( @@ -31,6 +32,7 @@ pub fn allocate_nonmoving() { AllocationSemantics::NonMoving, ); assert!(!addr.is_zero()); + info!("Allocated nonmoving at: {:#x}", addr); }, no_cleanup, ) From f45d87ee7a319df1a44561dfe7121f374c4e6bd8 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 23 Apr 2025 06:27:53 +0000 Subject: [PATCH 4/4] Choose nonmoving policy based on the feature --- Cargo.toml | 12 +++-- src/plan/global.rs | 66 +++++++++++++++++++++------- src/plan/mutator_context.rs | 88 +++++++++++++++++++++++++------------ 3 files changed, 119 insertions(+), 47 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 340756ecc9..263f057f36 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,7 +82,7 @@ name = "main" harness = false [features] -default = ["builtin_env_logger"] +default = ["builtin_env_logger", "immortal_as_nonmoving"] # Built-in env_logger. This feature is enabled by default. # The user can disable this default feature to remove `env_logger` from the dependencies. @@ -224,9 +224,6 @@ malloc_jemalloc = ["dep:jemalloc-sys"] # is not compiled in default builds. malloc_native_mimalloc = [] -# If there are more groups, they should be inserted above this line -# Group:end - # Group:marksweepallocation # default is native allocator with lazy sweeping eager_sweeping = [] @@ -234,4 +231,11 @@ eager_sweeping = [] # normal heap range, we will have to use chunk-based SFT table. Turning on this feature will use a different SFT map implementation on 64bits, # and will affect all the plans in the build. Please be aware of the consequence, and this is only meant to be experimental use. 
malloc_mark_sweep = [] + +# Group:nonmovingspace +immortal_as_nonmoving = [] +immix_as_nonmoving = [] +marksweep_as_nonmoving = [] + +# If there are more groups, they should be inserted above this line # Group:end diff --git a/src/plan/global.rs b/src/plan/global.rs index 1dd4c7c383..436174165a 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -7,7 +7,6 @@ use crate::plan::tracing::ObjectQueue; use crate::plan::Mutator; use crate::policy::immortalspace::ImmortalSpace; use crate::policy::largeobjectspace::LargeObjectSpace; -use crate::policy::marksweepspace::native_ms::MarkSweepSpace; use crate::policy::immix::ImmixSpace; use crate::policy::space::{PlanCreateSpaceArgs, Space}; #[cfg(feature = "vm_space")] @@ -544,6 +543,15 @@ impl BasePlan { } } +#[cfg(feature = "immortal_as_nonmoving")] +pub type NonMovingSpace = crate::policy::immortalspace::ImmortalSpace; + +#[cfg(feature = "immix_as_nonmoving")] +pub type NonMovingSpace = crate::policy::immix::ImmixSpace; + +#[cfg(feature = "marksweep_as_nonmoving")] +pub type NonMovingSpace = crate::policy::marksweepspace::native_ms::MarkSweepSpace; + /** CommonPlan is for representing state and features used by _many_ plans, but that are not fundamental to _all_ plans. Examples include the Large Object Space and an Immortal space. Features that are fundamental to _all_ plans must be included in BasePlan. */ @@ -555,7 +563,7 @@ pub struct CommonPlan { pub los: LargeObjectSpace, // TODO: We should use a marksweep space for nonmoving. #[space] - pub nonmoving: ImmixSpace, + pub nonmoving: NonMovingSpace, #[parent] pub base: BasePlan, } @@ -573,17 +581,7 @@ impl CommonPlan { args.get_space_args("los", true, false, VMRequest::discontiguous()), false, ), - nonmoving: ImmixSpace::new(args.get_space_args( - "nonmoving", - true, - false, - VMRequest::discontiguous(), - ), crate::policy::immix::ImmixSpaceArgs { - unlog_object_when_traced: false, - #[cfg(feature = "vo_bit")] - mixed_age: false, - never_move_objects: true, - }), + nonmoving: Self::new_nonmoving_space(&mut args), base: BasePlan::new(args), } } @@ -598,14 +596,14 @@ impl CommonPlan { pub fn prepare(&mut self, tls: VMWorkerThread, full_heap: bool) { self.immortal.prepare(); self.los.prepare(full_heap); - self.nonmoving.prepare(full_heap, None); + self.prepare_nonmoving_space(full_heap); self.base.prepare(tls, full_heap) } pub fn release(&mut self, tls: VMWorkerThread, full_heap: bool) { self.immortal.release(); self.los.release(full_heap); - self.nonmoving.release(full_heap); + self.release_nonmoving_space(full_heap); self.base.release(tls, full_heap) } @@ -617,9 +615,45 @@ impl CommonPlan { &self.los } - pub fn get_nonmoving(&self) -> &ImmixSpace { + pub fn get_nonmoving(&self) -> &NonMovingSpace { &self.nonmoving } + + fn new_nonmoving_space(args: &mut CreateSpecificPlanArgs) -> NonMovingSpace { + let space_args = args.get_space_args( + "nonmoving", + true, + false, + VMRequest::discontiguous(), + ); + #[cfg(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving"))] + return NonMovingSpace::new(space_args); + #[cfg(feature = "immix_as_nonmoving")] + return NonMovingSpace::new(space_args, crate::policy::immix::ImmixSpaceArgs { + unlog_object_when_traced: false, + #[cfg(feature = "vo_bit")] + mixed_age: false, + never_move_objects: true, + }); + } + + fn prepare_nonmoving_space(&mut self, _full_heap: bool) { + #[cfg(feature = "immortal_as_nonmoving")] + self.nonmoving.prepare(); + #[cfg(feature = "immix_as_nonmoving")] + self.nonmoving.prepare(_full_heap, None); + 
#[cfg(feature = "marksweep_as_nonmoving")] + self.nonmoving.prepare(_full_heap); + } + + fn release_nonmoving_space(&mut self, _full_heap: bool) { + #[cfg(feature = "immortal_as_nonmoving")] + self.nonmoving.release(); + #[cfg(feature = "immix_as_nonmoving")] + self.nonmoving.release(_full_heap); + #[cfg(feature = "marksweep_as_nonmoving")] + self.nonmoving.release(); + } } use crate::policy::gc_work::TraceKind; diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index 382fa6de03..20603f3b02 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -30,14 +30,15 @@ pub(crate) fn unreachable_prepare_func( /// An mutator prepare implementation for plans that use [`crate::plan::global::CommonPlan`]. pub(crate) fn common_prepare_func(mutator: &mut Mutator, _tls: VMWorkerThread) { // Prepare the free list allocator used for non moving - // unsafe { - // mutator - // .allocators - // .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) - // } - // .downcast_mut::>() - // .unwrap() - // .prepare(); + #[cfg(feature = "marksweep_as_nonmoving")] + unsafe { + mutator + .allocators + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + } + .downcast_mut::>() + .unwrap() + .prepare(); } /// A place-holder implementation for `MutatorConfig::release_func` that should not be called. @@ -51,23 +52,25 @@ pub(crate) fn unreachable_release_func( /// An mutator release implementation for plans that use [`crate::plan::global::CommonPlan`]. pub(crate) fn common_release_func(mutator: &mut Mutator, _tls: VMWorkerThread) { - // // Release the free list allocator used for non moving - // unsafe { - // mutator - // .allocators - // .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) - // } - // .downcast_mut::>() - // .unwrap() - // .release(); + // Release the free list allocator used for non moving + #[cfg(feature = "marksweep_as_nonmoving")] + unsafe { + mutator + .allocators + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + } + .downcast_mut::>() + .unwrap() + .release(); + #[cfg(feature = "immix_as_nonmoving")] let immix_allocator = unsafe { mutator .allocators .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) } .downcast_mut::>() - .unwrap(); - immix_allocator.reset(); + .unwrap() + .reset(); } /// A place-holder implementation for `MutatorConfig::release_func` that does nothing. @@ -489,8 +492,21 @@ pub(crate) fn create_allocator_mapping( map[AllocationSemantics::Los] = AllocatorSelector::LargeObject(reserved.n_large_object); reserved.n_large_object += 1; - map[AllocationSemantics::NonMoving] = AllocatorSelector::Immix(reserved.n_immix); - reserved.n_immix += 1; + #[cfg(feature = "immix_as_nonmoving")] + { + map[AllocationSemantics::NonMoving] = AllocatorSelector::Immix(reserved.n_immix); + reserved.n_immix += 1; + } + #[cfg(feature = "immortal_as_nonmoving")] + { + map[AllocationSemantics::NonMoving] = AllocatorSelector::BumpPointer(reserved.n_bump_pointer); + reserved.n_bump_pointer += 1; + } + #[cfg(feature = "marksweep_as_nonmoving")] + { + map[AllocationSemantics::NonMoving] = AllocatorSelector::FreeList(reserved.n_free_list); + reserved.n_free_list += 1; + } } reserved.validate(); @@ -552,12 +568,30 @@ pub(crate) fn create_space_mapping( plan.common().get_los(), )); reserved.n_large_object += 1; - // TODO: This should be freelist allocator once we use marksweep for nonmoving space. 
- vec.push(( - AllocatorSelector::Immix(reserved.n_immix), - plan.common().get_nonmoving(), - )); - reserved.n_immix += 1; + #[cfg(feature = "immix_as_nonmoving")] + { + vec.push(( + AllocatorSelector::Immix(reserved.n_immix), + plan.common().get_nonmoving(), + )); + reserved.n_immix += 1; + } + #[cfg(feature = "marksweep_as_nonmoving")] + { + vec.push(( + AllocatorSelector::FreeList(reserved.n_free_list), + plan.common().get_nonmoving(), + )); + reserved.n_free_list += 1; + } + #[cfg(feature = "immortal_as_nonmoving")] + { + vec.push(( + AllocatorSelector::BumpPointer(reserved.n_bump_pointer), + plan.common().get_nonmoving(), + )); + reserved.n_bump_pointer += 1; + } } reserved.validate();
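// --- Illustrative sketch, not part of the patch: the Group:nonmovingspace features
// (immortal_as_nonmoving, immix_as_nonmoving, marksweep_as_nonmoving) are treated as
// mutually exclusive by the cfg blocks above. A guard like the following is one way a
// crate can reject a conflicting feature selection at compile time; the patch itself does
// not include such a guard.
#[cfg(all(feature = "immortal_as_nonmoving", feature = "immix_as_nonmoving"))]
compile_error!("Select only one nonmoving space feature: immortal_as_nonmoving or immix_as_nonmoving");

#[cfg(all(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving"))]
compile_error!("Select only one nonmoving space feature: immortal_as_nonmoving or marksweep_as_nonmoving");

#[cfg(all(feature = "immix_as_nonmoving", feature = "marksweep_as_nonmoving"))]
compile_error!("Select only one nonmoving space feature: immix_as_nonmoving or marksweep_as_nonmoving");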