Skip to content

Commit

Permalink
rename crate
Browse files Browse the repository at this point in the history
  • Loading branch information
chris-oo committed Nov 11, 2024
1 parent 4b42e26 commit 3259bdc
Show file tree
Hide file tree
Showing 16 changed files with 40 additions and 40 deletions.
40 changes: 20 additions & 20 deletions Cargo.lock
Original file line number Diff line number Diff line change
Expand Up @@ -2307,11 +2307,11 @@ dependencies = [
"inspect",
"inspect_counters",
"mesh",
"page_pool_alloc",
"pal_async",
"parking_lot",
"power_resources",
"serde_json",
"shared_pool_alloc",
"test_with_tracing",
"thiserror 2.0.0",
"tracing",
Expand Down Expand Up @@ -4504,6 +4504,23 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
name = "oversized_box"
version = "0.0.0"

[[package]]
name = "page_pool_alloc"
version = "0.0.0"
dependencies = [
"anyhow",
"hcl",
"hvdef",
"inspect",
"memory_range",
"parking_lot",
"sparse_mmap",
"thiserror 2.0.0",
"tracing",
"user_driver",
"vm_topology",
]

[[package]]
name = "page_table"
version = "0.0.0"
Expand Down Expand Up @@ -5680,23 +5697,6 @@ dependencies = [
"lazy_static",
]

[[package]]
name = "shared_pool_alloc"
version = "0.0.0"
dependencies = [
"anyhow",
"hcl",
"hvdef",
"inspect",
"memory_range",
"parking_lot",
"sparse_mmap",
"thiserror 2.0.0",
"tracing",
"user_driver",
"vm_topology",
]

[[package]]
name = "shell-words"
version = "1.1.0"
Expand Down Expand Up @@ -6696,6 +6696,7 @@ dependencies = [
"netvsp",
"nvme_driver",
"nvme_resources",
"page_pool_alloc",
"pal",
"pal_async",
"pal_uring",
Expand All @@ -6710,7 +6711,6 @@ dependencies = [
"serde_helpers",
"serde_json",
"serial_16550_resources",
"shared_pool_alloc",
"socket2",
"sparse_mmap",
"state_unit",
Expand Down Expand Up @@ -7198,13 +7198,13 @@ dependencies = [
"libc",
"memory_range",
"mesh",
"page_pool_alloc",
"pal",
"pal_async",
"pal_uring",
"parking_lot",
"pci_core",
"safe_intrinsics",
"shared_pool_alloc",
"sidecar_client",
"thiserror 2.0.0",
"tracelimit",
Expand Down
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -151,9 +151,9 @@ host_fdt_parser = { path = "openhcl/host_fdt_parser" }
kmsg_defs = { path = "openhcl/kmsg_defs" }
minimal_rt = { path = "openhcl/minimal_rt" }
minimal_rt_build = { path = "openhcl/minimal_rt_build" }
page_pool_alloc = { path = "openhcl/page_pool_alloc" }
sidecar_client = { path = "openhcl/sidecar_client" }
sidecar_defs = { path = "openhcl/sidecar_defs" }
shared_pool_alloc = { path = "openhcl/shared_pool_alloc" }
underhill_attestation = { path = "openhcl/underhill_attestation" }
underhill_confidentiality = { path = "openhcl/underhill_confidentiality" }
underhill_core = { path = "openhcl/underhill_core" }
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# Licensed under the MIT License.

[package]
name = "shared_pool_alloc"
name = "page_pool_alloc"
edition = "2021"
rust-version.workspace = true

Expand Down
File renamed without changes.
File renamed without changes.
2 changes: 1 addition & 1 deletion openhcl/underhill_core/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ cvm_tracing.workspace = true
diag_proto.workspace = true
diag_server.workspace = true
debug_worker_defs = { workspace = true, optional = true }
shared_pool_alloc = { workspace = true, features = ["vfio"] }
page_pool_alloc = { workspace = true, features = ["vfio"] }
virt.workspace = true
vmm_core.workspace = true
vmm_core_defs.workspace = true
Expand Down
2 changes: 1 addition & 1 deletion openhcl/underhill_core/src/dispatch/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,10 +40,10 @@ use mesh::CancelContext;
use mesh::MeshPayload;
use mesh_worker::WorkerRpc;
use net_packet_capture::PacketCaptureParams;
use page_pool_alloc::PagePool;
use pal_async::task::Spawn;
use pal_async::task::Task;
use parking_lot::Mutex;
use shared_pool_alloc::PagePool;
use socket2::Socket;
use state_unit::SavedStateUnit;
use state_unit::SpawnedUnit;
Expand Down
2 changes: 1 addition & 1 deletion openhcl/underhill_core/src/worker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -89,14 +89,14 @@ use mesh_worker::Worker;
use mesh_worker::WorkerId;
use mesh_worker::WorkerRpc;
use net_packet_capture::PacketCaptureParams;
use page_pool_alloc::PagePool;
use pal_async::local::LocalDriver;
use pal_async::task::Spawn;
use pal_async::DefaultDriver;
use pal_async::DefaultPool;
use parking_lot::Mutex;
use scsi_core::ResolveScsiDeviceHandleParams;
use scsidisk::atapi_scsi::AtapiScsiDisk;
use shared_pool_alloc::PagePool;
use socket2::Socket;
use state_unit::SpawnedUnit;
use state_unit::StateUnits;
Expand Down
2 changes: 1 addition & 1 deletion openhcl/virt_mshv_vtl/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ gdb = []
aarch64emu.workspace = true
aarch64defs.workspace = true
hcl.workspace = true
shared_pool_alloc.workspace = true
page_pool_alloc.workspace = true
virt.workspace = true
virt_support_aarch64emu.workspace = true
virt_support_apic.workspace = true
Expand Down
6 changes: 3 additions & 3 deletions openhcl/virt_mshv_vtl/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ pub enum Error {
#[error("failed to map overlay page")]
MapOverlay(#[source] std::io::Error),
#[error("failed to allocate shared visibility pages for overlay")]
AllocateSharedVisOverlay(#[source] shared_pool_alloc::PagePoolOutOfMemory),
AllocateSharedVisOverlay(#[source] page_pool_alloc::PagePoolOutOfMemory),
#[error("failed to open msr device")]
OpenMsr(#[source] std::io::Error),
#[error("cpuid did not contain valid TSC frequency information")]
Expand Down Expand Up @@ -215,7 +215,7 @@ struct UhPartitionInner {
isolated_memory_protector: Option<Arc<dyn ProtectIsolatedMemory>>,
#[cfg_attr(guest_arch = "aarch64", allow(dead_code))]
#[inspect(skip)]
shared_vis_pages_pool: Option<shared_pool_alloc::PagePoolAllocator>,
shared_vis_pages_pool: Option<page_pool_alloc::PagePoolAllocator>,
#[inspect(with = "inspect::AtomicMut")]
no_sidecar_hotplug: AtomicBool,
use_mmio_hypercalls: bool,
Expand Down Expand Up @@ -1155,7 +1155,7 @@ pub struct UhLateParams<'a> {
/// An object to call to change host visibility on guest memory.
pub isolated_memory_protector: Option<Arc<dyn ProtectIsolatedMemory>>,
/// Allocator for shared visibility pages.
pub shared_vis_pages_pool: Option<shared_pool_alloc::PagePoolAllocator>,
pub shared_vis_pages_pool: Option<page_pool_alloc::PagePoolAllocator>,
}

/// Trait for CVM-related protections on guest memory.
Expand Down
2 changes: 1 addition & 1 deletion openhcl/virt_mshv_vtl/src/processor/snp/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ pub struct SnpBacked {
direct_overlays_pfns: [u64; UhDirectOverlay::Count as usize],
#[inspect(skip)]
#[allow(dead_code)] // Allocation handle for direct overlays held until drop
direct_overlay_pfns_handle: shared_pool_alloc::PagePoolHandle,
direct_overlay_pfns_handle: page_pool_alloc::PagePoolHandle,
#[inspect(hex)]
hv_sint_notifications: u16,
general_stats: GeneralStats,
Expand Down
4 changes: 2 additions & 2 deletions openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -393,7 +393,7 @@ pub struct TdxBacked {
direct_overlays_pfns: [u64; UhDirectOverlay::Count as usize],
#[inspect(skip)]
#[allow(dead_code)] // Allocation handle for direct overlays held until drop
direct_overlay_pfns_handle: shared_pool_alloc::PagePoolHandle,
direct_overlay_pfns_handle: page_pool_alloc::PagePoolHandle,

lapic: LapicState,
untrusted_synic: Option<ProcessorSynic>,
Expand All @@ -413,7 +413,7 @@ pub struct TdxBacked {
flush_state: VtlArray<TdxFlushState, 2>,
/// A mapped page used for issuing INVGLA hypercalls.
#[inspect(skip)]
flush_page: shared_pool_alloc::PagePoolHandle,
flush_page: page_pool_alloc::PagePoolHandle,

enter_stats: EnterStats,
exit_stats: ExitStats,
Expand Down
4 changes: 2 additions & 2 deletions openhcl/virt_mshv_vtl/src/processor/tdx/tlb_flush.rs
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ impl UhProcessor<'_, TdxBacked> {
partition_flush_state: &TdxPartitionFlushState,
gva_list_count: &mut Wrapping<usize>,
runner: &mut ProcessorRunner<'_, Tdx>,
flush_page: &shared_pool_alloc::PagePoolHandle,
flush_page: &page_pool_alloc::PagePoolHandle,
) -> bool {
// Check quickly to see whether any new addresses are in the list.
if partition_flush_state.s.gva_list_count == *gva_list_count {
Expand Down Expand Up @@ -155,7 +155,7 @@ impl UhProcessor<'_, TdxBacked> {
target_vtl: GuestVtl,
flush_addrs: &[HvGvaRange],
runner: &mut ProcessorRunner<'_, Tdx>,
flush_page: &shared_pool_alloc::PagePoolHandle,
flush_page: &page_pool_alloc::PagePoolHandle,
) {
// Now we can build the TDX structs and actually call INVGLA.
tracing::trace!(
Expand Down
2 changes: 1 addition & 1 deletion vm/devices/get/guest_emulation_transport/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ get_protocol.workspace = true
guest_emulation_device = { workspace = true, optional = true }
guestmem.workspace = true
hvdef.workspace = true
shared_pool_alloc.workspace = true
page_pool_alloc.workspace = true
underhill_config.workspace = true
vmbus_async.workspace = true
vmbus_ring.workspace = true
Expand Down
2 changes: 1 addition & 1 deletion vm/devices/get/guest_emulation_transport/src/client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -388,7 +388,7 @@ impl GuestEmulationTransportClient {
/// Set the shared memory allocator, which is required by [`igvm_attest`].
pub fn set_shared_memory_allocator(
&mut self,
shared_pool_allocator: shared_pool_alloc::PagePoolAllocator,
shared_pool_allocator: page_pool_alloc::PagePoolAllocator,
shared_guest_memory: guestmem::GuestMemory,
) {
self.control.notify(msg::Msg::SetupSharedMemoryAllocator(
Expand Down
8 changes: 4 additions & 4 deletions vm/devices/get/guest_emulation_transport/src/process_loop.rs
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ pub(crate) enum FatalError {
#[error("failed to make the IgvmAttest request because shared memory is unavailable")]
SharedMemoryUnavailable,
#[error("failed to allocate shared memory")]
SharedMemoryAllocationError(#[source] shared_pool_alloc::PagePoolOutOfMemory),
SharedMemoryAllocationError(#[source] page_pool_alloc::PagePoolOutOfMemory),
#[error("failed to read the `IGVM_ATTEST` response from shared memory")]
ReadSharedMemory(#[source] guestmem::GuestMemoryError),
#[error("failed to deserialize the asynchronous `IGVM_ATTEST` response")]
Expand Down Expand Up @@ -174,7 +174,7 @@ pub(crate) mod msg {
Inspect(inspect::Deferred),
/// Store the shared memory allocator and guest memory for later use by
/// IGVM attestation.
SetupSharedMemoryAllocator(shared_pool_alloc::PagePoolAllocator, guestmem::GuestMemory),
SetupSharedMemoryAllocator(page_pool_alloc::PagePoolAllocator, guestmem::GuestMemory),

// Late bound receivers for Guest Notifications
/// Take the late-bound GuestRequest receiver for Generation Id updates.
Expand Down Expand Up @@ -468,7 +468,7 @@ pub(crate) struct ProcessLoop<T: RingMem> {
#[inspect(skip)]
igvm_attest_read_send: mesh::Sender<Vec<u8>>,
#[inspect(skip)]
shared_pool_allocator: Option<Arc<shared_pool_alloc::PagePoolAllocator>>,
shared_pool_allocator: Option<Arc<page_pool_alloc::PagePoolAllocator>>,
shared_guest_memory: Option<Arc<guestmem::GuestMemory>>,
stats: Stats,

Expand Down Expand Up @@ -1806,7 +1806,7 @@ async fn request_saved_state(
async fn request_igvm_attest(
mut access: HostRequestPipeAccess,
request: msg::IgvmAttestRequestData,
shared_pool_allocator: Option<Arc<shared_pool_alloc::PagePoolAllocator>>,
shared_pool_allocator: Option<Arc<page_pool_alloc::PagePoolAllocator>>,
shared_guest_memory: Option<Arc<guestmem::GuestMemory>>,
) -> Result<Result<Vec<u8>, IgvmAttestError>, FatalError> {
const ALLOCATED_SHARED_MEMORY_SIZE: usize =
Expand Down

0 comments on commit 3259bdc

Please sign in to comment.