From 5e9679a609414459f52fa6d9a07dc854e31a8f78 Mon Sep 17 00:00:00 2001 From: Martin Molzer Date: Thu, 24 Oct 2024 08:57:48 +0200 Subject: [PATCH] make bytes no-std compatible --- crates/burn-tensor/src/tensor/bytes.rs | 46 +++++++++++++------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/crates/burn-tensor/src/tensor/bytes.rs b/crates/burn-tensor/src/tensor/bytes.rs index 6fa24758bd8..e1058b2d084 100644 --- a/crates/burn-tensor/src/tensor/bytes.rs +++ b/crates/burn-tensor/src/tensor/bytes.rs @@ -1,9 +1,9 @@ //! A version of [`bytemuck::BoxBytes`] that is cloneable and allows trailing uninitialized elements. -use std::alloc::{Layout, LayoutError}; -use std::borrow::Cow; -use std::ops::{Deref, DerefMut}; -use std::ptr::NonNull; +use alloc::alloc::{Layout, LayoutError}; +use alloc::borrow::Cow; +use core::ops::{Deref, DerefMut}; +use core::ptr::NonNull; /// A sort of `Box<[u8]>` that remembers the original alignment and can contain trailing uninitialized bytes. pub struct Bytes { @@ -18,28 +18,28 @@ pub struct Bytes { /// The maximum supported alignment. The limit exists to not have to store alignment when serializing. Instead, /// the bytes are always over-aligned when deserializing to MAX_ALIGN. 
-const MAX_ALIGN: usize = std::mem::align_of::<u128>(); +const MAX_ALIGN: usize = core::mem::align_of::<u128>(); -fn debug_from_fn<F: Fn(&mut std::fmt::Formatter<'_>) -> std::fmt::Result>( +fn debug_from_fn<F: Fn(&mut core::fmt::Formatter<'_>) -> core::fmt::Result>( f: F, -) -> impl std::fmt::Debug { +) -> impl core::fmt::Debug { // See also: std::fmt::from_fn struct FromFn<F>(F); - impl<F> std::fmt::Debug for FromFn<F> + impl<F> core::fmt::Debug for FromFn<F> where - F: Fn(&mut std::fmt::Formatter<'_>) -> std::fmt::Result, + F: Fn(&mut core::fmt::Formatter<'_>) -> core::fmt::Result, { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { (self.0)(f) } } FromFn(f) } -impl std::fmt::Debug for Bytes { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl core::fmt::Debug for Bytes { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let data = &**self; - let fmt_data = move |f: &mut std::fmt::Formatter<'_>| { + let fmt_data = move |f: &mut core::fmt::Formatter<'_>| { if data.len() > 3 { // There is a nightly API `debug_more_non_exhaustive` which has `finish_non_exhaustive` f.debug_list().entries(&data[0..3]).entry(&"...").finish() @@ -99,8 +99,8 @@ impl Bytes { // correctly aligned, slice of bytes. Since we might not be able to fully predict the length and align ahead of time, this does currently // not seem worth the hassle.
let bytes = unsafe { - let mem = std::alloc::alloc(layout); - std::ptr::copy_nonoverlapping(data.as_ref().as_ptr(), mem, len); + let mem = alloc::alloc::alloc(layout); + core::ptr::copy_nonoverlapping(data.as_ref().as_ptr(), mem, len); NonNull::new_unchecked(mem) }; Ok(Self { @@ -114,12 +114,12 @@ impl Bytes { pub fn from_elems<E: Copy>(mut elems: Vec<E>) -> Self { let _: () = const { assert!( - std::mem::align_of::<E>() <= MAX_ALIGN, + core::mem::align_of::<E>() <= MAX_ALIGN, "element type not supported due to too large alignment" ); }; // Note: going through a Box as in Vec::into_boxed_slice would re-allocate on excess capacity. Avoid that. - let byte_len = elems.len() * std::mem::size_of::<E>(); + let byte_len = elems.len() * core::mem::size_of::<E>(); // Set the length to 0, then all data is in the "spare capacity". // SAFETY: Data is Copy, so in particular does not need to be dropped. In any case, try not to panic until // we have taken ownership of the data! @@ -130,7 +130,7 @@ impl Bytes { // SAFETY: data is the allocation of a vec, hence can not be null. We use unchecked to avoid a panic-path. let ptr = unsafe { NonNull::new_unchecked(data.as_mut_ptr() as *mut u8) }; // Now we manage the memory manually, forget the vec. - std::mem::forget(elems); + core::mem::forget(elems); Self { ptr, len: byte_len, @@ -151,7 +151,7 @@ impl Bytes { let Some(capacity) = self.layout.size().checked_div(size_of::<E>()) else { return Err(self); }; - if self.layout.align() != std::mem::align_of::<E>() { + if self.layout.align() != core::mem::align_of::<E>() { return Err(self); } let Ok(data) = bytemuck::checked::try_cast_slice_mut::<_, E>(&mut self) else { return Err(self); }; let length = data.len(); let data = data.as_mut_ptr(); - std::mem::forget(self); + core::mem::forget(self); // SAFETY: // - data was allocated by the global allocator as per type-invariant // - `E` has the same alignment as indicated by the stored layout.
@@ -176,14 +176,14 @@ impl Deref for Bytes { fn deref(&self) -> &Self::Target { // SAFETY: see type invariants - unsafe { std::slice::from_raw_parts(self.ptr.as_ptr(), self.len) } + unsafe { core::slice::from_raw_parts(self.ptr.as_ptr(), self.len) } } } impl DerefMut for Bytes { fn deref_mut(&mut self) -> &mut Self::Target { // SAFETY: see type invariants - unsafe { std::slice::from_raw_parts_mut(self.ptr.as_mut(), self.len) } + unsafe { core::slice::from_raw_parts_mut(self.ptr.as_mut(), self.len) } } } @@ -191,7 +191,7 @@ impl Drop for Bytes { fn drop(&mut self) { if self.layout.size() != 0 { unsafe { - std::alloc::dealloc(self.ptr.as_ptr(), self.layout); + alloc::alloc::dealloc(self.ptr.as_ptr(), self.layout); } } }