Merge pull request #60 from delta-rs/58-create-solution-for-using-a-custom-dataset

58 create solution for using a custom dataset
mjovanc authored Dec 7, 2024
2 parents 2326cb6 + e0a5501 commit 334d406
Showing 27 changed files with 670 additions and 148 deletions.
3 changes: 2 additions & 1 deletion .config/jetbrains/Build Workspace.run.xml
@@ -1,6 +1,7 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Build Workspace" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
<option name="command" value="build " />
<option name="buildProfileId" value="dev" />
<option name="command" value="build" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<envs />
<option name="emulateTerminal" value="true" />
2 changes: 1 addition & 1 deletion .gitignore
@@ -127,5 +127,5 @@ rust-project.json
.history
.ionide

# Directory for cached data
# Directory for cached dataset
.cache
1 change: 1 addition & 0 deletions Cargo.toml
@@ -4,6 +4,7 @@ members = [
"examples/image_classification/cifar10",
"examples/image_classification/mnist",
"examples/image_classification/imagenet_v2",
"examples/image_classification/custom",
]
resolver = "2"

2 changes: 0 additions & 2 deletions delta/src/common/mod.rs
@@ -28,7 +28,6 @@
//! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pub mod activation;
pub mod data;
pub mod errors;
pub mod layer;
pub mod loss;
@@ -37,7 +36,6 @@ pub mod shape;
pub mod tensor_ops;

pub use activation::Activation;
pub use data::{Dataset, DatasetOps};
pub use errors::{CoreError, Result};
pub use layer::{Layer, LayerOutput};
pub use loss::Loss;
72 changes: 36 additions & 36 deletions delta/src/common/tensor_ops.rs
@@ -9,7 +9,7 @@ use rand::Rng;
/// A struct representing a tensor.
#[derive(Debug, Clone)]
pub struct Tensor {
/// The data of the tensor stored as an n-dimensional array.
/// The dataset of the tensor stored as an n-dimensional array.
pub data: ArrayD<f32>,
}

@@ -18,7 +18,7 @@ impl Tensor {
///
/// # Arguments
///
/// * `data` - A vector of data.
/// * `dataset` - A vector of dataset.
/// * `shape` - A vector representing the shape of the tensor.
///
/// # Returns
@@ -30,15 +30,15 @@ impl Tensor {
/// ```
/// use deltaml::common::Tensor;
///
/// let data = vec![1.0, 2.0, 3.0, 4.0];
/// let dataset = vec![1.0, 2.0, 3.0, 4.0];
/// let shape = vec![2, 2];
///
/// let tensor = Tensor::new(data, shape);
/// let tensor = Tensor::new(dataset, shape);
/// ```
pub fn new(data: Vec<f32>, shape: Vec<usize>) -> Self {
let shape = IxDyn(&shape);
Self {
data: Array::from_shape_vec(shape, data).expect("Invalid shape for data"),
data: Array::from_shape_vec(shape, data).expect("Invalid shape for dataset"),
}
}

@@ -90,7 +90,7 @@ impl Tensor {
let shape = IxDyn(&shape); // Convert shape to dynamic dimension
let data: Vec<f32> = (0..shape.size()).map(|_| rng.gen::<f32>()).collect(); // Use size() method
Self {
data: Array::from_shape_vec(shape, data).expect("Invalid shape for random data"),
data: Array::from_shape_vec(shape, data).expect("Invalid shape for random dataset"),
}
}

@@ -171,15 +171,15 @@ impl Tensor {
///
/// # Returns
///
/// A new tensor with the reshaped data.
/// A new tensor with the reshaped dataset.
///
/// # Example
///
/// ```
/// use deltaml::common::Tensor;
///
/// let data = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(data, vec![2, 2]);
/// let dataset = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(dataset, vec![2, 2]);
/// let new_shape = vec![1, 4];
/// let reshaped_tensor = tensor.reshape(new_shape);
/// ```
@@ -217,7 +217,7 @@ impl Tensor {
where
F: Fn(f32) -> f32,
{
// Create a new array by applying the function `f` to each element of `self.data`
// Create a new array by applying the function `f` to each element of `self.dataset`
let new_data = self.data.mapv(|x| f(x));

Tensor { data: new_data }
@@ -231,15 +231,15 @@ impl Tensor {
///
/// # Returns
///
/// A new tensor containing the sliced data.
/// A new tensor containing the sliced dataset.
///
/// # Example
///
/// ```
/// use deltaml::common::Tensor;
///
/// let data = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(data, vec![2, 2]);
/// let dataset = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(dataset, vec![2, 2]);
/// let indices = vec![0..2, 0..2];
/// let sliced_tensor = tensor.slice(indices);
/// ```
@@ -303,7 +303,7 @@ impl Tensor {
///
/// # Returns
///
/// A new tensor containing the transposed data.
/// A new tensor containing the transposed dataset.
///
/// # Panics
///
@@ -314,8 +314,8 @@ impl Tensor {
/// ```
/// use deltaml::common::Tensor;
///
/// let data = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(data, vec![2, 2]);
/// let dataset = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(dataset, vec![2, 2]);
/// let transposed_tensor = tensor.transpose();
/// ```
pub fn transpose(&self) -> Tensor {
@@ -383,15 +383,15 @@ impl Tensor {
///
/// # Returns
///
/// A new tensor containing the summed data.
/// A new tensor containing the summed dataset.
///
/// # Example
///
/// ```
/// use deltaml::common::Tensor;
///
/// let data = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(data, vec![2, 2]);
/// let dataset = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(dataset, vec![2, 2]);
/// let summed_tensor = tensor.sum_along_axis(1);
/// ```
pub fn sum_along_axis(&self, axis: usize) -> Tensor {
@@ -527,15 +527,15 @@ impl Tensor {
///
/// # Returns
///
/// A new tensor containing the flattened data.
/// A new tensor containing the flattened dataset.
///
/// # Example
///
/// ```
/// use deltaml::common::Tensor;
///
/// let data = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(data, vec![2, 2]);
/// let dataset = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(dataset, vec![2, 2]);
/// let flattened_tensor = tensor.flatten();
/// ```
pub fn flatten(&self) -> Tensor {
@@ -553,15 +553,15 @@ impl Tensor {
///
/// # Returns
///
/// A new tensor containing the mean data.
/// A new tensor containing the mean dataset.
///
/// # Example
///
/// ```
/// use deltaml::common::Tensor;
///
/// let data = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(data, vec![2, 2]);
/// let dataset = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(dataset, vec![2, 2]);
/// let mean_tensor = tensor.mean_axis(1);
/// ```
pub fn mean_axis(&self, axis: usize) -> Tensor {
@@ -637,15 +637,15 @@ impl Tensor {
///
/// # Returns
///
/// A new tensor containing the normalized data.
/// A new tensor containing the normalized dataset.
///
/// # Example
///
/// ```
/// use deltaml::common::Tensor;
///
/// let data = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(data, vec![2, 2]);
/// let dataset = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(dataset, vec![2, 2]);
/// let normalized_tensor = tensor.normalize(0.0, 1.0);
/// ```
pub fn normalize(&self, min: f32, max: f32) -> Tensor {
let normalized_data = self.data.mapv(|x| (x - min) / (max - min));
@@ -685,15 +685,15 @@ impl Tensor {
///
/// # Returns
///
/// A new tensor containing the reduced data.
/// A new tensor containing the reduced dataset.
///
/// # Example
///
/// ```
/// use deltaml::common::Tensor;
///
/// let data = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(data, vec![2, 2]);
/// let dataset = vec![1.0, 2.0, 3.0, 4.0];
/// let tensor = Tensor::new(dataset, vec![2, 2]);
/// let reduced_tensor = tensor.reduce_sum(1);
/// ```
pub fn reduce_sum(&self, axis: usize) -> Tensor {
@@ -788,11 +788,11 @@ impl Tensor {
Tensor::new(data, shape)
}

/// Converts the tensor data to a vector.
/// Converts the tensor dataset to a vector.
///
/// # Returns
///
/// A vector containing the tensor data in row-major order.
/// A vector containing the tensor dataset in row-major order.
pub fn to_vec(&self) -> Vec<f32> {
self.data.as_slice().unwrap_or(&[]).to_vec()
}
@@ -805,7 +805,7 @@ impl Tensor {
///
/// # Returns
///
/// A `Tensor` containing the image pixel data in the shape `(height, width, channels)`.
/// A `Tensor` containing the image pixel dataset in the shape `(height, width, channels)`.
pub fn from_image_bytes(image_bytes: Vec<u8>) -> Result<Self, String> {
// Decode the image from bytes
let image = ImageReader::new(Cursor::new(image_bytes))
Expand All @@ -814,9 +814,9 @@ impl Tensor {
.decode()
.map_err(|e| format!("Failed to decode image: {}", e))?;

// Get image dimensions and pixel data
// Get image dimensions and pixel dataset
let (width, height) = image.dimensions();
let pixel_data = image.to_rgba8().into_raw(); // Convert to RGBA and flatten the pixel data
let pixel_data = image.to_rgba8().into_raw(); // Convert to RGBA and flatten the pixel dataset

// Construct the Tensor with shape (height, width, 4)
Ok(Tensor::new(
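For orientation, the doc examples shown in this tensor_ops.rs diff compose into a small end-to-end snippet. The sketch below is assembled only from calls whose signatures appear in the doc comments and context lines above (`deltaml::common::Tensor`, `Tensor::new`, `reshape`, `normalize`, `to_vec`); it is an illustration of the documented API, not code from this commit, and the argument values are arbitrary.

use deltaml::common::Tensor;

fn main() {
    // Build a 2x2 tensor from a flat vector, as in the `Tensor::new` doc example.
    let tensor = Tensor::new(vec![1.0, 2.0, 3.0, 4.0], vec![2, 2]);

    // Reshape to a 1x4 row vector, mirroring the `reshape` doc example.
    let reshaped = tensor.reshape(vec![1, 4]);

    // Scale values into [0, 1] using the documented (min, max) normalization.
    let normalized = reshaped.normalize(1.0, 4.0);

    // Read the values back out in row-major order.
    println!("{:?}", normalized.to_vec());
}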