Skip to content

Commit

Permalink
LazyVec has been removed in as many places as possible
Browse files Browse the repository at this point in the history
  • Loading branch information
amigin committed Jan 11, 2024
1 parent 4d17aac commit 101db61
Show file tree
Hide file tree
Showing 7 changed files with 35 additions and 43 deletions.
22 changes: 12 additions & 10 deletions src/db_operations/read/get_rows_as_vec.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,12 +45,12 @@ pub async fn execute(
}

if let Some(row_key) = row_key {
return Ok(
get_as_row_key_only(app, table, row_key, limit, skip, now, update_statistics).await,
);
let result =
get_as_row_key_only(app, table, row_key, limit, skip, now, update_statistics).await;
return Ok(Some(result));
}

return Ok(get_all(table, limit, skip, now).await);
return Ok(Some(get_all(table, limit, skip, now).await));
}

pub async fn get_as_partition_key_and_row_key(
Expand Down Expand Up @@ -89,12 +89,14 @@ async fn get_as_partition_key_only(

let db_partition = read_access.get_partition(partition_key)?;

super::read_filter::filter_it_and_clone(
let result = super::read_filter::filter_it_and_clone(
db_partition.get_all_rows().into_iter(),
limit,
skip,
now.date_time,
)
);

Some(result)
}

async fn get_as_row_key_only(
Expand All @@ -105,7 +107,7 @@ async fn get_as_row_key_only(
skip: Option<usize>,
now: &JsonTimeStamp,
update_statistics: UpdateStatistics,
) -> Option<Vec<Arc<DbRow>>> {
) -> Vec<Arc<DbRow>> {
let read_access = table.data.read().await;

let mut data_by_row = Vec::new();
Expand All @@ -125,11 +127,11 @@ async fn get_as_row_key_only(
now.date_time,
);

if let Some(result) = &result {
if result.len() > 0 {
if update_statistics.has_statistics_to_update() {
let mut by_partition = HashMap::new();

for db_row in result {
for db_row in &result {
let partition_key = db_row.get_partition_key();

if !by_partition.contains_key(partition_key) {
Expand Down Expand Up @@ -157,7 +159,7 @@ async fn get_all(
limit: Option<usize>,
skip: Option<usize>,
now: &JsonTimeStamp,
) -> Option<Vec<Arc<DbRow>>> {
) -> Vec<Arc<DbRow>> {
let read_access = table.data.read().await;

super::read_filter::filter_it_and_clone(
Expand Down
6 changes: 1 addition & 5 deletions src/db_operations/read/partitions/get_partitions_list.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,5 @@ pub async fn get_partitions(
let items = table_data.partitions.get_all().iter().map(|itm| itm.0);

let result = crate::db_operations::read::read_filter::filter_it(items, limit, skip);

match result {
Some(items) => Ok((count, items.iter().map(|itm| itm.to_string()).collect())),
None => Ok((count, vec![])),
}
Ok((count, result.iter().map(|itm| itm.to_string()).collect()))
}
22 changes: 11 additions & 11 deletions src/db_operations/read/read_filter.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
use std::sync::Arc;

use my_no_sql_sdk::core::db::DbRow;
use rust_extensions::{date_time::DateTimeAsMicroseconds, lazy::LazyVec};
use rust_extensions::date_time::DateTimeAsMicroseconds;

/*
pub struct DbRowsFilter<'s, TIter: Iterator<Item = &'s Arc<DbRow>>> {
Expand Down Expand Up @@ -56,11 +56,11 @@ pub fn filter_it<'s, TItem>(
iterator: impl Iterator<Item = &'s TItem>,
limit: Option<usize>,
skip: Option<usize>,
) -> Option<Vec<&'s TItem>> {
) -> Vec<&'s TItem> {
let mut result = if let Some(limit) = limit {
LazyVec::with_capacity(limit)
Vec::with_capacity(limit)
} else {
LazyVec::new()
Vec::new()
};

let mut no = 0;
Expand All @@ -74,7 +74,7 @@ pub fn filter_it<'s, TItem>(
}
}

result.add(item);
result.push(item);
added += 1;

if let Some(limit) = limit {
Expand All @@ -88,7 +88,7 @@ pub fn filter_it<'s, TItem>(
//crate::db_operations::sync_to_main::update_row_last_read_access_time(app, db_row);
}

result.get_result()
result
//json_array_writer.build()
}

Expand All @@ -97,11 +97,11 @@ pub fn filter_it_and_clone<'s, TIter: Iterator<Item = &'s Arc<DbRow>>>(
limit: Option<usize>,
skip: Option<usize>,
now: DateTimeAsMicroseconds,
) -> Option<Vec<Arc<DbRow>>> {
) -> Vec<Arc<DbRow>> {
let mut result = if let Some(limit) = limit {
LazyVec::with_capacity(limit)
Vec::with_capacity(limit)
} else {
LazyVec::new()
Vec::new()
};

let mut no = 0;
Expand All @@ -115,7 +115,7 @@ pub fn filter_it_and_clone<'s, TIter: Iterator<Item = &'s Arc<DbRow>>>(
}
}
db_row.last_read_access.update(now);
result.add(db_row.clone());
result.push(db_row.clone());
added += 1;

if let Some(limit) = limit {
Expand All @@ -129,6 +129,6 @@ pub fn filter_it_and_clone<'s, TIter: Iterator<Item = &'s Arc<DbRow>>>(
//crate::db_operations::sync_to_main::update_row_last_read_access_time(app, db_row);
}

result.get_result()
result
//json_array_writer.build()
}
6 changes: 2 additions & 4 deletions src/db_operations/read/read_operation_result.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,17 +50,15 @@ impl ReadOperationResult {
app: &Arc<AppContext>,
db_table: &Arc<DbTableWrapper>,
partition_key: &String,
db_rows: Option<Vec<&Arc<DbRow>>>,
db_rows: Vec<&Arc<DbRow>>,
update_statistics: UpdateStatistics,
) -> Self {
if db_rows.is_none() {
if db_rows.len() == 0 {
return Self::EmptyArray;
}

let mut json_array_writer = JsonArrayWriter::new();

let db_rows = db_rows.unwrap();

update_statistics
.update_statistics(app, db_table, partition_key, || {
db_rows.iter().map(|db_row| db_row.get_row_key())
Expand Down
2 changes: 1 addition & 1 deletion src/db_operations/read/rows/get_all.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ pub async fn get_all(
let db_rows =
crate::db_operations::read::read_filter::filter_it(result_items.into_iter(), limit, skip);

let db_rows = if let Some(db_rows) = db_rows {
let db_rows = if db_rows.len() > 0 {
let mut result = HashMap::new();

for db_row in db_rows {
Expand Down
13 changes: 5 additions & 8 deletions src/db_operations/read/rows/get_all_by_row_key.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
use std::{collections::HashMap, sync::Arc};

use my_no_sql_server_core::DbTableWrapper;
use rust_extensions::lazy::LazyVec;

use crate::{
app::AppContext,
Expand All @@ -22,25 +21,23 @@ pub async fn get_all_by_row_key(

let table_data = db_table.data.read().await;

let mut db_rows = LazyVec::new();
let mut db_rows = Vec::new();

for partition in table_data.partitions.get_partitions() {
let get_row_result = partition.get_row(row_key);

if let Some(db_row) = get_row_result {
db_rows.add(db_row);
db_rows.push(db_row);
}
}

let db_rows = db_rows.get_result();

if db_rows.is_none() {
if db_rows.len() == 0 {
return Ok(ReadOperationResult::EmptyArray);
}

let db_rows = super::super::read_filter::filter_it(db_rows.unwrap().into_iter(), limit, skip);
let db_rows = super::super::read_filter::filter_it(db_rows.into_iter(), limit, skip);

let db_rows = if let Some(db_rows) = db_rows {
let db_rows = if db_rows.len() > 0 {
let mut result = HashMap::new();
for db_row in db_rows {
result.insert(db_row.get_partition_key().to_string(), vec![db_row]);
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
use std::sync::Arc;

use my_no_sql_server_core::DbTableWrapper;
use rust_extensions::lazy::LazyVec;

use crate::{
app::AppContext,
Expand All @@ -28,21 +27,21 @@ pub async fn get_single_partition_multiple_rows(

let db_partition = db_partition.unwrap();

let mut db_rows = LazyVec::with_capacity(row_keys.len());
let mut db_rows = Vec::with_capacity(row_keys.len());

for row_key in &row_keys {
let db_row = db_partition.get_row(row_key);

if let Some(db_row) = db_row {
db_rows.add(db_row);
db_rows.push(db_row);
}
}

return Ok(ReadOperationResult::compile_array_or_empty_from_partition(
app,
db_table_wrapper,
partition_key,
db_rows.get_result(),
db_rows,
update_statistics,
)
.await);
Expand Down

0 comments on commit 101db61

Please sign in to comment.