From 4b58b52bafa58d0b04963c49c6831a933d09b3d1 Mon Sep 17 00:00:00 2001 From: Evgeny Fomin Date: Fri, 17 Jan 2025 15:43:12 +0100 Subject: [PATCH 1/5] wip --- costs/src/context.rs | 16 +- grovedb-version/src/lib.rs | 38 +- .../src/version/grovedb_versions.rs | 3 + grovedb-version/src/version/v1.rs | 3 + grovedb-version/src/version/v2.rs | 3 + .../estimated_costs/average_case_costs.rs | 8 +- .../batch/estimated_costs/worst_case_costs.rs | 6 +- .../batch/just_in_time_reference_update.rs | 14 +- grovedb/src/batch/mod.rs | 461 ++++-------- grovedb/src/debugger.rs | 20 +- grovedb/src/element/delete.rs | 2 - grovedb/src/element/exists.rs | 4 +- grovedb/src/element/get.rs | 61 +- grovedb/src/element/helpers.rs | 2 +- grovedb/src/element/insert.rs | 8 +- grovedb/src/element/query.rs | 314 ++++---- grovedb/src/element/serialize.rs | 2 +- .../src/estimated_costs/average_case_costs.rs | 26 +- .../src/estimated_costs/worst_case_costs.rs | 19 +- grovedb/src/lib.rs | 499 ++----------- grovedb/src/merk_cache.rs | 271 +++++++ grovedb/src/operations/auxiliary.rs | 145 ++-- grovedb/src/operations/delete/average_case.rs | 14 +- .../src/operations/delete/delete_up_tree.rs | 17 +- grovedb/src/operations/delete/mod.rs | 484 ++++-------- grovedb/src/operations/delete/worst_case.rs | 10 +- grovedb/src/operations/get/average_case.rs | 2 +- grovedb/src/operations/get/mod.rs | 140 +--- grovedb/src/operations/get/query.rs | 18 +- grovedb/src/operations/get/worst_case.rs | 2 +- grovedb/src/operations/insert/mod.rs | 220 +----- grovedb/src/operations/is_empty_tree.rs | 31 +- grovedb/src/operations/proof/generate.rs | 12 +- grovedb/src/operations/proof/verify.rs | 3 +- grovedb/src/query/mod.rs | 2 +- grovedb/src/reference_path.rs | 705 +++++++++++++++++- grovedb/src/replication.rs | 124 +-- grovedb/src/tests/count_sum_tree_tests.rs | 35 +- grovedb/src/tests/count_tree_tests.rs | 50 +- grovedb/src/tests/mod.rs | 46 +- grovedb/src/tests/sum_tree_tests.rs | 103 ++- grovedb/src/tests/tree_hashes_tests.rs | 21 +- grovedb/src/util.rs | 494 +----------- grovedb/src/util/compat.rs | 131 ++++ grovedb/src/visualize.rs | 64 +- merk/src/merk/meta.rs | 111 +++ merk/src/merk/mod.rs | 69 +- merk/src/merk/open.rs | 25 +- merk/src/merk/restore.rs | 14 +- merk/src/merk/tree_type.rs | 78 ++ merk/src/proofs/tree.rs | 22 +- merk/src/test_utils/mod.rs | 10 +- merk/src/test_utils/temp_merk.rs | 64 +- merk/src/tree/encoding.rs | 2 +- merk/src/tree/mod.rs | 10 +- merk/src/tree/ops.rs | 6 +- merk/src/tree/walk/mod.rs | 8 +- path/Cargo.toml | 1 + path/src/subtree_path.rs | 168 +++-- path/src/subtree_path_builder.rs | 111 ++- path/src/util/compact_bytes.rs | 46 +- path/src/util/cow_like.rs | 2 +- storage/src/rocksdb_storage.rs | 2 +- storage/src/rocksdb_storage/storage.rs | 57 +- .../src/rocksdb_storage/storage_context.rs | 2 - .../storage_context/context_no_tx.rs | 286 ------- storage/src/rocksdb_storage/tests.rs | 62 +- storage/src/storage.rs | 30 +- 68 files changed, 2904 insertions(+), 2935 deletions(-) create mode 100644 grovedb/src/merk_cache.rs create mode 100644 grovedb/src/util/compat.rs create mode 100644 merk/src/merk/meta.rs create mode 100644 merk/src/merk/tree_type.rs delete mode 100644 storage/src/rocksdb_storage/storage_context/context_no_tx.rs diff --git a/costs/src/context.rs b/costs/src/context.rs index d69cb054e..d661e3a1d 100644 --- a/costs/src/context.rs +++ b/costs/src/context.rs @@ -116,6 +116,15 @@ impl CostResult { pub fn cost_as_result(self) -> Result { self.value.map(|_| self.cost) } + + /// Call the provided function on 
success without altering result or cost. + pub fn for_ok(self, f: impl FnOnce(&T)) -> CostResult { + if let Ok(x) = &self.value { + f(x) + } + + self + } } impl CostResult, E> { @@ -170,8 +179,9 @@ impl CostsExt for T {} /// 1. Early termination on error; /// 2. Because of 1, `Result` is removed from the equation; /// 3. `CostContext` is removed too because it is added to external cost -/// accumulator; 4. Early termination uses external cost accumulator so previous -/// costs won't be lost. +/// accumulator; +/// 4. Early termination uses external cost accumulator so previous costs won't +/// be lost. #[macro_export] macro_rules! cost_return_on_error { ( &mut $cost:ident, $($body:tt)+ ) => { @@ -193,7 +203,7 @@ macro_rules! cost_return_on_error { /// so no costs will be added except previously accumulated. #[macro_export] macro_rules! cost_return_on_error_no_add { - ( &$cost:ident, $($body:tt)+ ) => { + ( $cost:ident, $($body:tt)+ ) => { { use $crate::CostsExt; let result = { $($body)+ }; diff --git a/grovedb-version/src/lib.rs b/grovedb-version/src/lib.rs index f66019d4a..59fb034c9 100644 --- a/grovedb-version/src/lib.rs +++ b/grovedb-version/src/lib.rs @@ -1,4 +1,4 @@ -use crate::version::GroveVersion; +use version::GroveVersion; pub mod error; pub mod version; @@ -8,13 +8,15 @@ macro_rules! check_grovedb_v0_with_cost { ($method:expr, $version:expr) => {{ const EXPECTED_VERSION: u16 = 0; if $version != EXPECTED_VERSION { - return Err(GroveVersionError::UnknownVersionMismatch { - method: $method.to_string(), - known_versions: vec![EXPECTED_VERSION], - received: $version, - } - .into()) - .wrap_with_cost(OperationCost::default()); + return grovedb_costs::CostsExt::wrap_with_cost( + Err($crate::error::GroveVersionError::UnknownVersionMismatch { + method: $method.to_string(), + known_versions: vec![EXPECTED_VERSION], + received: $version, + } + .into()), + Default::default(), + ); } }}; } @@ -24,7 +26,7 @@ macro_rules! check_grovedb_v0 { ($method:expr, $version:expr) => {{ const EXPECTED_VERSION: u16 = 0; if $version != EXPECTED_VERSION { - return Err(GroveVersionError::UnknownVersionMismatch { + return Err($crate::error::GroveVersionError::UnknownVersionMismatch { method: $method.to_string(), known_versions: vec![EXPECTED_VERSION], received: $version, @@ -56,13 +58,15 @@ macro_rules! check_merk_v0_with_cost { ($method:expr, $version:expr) => {{ const EXPECTED_VERSION: u16 = 0; if $version != EXPECTED_VERSION { - return Err(GroveVersionError::UnknownVersionMismatch { - method: $method.to_string(), - known_versions: vec![EXPECTED_VERSION], - received: $version, - } - .into()) - .wrap_with_cost(OperationCost::default()); + return grovedb_costs::CostsExt::wrap_with_cost( + Err($crate::error::GroveVersionError::UnknownVersionMismatch { + method: $method.to_string(), + known_versions: vec![EXPECTED_VERSION], + received: $version, + } + .into()), + Default::default(), + ); } }}; } @@ -72,7 +76,7 @@ macro_rules! 
check_merk_v0 { ($method:expr, $version:expr) => {{ const EXPECTED_VERSION: u16 = 0; if $version != EXPECTED_VERSION { - return Err(GroveVersionError::UnknownVersionMismatch { + return Err($crate::error::GroveVersionError::UnknownVersionMismatch { method: $method.to_string(), known_versions: vec![EXPECTED_VERSION], received: $version, diff --git a/grovedb-version/src/version/grovedb_versions.rs b/grovedb-version/src/version/grovedb_versions.rs index 598fa1789..de6e3d422 100644 --- a/grovedb-version/src/version/grovedb_versions.rs +++ b/grovedb-version/src/version/grovedb_versions.rs @@ -48,6 +48,7 @@ pub struct GroveDBOperationsGetVersions { pub get: FeatureVersion, pub get_caching_optional: FeatureVersion, pub follow_reference: FeatureVersion, + pub follow_reference_once: FeatureVersion, pub get_raw: FeatureVersion, pub get_raw_caching_optional: FeatureVersion, pub get_raw_optional: FeatureVersion, @@ -190,6 +191,7 @@ pub struct GroveDBElementMethodVersions { pub get_optional_from_storage: FeatureVersion, pub get_with_absolute_refs: FeatureVersion, pub get_value_hash: FeatureVersion, + pub get_with_value_hash: FeatureVersion, pub get_specialized_cost: FeatureVersion, pub value_defined_cost: FeatureVersion, pub value_defined_cost_for_serialized_value: FeatureVersion, @@ -203,6 +205,7 @@ pub struct GroveDBElementMethodVersions { pub insert_if_changed_value_into_batch_operations: FeatureVersion, pub insert_reference: FeatureVersion, pub insert_reference_into_batch_operations: FeatureVersion, + pub insert_reference_if_changed_value: FeatureVersion, pub insert_subtree: FeatureVersion, pub insert_subtree_into_batch_operations: FeatureVersion, pub get_query: FeatureVersion, diff --git a/grovedb-version/src/version/v1.rs b/grovedb-version/src/version/v1.rs index 5bf58180a..0234315ab 100644 --- a/grovedb-version/src/version/v1.rs +++ b/grovedb-version/src/version/v1.rs @@ -65,6 +65,8 @@ pub const GROVE_V1: GroveVersion = GroveVersion { serialize: 0, serialized_size: 0, deserialize: 0, + get_with_value_hash: 0, + insert_reference_if_changed_value: 0, }, operations: GroveDBOperationsVersions { get: GroveDBOperationsGetVersions { @@ -86,6 +88,7 @@ pub const GROVE_V1: GroveVersion = GroveVersion { worst_case_for_get_raw: 0, worst_case_for_get: 0, is_empty_tree: 0, + follow_reference_once: 0, }, insert: GroveDBOperationsInsertVersions { insert: 0, diff --git a/grovedb-version/src/version/v2.rs b/grovedb-version/src/version/v2.rs index 3591ba1af..6f357c6b8 100644 --- a/grovedb-version/src/version/v2.rs +++ b/grovedb-version/src/version/v2.rs @@ -65,6 +65,8 @@ pub const GROVE_V2: GroveVersion = GroveVersion { serialize: 0, serialized_size: 0, deserialize: 0, + get_with_value_hash: 0, + insert_reference_if_changed_value: 0, }, operations: GroveDBOperationsVersions { get: GroveDBOperationsGetVersions { @@ -86,6 +88,7 @@ pub const GROVE_V2: GroveVersion = GroveVersion { worst_case_for_get_raw: 0, worst_case_for_get: 0, is_empty_tree: 0, + follow_reference_once: 0, }, insert: GroveDBOperationsInsertVersions { insert: 0, diff --git a/grovedb/src/batch/estimated_costs/average_case_costs.rs b/grovedb/src/batch/estimated_costs/average_case_costs.rs index 310c58639..8e2bbbb26 100644 --- a/grovedb/src/batch/estimated_costs/average_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/average_case_costs.rs @@ -192,7 +192,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { let mut cost = OperationCost::default(); let layer_element_estimates = cost_return_on_error_no_add!( - &cost, + cost, 
self.paths.get(path).ok_or_else(|| { let paths = self .paths @@ -214,7 +214,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { // Then we have to get the tree if self.cached_merks.get(path).is_none() { let layer_info = cost_return_on_error_no_add!( - &cost, + cost, self.paths.get(path).ok_or_else(|| { let paths = self .paths @@ -229,7 +229,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { }) ); cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_average_case_get_merk_at_path::( &mut cost, path, @@ -268,7 +268,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { // Then we have to get the tree if !self.cached_merks.contains_key(&base_path) { cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_average_case_get_merk_at_path::( &mut cost, &base_path, diff --git a/grovedb/src/batch/estimated_costs/worst_case_costs.rs b/grovedb/src/batch/estimated_costs/worst_case_costs.rs index b48109ade..82dd7bbe2 100644 --- a/grovedb/src/batch/estimated_costs/worst_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/worst_case_costs.rs @@ -189,7 +189,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { let mut cost = OperationCost::default(); let worst_case_layer_element_estimates = cost_return_on_error_no_add!( - &cost, + cost, self.paths .get(path) .ok_or_else(|| Error::PathNotFoundInCacheForEstimatedCosts(format!( @@ -201,7 +201,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { // Then we have to get the tree if !self.cached_merks.contains(path) { cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_worst_case_get_merk_at_path::( &mut cost, path, @@ -244,7 +244,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { // Then we have to get the tree if !self.cached_merks.contains(&base_path) { cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_worst_case_get_merk_at_path::( &mut cost, &base_path, diff --git a/grovedb/src/batch/just_in_time_reference_update.rs b/grovedb/src/batch/just_in_time_reference_update.rs index 06081eb25..21a06aab8 100644 --- a/grovedb/src/batch/just_in_time_reference_update.rs +++ b/grovedb/src/batch/just_in_time_reference_update.rs @@ -54,7 +54,7 @@ where updated_new_element_with_old_flags.set_flags(maybe_old_flags.clone()); // There are no storage flags, we can just hash new element let new_serialized_bytes = cost_return_on_error_no_add!( - &cost, + cost, updated_new_element_with_old_flags.serialize(grove_version) ); let val_hash = value_hash(&new_serialized_bytes).unwrap_add_cost(&mut cost); @@ -94,7 +94,7 @@ where updated_new_element_with_old_flags.set_flags(maybe_old_flags.clone()); let serialized_with_old_flags = cost_return_on_error_no_add!( - &cost, + cost, updated_new_element_with_old_flags.serialize(grove_version) ); KV::node_value_byte_cost_size( @@ -120,7 +120,7 @@ where if let Some(old_element_flags) = maybe_old_flags.as_mut() { if let BasicStorageRemoval(removed_bytes) = storage_costs.removed_bytes { let (_, value_removed_bytes) = cost_return_on_error_no_add!( - &cost, + cost, split_removal_bytes(old_element_flags, 0, removed_bytes) ); storage_costs.removed_bytes = value_removed_bytes; @@ -130,7 +130,7 @@ where let mut new_element_cloned = original_new_element.clone(); let changed = cost_return_on_error_no_add!( - &cost, + cost, (flags_update)( &storage_costs, maybe_old_flags.clone(), @@ -150,10 +150,8 @@ where return Ok(val_hash).wrap_with_cost(cost); } else { // There are no storage flags, we can just hash new element - let new_serialized_bytes = cost_return_on_error_no_add!( - &cost, - 
new_element_cloned.serialize(grove_version) - ); + let new_serialized_bytes = + cost_return_on_error_no_add!(cost, new_element_cloned.serialize(grove_version)); new_storage_cost = KV::node_value_byte_cost_size( key.len() as u32, diff --git a/grovedb/src/batch/mod.rs b/grovedb/src/batch/mod.rs index 3d35525c6..33d9bbef3 100644 --- a/grovedb/src/batch/mod.rs +++ b/grovedb/src/batch/mod.rs @@ -57,12 +57,9 @@ use grovedb_merk::{ }; use grovedb_path::SubtreePath; use grovedb_storage::{ - rocksdb_storage::{PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext}, - Storage, StorageBatch, StorageContext, -}; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, + rocksdb_storage::PrefixedRocksDbTransactionContext, Storage, StorageBatch, StorageContext, }; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; use grovedb_visualize::{Drawer, Visualize}; use integer_encoding::VarInt; use itertools::Itertools; @@ -82,6 +79,7 @@ use crate::{ reference_path::{ path_from_reference_path_type, path_from_reference_qualified_path_type, ReferencePathType, }, + util::TxRef, Element, ElementFlags, Error, GroveDb, Transaction, TransactionArg, }; @@ -808,7 +806,7 @@ where Ok(referenced_element_value_hash).wrap_with_cost(cost) } else if let Some(referenced_path) = intermediate_reference_info { let path = cost_return_on_error_no_add!( - &cost, + cost, path_from_reference_qualified_path_type(referenced_path.clone(), qualified_path) ); self.follow_reference_get_value_hash( @@ -901,7 +899,7 @@ where if let Some(referenced_element) = referenced_element { let element = cost_return_on_error_no_add!( - &cost, + cost, Element::deserialize(referenced_element.as_slice(), grove_version).map_err(|_| { Error::CorruptedData(String::from("unable to deserialize element")) }) @@ -1001,13 +999,13 @@ where match element { Element::Item(..) | Element::SumItem(..) => { let serialized = - cost_return_on_error_no_add!(&cost, element.serialize(grove_version)); + cost_return_on_error_no_add!(cost, element.serialize(grove_version)); let val_hash = value_hash(&serialized).unwrap_add_cost(&mut cost); Ok(val_hash).wrap_with_cost(cost) } Element::Reference(path, ..) => { let path = cost_return_on_error_no_add!( - &cost, + cost, path_from_reference_qualified_path_type(path, qualified_path) ); self.follow_reference_get_value_hash( @@ -1079,7 +1077,7 @@ where match element { Element::Item(..) | Element::SumItem(..) => { let serialized = cost_return_on_error_no_add!( - &cost, + cost, element.serialize(grove_version) ); if element.get_flags().is_none() { @@ -1126,7 +1124,7 @@ where } Element::Reference(path, ..) => { let path = cost_return_on_error_no_add!( - &cost, + cost, path_from_reference_qualified_path_type( path.clone(), qualified_path @@ -1154,13 +1152,13 @@ where GroveOp::InsertOnly { element } => match element { Element::Item(..) | Element::SumItem(..) => { let serialized = - cost_return_on_error_no_add!(&cost, element.serialize(grove_version)); + cost_return_on_error_no_add!(cost, element.serialize(grove_version)); let val_hash = value_hash(&serialized).unwrap_add_cost(&mut cost); Ok(val_hash).wrap_with_cost(cost) } Element::Reference(path, ..) 
=> { let path = cost_return_on_error_no_add!( - &cost, + cost, path_from_reference_qualified_path_type(path.clone(), qualified_path) ); self.follow_reference_get_value_hash( @@ -1440,7 +1438,7 @@ where ) ); cost_return_on_error_no_add!( - &cost, + cost, Element::deserialize(value.as_slice(), grove_version).map_err(|_| { Error::CorruptedData(String::from("unable to deserialize element")) }) @@ -1576,7 +1574,7 @@ where } }; let merk_feature_type = - cost_return_on_error_no_add!(&cost, element.get_feature_type(in_tree_type)); + cost_return_on_error_no_add!(cost, element.get_feature_type(in_tree_type)); cost_return_on_error!( &mut cost, @@ -1785,7 +1783,7 @@ impl GroveDb { if batch_apply_options.base_root_storage_is_free { // the base root is free let mut update_root_cost = cost_return_on_error_no_add!( - &cost, + cost, merk_tree_cache .update_base_merk_root_key(calculated_root_key, grove_version) .cost_as_result() @@ -2282,74 +2280,6 @@ impl GroveDb { } } - /// Opens merk at path with given storage batch context. Returns CostResult. - pub fn open_batch_merk_at_path<'a, B: AsRef<[u8]>>( - &'a self, - storage_batch: &'a StorageBatch, - path: SubtreePath, - new_merk: bool, - grove_version: &GroveVersion, - ) -> CostResult, Error> { - check_grovedb_v0_with_cost!( - "open_batch_merk_at_path", - grove_version - .grovedb_versions - .apply_batch - .open_batch_merk_at_path - ); - let mut local_cost = OperationCost::default(); - let storage = self - .db - .get_storage_context(path.clone(), Some(storage_batch)) - .unwrap_add_cost(&mut local_cost); - - if new_merk { - let merk_type = if path.is_root() { - MerkType::BaseMerk - } else { - MerkType::LayeredMerk - }; - Ok(Merk::open_empty(storage, merk_type, TreeType::NormalTree)) - .wrap_with_cost(local_cost) - } else if let Some((base_path, last)) = path.derive_parent() { - let parent_storage = self - .db - .get_storage_context(base_path, Some(storage_batch)) - .unwrap_add_cost(&mut local_cost); - let element = cost_return_on_error!( - &mut local_cost, - Element::get_from_storage(&parent_storage, last, grove_version) - ); - if let Some((root_key, tree_type)) = element.root_key_and_tree_type_owned() { - Merk::open_layered_with_root_key( - storage, - root_key, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .add_cost(local_cost) - } else { - Err(Error::CorruptedData( - "cannot open a subtree as parent exists but is not a tree".to_owned(), - )) - .wrap_with_cost(local_cost) - } - } else { - Merk::open_base( - storage, - TreeType::NormalTree, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| Error::CorruptedData("cannot open a subtree".to_owned())) - .add_cost(local_cost) - } - } - /// Applies batch of operations on GroveDB pub fn apply_batch_with_element_flags_update( &self, @@ -2379,6 +2309,7 @@ impl GroveDb { .apply_batch_with_element_flags_update ); let mut cost = OperationCost::default(); + let tx = TxRef::new(&self.db, transaction); if ops.is_empty() { return Ok(()).wrap_with_cost(cost); @@ -2416,93 +2347,50 @@ impl GroveDb { // 5. Remove operation from the tree, repeat until there are operations to do; // 6. Add root leaves save operation to the batch // 7. 
Apply storage_cost batch - if let Some(tx) = transaction { - cost_return_on_error!( - &mut cost, - self.apply_body( - ops, - batch_apply_options, - update_element_flags_function, - split_removal_bytes_function, - |path, new_merk| { - self.open_batch_transactional_merk_at_path( - &storage_batch, - path.into(), - tx, - new_merk, - grove_version, - ) - }, - grove_version - ) - ); - - // TODO: compute batch costs - cost_return_on_error!( - &mut cost, - self.db - .commit_multi_context_batch(storage_batch, Some(tx)) - .map_err(|e| e.into()) - ); - - // Keep this commented for easy debugging in the future. - // let issues = self - // .visualize_verify_grovedb(Some(tx), true, - // &Default::default()) .unwrap(); - // if issues.len() > 0 { - // println!( - // "tx_issues: {}", - // issues - // .iter() - // .map(|(hash, (a, b, c))| format!("{}: {} {} {}", - // hash, a, b, c)) .collect::>() - // .join(" | ") - // ); - // } - } else { - cost_return_on_error!( - &mut cost, - self.apply_body( - ops, - batch_apply_options, - update_element_flags_function, - split_removal_bytes_function, - |path, new_merk| { - self.open_batch_merk_at_path( - &storage_batch, - path.into(), - new_merk, - grove_version, - ) - }, - grove_version - ) - ); + cost_return_on_error!( + &mut cost, + self.apply_body( + ops, + batch_apply_options, + update_element_flags_function, + split_removal_bytes_function, + |path, new_merk| { + self.open_batch_transactional_merk_at_path( + &storage_batch, + path.into(), + tx.as_ref(), + new_merk, + grove_version, + ) + }, + grove_version + ) + ); - // TODO: compute batch costs - cost_return_on_error!( - &mut cost, - self.db - .commit_multi_context_batch(storage_batch, None) - .map_err(|e| e.into()) - ); + // TODO: compute batch costs + cost_return_on_error!( + &mut cost, + self.db + .commit_multi_context_batch(storage_batch, Some(tx.as_ref())) + .map_err(|e| e.into()) + ); - // Keep this commented for easy debugging in the future. - // let issues = self - // .visualize_verify_grovedb(None, true, &Default::default()) - // .unwrap(); - // if issues.len() > 0 { - // println!( - // "non_tx_issues: {}", - // issues - // .iter() - // .map(|(hash, (a, b, c))| format!("{}: {} {} {}", - // hash, a, b, c)) .collect::>() - // .join(" | ") - // ); - // } - } - Ok(()).wrap_with_cost(cost) + // Keep this commented for easy debugging in the future. + // let issues = self + // .visualize_verify_grovedb(Some(tx), true, + // &Default::default()) .unwrap(); + // if issues.len() > 0 { + // println!( + // "tx_issues: {}", + // issues + // .iter() + // .map(|(hash, (a, b, c))| format!("{}: {} {} {}", + // hash, a, b, c)) .collect::>() + // .join(" | ") + // ); + // } + + tx.commit_local().wrap_with_cost(cost) } /// Applies a partial batch of operations on GroveDB @@ -2541,6 +2429,7 @@ impl GroveDb { .apply_partial_batch_with_element_flags_update ); let mut cost = OperationCost::default(); + let tx = TxRef::new(&self.db, transaction); if ops.is_empty() { return Ok(()).wrap_with_cost(cost); @@ -2582,177 +2471,93 @@ impl GroveDb { // 5. Remove operation from the tree, repeat until there are operations to do; // 6. Add root leaves save operation to the batch // 7. 
Apply storage_cost batch - if let Some(tx) = transaction { - let left_over_operations = cost_return_on_error!( - &mut cost, - self.apply_body( - ops, - Some(batch_apply_options.clone()), - &mut update_element_flags_function, - &mut split_removal_bytes_function, - |path, new_merk| { - self.open_batch_transactional_merk_at_path( - &storage_batch, - path.into(), - tx, - new_merk, - grove_version, - ) - }, - grove_version - ) - ); - // if we paused at the root height, the left over operations would be to replace - // a lot of leaf nodes in the root tree - - // let's build the write batch - let (mut write_batch, mut pending_costs) = cost_return_on_error!( - &mut cost, - self.db - .build_write_batch(storage_batch) - .map_err(|e| e.into()) - ); - - let total_current_costs = cost.clone().add(pending_costs.clone()); - - // todo: estimate root costs - - // at this point we need to send the pending costs back - // we will get GroveDB a new set of GroveDBOps - - let new_operations = cost_return_on_error_no_add!( - &cost, - add_on_operations(&total_current_costs, &left_over_operations) - ); - - // we are trying to finalize - batch_apply_options.batch_pause_height = None; - - let continue_storage_batch = StorageBatch::new(); - - cost_return_on_error!( - &mut cost, - self.continue_partial_apply_body( - left_over_operations, - new_operations, - Some(batch_apply_options), - update_element_flags_function, - split_removal_bytes_function, - |path, new_merk| { - self.open_batch_transactional_merk_at_path( - &continue_storage_batch, - path.into(), - tx, - new_merk, - grove_version, - ) - }, - grove_version - ) - ); - - // let's build the write batch - let continued_pending_costs = cost_return_on_error!( - &mut cost, - self.db - .continue_write_batch(&mut write_batch, continue_storage_batch) - .map_err(|e| e.into()) - ); - - pending_costs.add_assign(continued_pending_costs); + let left_over_operations = cost_return_on_error!( + &mut cost, + self.apply_body( + ops, + Some(batch_apply_options.clone()), + &mut update_element_flags_function, + &mut split_removal_bytes_function, + |path, new_merk| { + self.open_batch_transactional_merk_at_path( + &storage_batch, + path.into(), + tx.as_ref(), + new_merk, + grove_version, + ) + }, + grove_version + ) + ); + // if we paused at the root height, the left over operations would be to replace + // a lot of leaf nodes in the root tree - // TODO: compute batch costs - cost_return_on_error!( - &mut cost, - self.db - .commit_db_write_batch(write_batch, pending_costs, Some(tx)) - .map_err(|e| e.into()) - ); - } else { - let left_over_operations = cost_return_on_error!( - &mut cost, - self.apply_body( - ops, - Some(batch_apply_options.clone()), - &mut update_element_flags_function, - &mut split_removal_bytes_function, - |path, new_merk| { - self.open_batch_merk_at_path( - &storage_batch, - path.into(), - new_merk, - grove_version, - ) - }, - grove_version - ) - ); + // let's build the write batch + let (mut write_batch, mut pending_costs) = cost_return_on_error!( + &mut cost, + self.db + .build_write_batch(storage_batch) + .map_err(|e| e.into()) + ); - // if we paused at the root height, the left over operations would be to replace - // a lot of leaf nodes in the root tree + let total_current_costs = cost.clone().add(pending_costs.clone()); - // let's build the write batch - let (mut write_batch, mut pending_costs) = cost_return_on_error!( - &mut cost, - self.db - .build_write_batch(storage_batch) - .map_err(|e| e.into()) - ); + // todo: estimate root costs - let 
total_current_costs = cost.clone().add(pending_costs.clone()); + // at this point we need to send the pending costs back + // we will get GroveDB a new set of GroveDBOps - // at this point we need to send the pending costs back - // we will get GroveDB a new set of GroveDBOps + let new_operations = cost_return_on_error_no_add!( + cost, + add_on_operations(&total_current_costs, &left_over_operations) + ); - let new_operations = cost_return_on_error_no_add!( - &cost, - add_on_operations(&total_current_costs, &left_over_operations) - ); + // we are trying to finalize + batch_apply_options.batch_pause_height = None; - // we are trying to finalize - batch_apply_options.batch_pause_height = None; + let continue_storage_batch = StorageBatch::new(); - let continue_storage_batch = StorageBatch::new(); + cost_return_on_error!( + &mut cost, + self.continue_partial_apply_body( + left_over_operations, + new_operations, + Some(batch_apply_options), + update_element_flags_function, + split_removal_bytes_function, + |path, new_merk| { + self.open_batch_transactional_merk_at_path( + &continue_storage_batch, + path.into(), + tx.as_ref(), + new_merk, + grove_version, + ) + }, + grove_version + ) + ); - cost_return_on_error!( - &mut cost, - self.continue_partial_apply_body( - left_over_operations, - new_operations, - Some(batch_apply_options), - update_element_flags_function, - split_removal_bytes_function, - |path, new_merk| { - self.open_batch_merk_at_path( - &continue_storage_batch, - path.into(), - new_merk, - grove_version, - ) - }, - grove_version - ) - ); + // let's build the write batch + let continued_pending_costs = cost_return_on_error!( + &mut cost, + self.db + .continue_write_batch(&mut write_batch, continue_storage_batch) + .map_err(|e| e.into()) + ); - // let's build the write batch - let continued_pending_costs = cost_return_on_error!( - &mut cost, - self.db - .continue_write_batch(&mut write_batch, continue_storage_batch) - .map_err(|e| e.into()) - ); + pending_costs.add_assign(continued_pending_costs); - pending_costs.add_assign(continued_pending_costs); + // TODO: compute batch costs + cost_return_on_error!( + &mut cost, + self.db + .commit_db_write_batch(write_batch, pending_costs, Some(tx.as_ref())) + .map_err(|e| e.into()) + ); - // TODO: compute batch costs - cost_return_on_error!( - &mut cost, - self.db - .commit_db_write_batch(write_batch, pending_costs, None) - .map_err(|e| e.into()) - ); - } - Ok(()).wrap_with_cost(cost) + tx.commit_local().wrap_with_cost(cost) } #[cfg(feature = "estimated_costs")] diff --git a/grovedb/src/debugger.rs b/grovedb/src/debugger.rs index 696a3a6ac..50dbcce07 100644 --- a/grovedb/src/debugger.rs +++ b/grovedb/src/debugger.rs @@ -227,9 +227,15 @@ async fn fetch_node( }): Json>, ) -> Result>, AppError> { let db = state.get_snapshot(session_id).await?; + let transaction = db.start_transaction(); let merk = db - .open_non_transactional_merk_at_path(path.as_slice().into(), None, GroveVersion::latest()) + .open_transactional_merk_at_path( + path.as_slice().into(), + &transaction, + None, + GroveVersion::latest(), + ) .unwrap()?; let node = merk.get_node_dbg(&key)?; @@ -249,9 +255,15 @@ async fn fetch_root_node( }): Json>, ) -> Result>, AppError> { let db = state.get_snapshot(session_id).await?; + let transaction = db.start_transaction(); let merk = db - .open_non_transactional_merk_at_path(SubtreePath::empty(), None, GroveVersion::latest()) + .open_transactional_merk_at_path( + SubtreePath::empty(), + &transaction, + None, + GroveVersion::latest(), + ) 
.unwrap()?; let node = merk.get_root_node_dbg()?; @@ -312,6 +324,7 @@ fn query_result_to_grovedbg( query_result: QueryResultElements, ) -> Result, crate::Error> { let mut result = Vec::new(); + let transaction = db.start_transaction(); let mut last_merk: Option<(Vec>, grovedb_merk::Merk<_>)> = None; @@ -322,8 +335,9 @@ fn query_result_to_grovedbg( _ => { last_merk = Some(( path.clone(), - db.open_non_transactional_merk_at_path( + db.open_transactional_merk_at_path( path.as_slice().into(), + &transaction, None, GroveVersion::latest(), ) diff --git a/grovedb/src/element/delete.rs b/grovedb/src/element/delete.rs index 17095d72f..9243eb398 100644 --- a/grovedb/src/element/delete.rs +++ b/grovedb/src/element/delete.rs @@ -14,8 +14,6 @@ use grovedb_storage::StorageContext; #[cfg(feature = "minimal")] use grovedb_version::check_grovedb_v0_with_cost; #[cfg(feature = "minimal")] -use grovedb_version::error::GroveVersionError; -#[cfg(feature = "minimal")] use grovedb_version::version::GroveVersion; #[cfg(feature = "minimal")] diff --git a/grovedb/src/element/exists.rs b/grovedb/src/element/exists.rs index 63dcfe4bd..b57d5c5c7 100644 --- a/grovedb/src/element/exists.rs +++ b/grovedb/src/element/exists.rs @@ -4,9 +4,7 @@ use grovedb_costs::{CostResult, CostsExt, OperationCost}; use grovedb_merk::Merk; use grovedb_storage::StorageContext; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; use crate::{Element, Error}; diff --git a/grovedb/src/element/get.rs b/grovedb/src/element/get.rs index 40868c77a..7b062bc71 100644 --- a/grovedb/src/element/get.rs +++ b/grovedb/src/element/get.rs @@ -84,7 +84,7 @@ impl Element { .map_err(|e| Error::CorruptedData(e.to_string())) ); let element = cost_return_on_error_no_add!( - &cost, + cost, value_opt .map(|value| { Self::deserialize(value.as_slice(), grove_version).map_err(|_| { @@ -164,7 +164,7 @@ impl Element { .map_err(|e| Error::CorruptedData(e.to_string())) ); let maybe_tree_inner: Option = cost_return_on_error_no_add!( - &cost, + cost, node_value_opt .map(|node_value| { Decode::decode(node_value.as_slice()) @@ -175,7 +175,7 @@ impl Element { let value = maybe_tree_inner.map(|tree_inner| tree_inner.value_as_owned()); let element = cost_return_on_error_no_add!( - &cost, + cost, value .as_ref() .map(|value| { @@ -248,7 +248,7 @@ impl Element { .map_err(|e| Error::CorruptedData(e.to_string())) ); let maybe_tree_inner: Option = cost_return_on_error_no_add!( - &cost, + cost, node_value_opt .map(|node_value| { Decode::decode(node_value.as_slice()) @@ -264,7 +264,7 @@ impl Element { }; let node_type = tree_feature_type.node_type(); let element = cost_return_on_error_no_add!( - &cost, + cost, Self::deserialize(value.as_slice(), grove_version).map_err(|_| { Error::CorruptedData(String::from("unable to deserialize element")) }) @@ -340,7 +340,7 @@ impl Element { ); let absolute_element = cost_return_on_error_no_add!( - &cost, + cost, element.convert_if_reference_to_absolute_reference(path, Some(key.as_ref())) ); @@ -374,6 +374,47 @@ impl Element { Ok(value_hash).wrap_with_cost(cost) } + + #[cfg(feature = "minimal")] + /// Get an element and its value hash from Merk under a key + pub fn get_with_value_hash<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( + merk: &Merk, + key: K, + allow_cache: bool, + grove_version: &GroveVersion, + ) -> CostResult<(Element, Hash), Error> { + check_grovedb_v0_with_cost!( + "get_with_value_hash", + 
grove_version.grovedb_versions.element.get_with_value_hash + ); + let mut cost = OperationCost::default(); + + let Some((value, value_hash)) = cost_return_on_error!( + &mut cost, + merk.get_value_and_value_hash( + key.as_ref(), + allow_cache, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .map_err(|e| Error::CorruptedData(e.to_string())) + ) else { + return Err(Error::PathKeyNotFound(format!( + "get: key \"{}\" not found in Merk that has a root key [{}] and is of type {}", + hex::encode(key), + merk.root_key() + .map(hex::encode) + .unwrap_or("None".to_string()), + merk.merk_type + ))) + .wrap_with_cost(cost); + }; + + Self::deserialize(value.as_slice(), grove_version) + .map_err(|_| Error::CorruptedData(String::from("unable to deserialize element"))) + .map(|e| (e, value_hash)) + .wrap_with_cost(cost) + } } #[cfg(feature = "minimal")] @@ -390,8 +431,10 @@ mod tests { let grove_version = GroveVersion::latest(); let storage = TempStorage::new(); let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); + let ctx = storage - .get_storage_context(SubtreePath::empty(), Some(&batch)) + .get_transactional_storage_context(SubtreePath::empty(), Some(&batch), &transaction) .unwrap(); let mut merk = Merk::open_base( ctx, @@ -411,12 +454,12 @@ mod tests { .expect("expected successful insertion 2"); storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .unwrap(); let ctx = storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(); let mut merk = Merk::open_base( ctx, diff --git a/grovedb/src/element/helpers.rs b/grovedb/src/element/helpers.rs index d5cf03219..2c062935f 100644 --- a/grovedb/src/element/helpers.rs +++ b/grovedb/src/element/helpers.rs @@ -20,7 +20,7 @@ use grovedb_merk::{ }, }; #[cfg(feature = "minimal")] -use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use grovedb_version::{check_grovedb_v0, version::GroveVersion}; #[cfg(feature = "minimal")] use integer_encoding::VarInt; diff --git a/grovedb/src/element/insert.rs b/grovedb/src/element/insert.rs index 942a6fd59..85dad9f95 100644 --- a/grovedb/src/element/insert.rs +++ b/grovedb/src/element/insert.rs @@ -7,9 +7,7 @@ use grovedb_costs::{ }; use grovedb_merk::{BatchEntry, Error as MerkError, Merk, MerkOptions, Op, TreeFeatureType}; use grovedb_storage::StorageContext; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; use integer_encoding::VarInt; use crate::{Element, Element::SumItem, Error, Hash}; @@ -397,10 +395,10 @@ impl Element { let cost = OperationCost::default(); let merk_feature_type = - cost_return_on_error_no_add!(&cost, self.get_feature_type(merk.tree_type)); + cost_return_on_error_no_add!(cost, self.get_feature_type(merk.tree_type)); let tree_cost = - cost_return_on_error_no_add!(&cost, self.get_specialized_cost(grove_version)); + cost_return_on_error_no_add!(cost, self.get_specialized_cost(grove_version)); let cost = tree_cost + self.get_flags().as_ref().map_or(0, |flags| { diff --git a/grovedb/src/element/query.rs b/grovedb/src/element/query.rs index 68e570562..26a5143ff 100644 --- a/grovedb/src/element/query.rs +++ b/grovedb/src/element/query.rs @@ -15,15 +15,11 @@ use grovedb_merk::proofs::query::SubqueryBranch; #[cfg(feature = "minimal")] use 
grovedb_merk::proofs::Query; #[cfg(feature = "minimal")] -use grovedb_merk::tree_type::TreeType; -#[cfg(feature = "minimal")] use grovedb_path::SubtreePath; #[cfg(feature = "minimal")] use grovedb_storage::{rocksdb_storage::RocksDbStorage, RawIterator, StorageContext}; #[cfg(feature = "minimal")] -use grovedb_version::{ - check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0, check_grovedb_v0_with_cost, version::GroveVersion}; #[cfg(feature = "minimal")] use crate::operations::proof::util::hex_to_ascii; @@ -41,7 +37,6 @@ use crate::{ QueryPathKeyElementTrioResultType, }, }, - util::{merk_optional_tx, merk_optional_tx_internal_error, storage_context_optional_tx}, Error, PathQuery, TransactionArg, }; #[cfg(feature = "minimal")] @@ -453,6 +448,8 @@ impl Element { args: PathQueryPushArgs, grove_version: &GroveVersion, ) -> CostResult<(), Error> { + use crate::util::{compat, TxRef}; + check_grovedb_v0_with_cost!( "path_query_push", grove_version.grovedb_versions.element.path_query_push @@ -477,6 +474,9 @@ impl Element { limit, offset, } = args; + + let tx = TxRef::new(storage, transaction); + let QueryOptions { allow_get_raw, allow_cache, @@ -486,7 +486,7 @@ impl Element { if element.is_any_tree() { let mut path_vec = path.to_vec(); let key = cost_return_on_error_no_add!( - &cost, + cost, key.ok_or(Error::MissingParameter( "the key must be provided when using a subquery path", )) @@ -534,89 +534,62 @@ impl Element { path_vec.extend(subquery_path_front_keys.iter().map(|k| k.as_slice())); let subtree_path: SubtreePath<_> = path_vec.as_slice().into(); + let subtree = cost_return_on_error!( + &mut cost, + compat::merk_optional_tx( + storage, + subtree_path, + tx.as_ref(), + None, + grove_version + ) + ); match result_type { QueryElementResultType => { - merk_optional_tx!( - &mut cost, - storage, - subtree_path, - None, - transaction, - subtree, - grove_version, - { - results.push(QueryResultElement::ElementResultItem( - cost_return_on_error!( - &mut cost, - Element::get_with_absolute_refs( - &subtree, - path_vec.as_slice(), - subquery_path_last_key.as_slice(), - allow_cache, - grove_version, - ) - ), - )); - } - ); + results.push(QueryResultElement::ElementResultItem( + cost_return_on_error!( + &mut cost, + Element::get_with_absolute_refs( + &subtree, + path_vec.as_slice(), + subquery_path_last_key.as_slice(), + allow_cache, + grove_version, + ) + ), + )); } QueryKeyElementPairResultType => { - merk_optional_tx!( - &mut cost, - storage, - subtree_path, - None, - transaction, - subtree, - grove_version, - { - results.push(QueryResultElement::KeyElementPairResultItem( - ( - subquery_path_last_key.to_vec(), - cost_return_on_error!( - &mut cost, - Element::get_with_absolute_refs( - &subtree, - path_vec.as_slice(), - subquery_path_last_key.as_slice(), - allow_cache, - grove_version, - ) - ), - ), - )); - } - ); + results.push(QueryResultElement::KeyElementPairResultItem(( + subquery_path_last_key.to_vec(), + cost_return_on_error!( + &mut cost, + Element::get_with_absolute_refs( + &subtree, + path_vec.as_slice(), + subquery_path_last_key.as_slice(), + allow_cache, + grove_version, + ) + ), + ))); } QueryPathKeyElementTrioResultType => { - merk_optional_tx!( - &mut cost, - storage, - subtree_path, - None, - transaction, - subtree, - grove_version, - { - results.push( - QueryResultElement::PathKeyElementTrioResultItem(( - path_vec.iter().map(|p| p.to_vec()).collect(), - subquery_path_last_key.to_vec(), - 
cost_return_on_error!( - &mut cost, - Element::get_with_absolute_refs( - &subtree, - path_vec.as_slice(), - subquery_path_last_key.as_slice(), - allow_cache, - grove_version, - ) - ), - )), - ); - } - ); + results.push(QueryResultElement::PathKeyElementTrioResultItem(( + path_vec.iter().map(|p| p.to_vec()).collect(), + subquery_path_last_key.to_vec(), + cost_return_on_error!( + &mut cost, + Element::get_with_absolute_refs( + &subtree, + path_vec.as_slice(), + subquery_path_last_key.as_slice(), + allow_cache, + grove_version, + ) + ), + ))); } } } else { @@ -634,7 +607,7 @@ impl Element { } } else if allow_get_raw { cost_return_on_error_no_add!( - &cost, + cost, Element::basic_push( PathQueryPushArgs { storage, @@ -664,7 +637,7 @@ impl Element { } } else { cost_return_on_error_no_add!( - &cost, + cost, Element::basic_push( PathQueryPushArgs { storage, @@ -745,7 +718,12 @@ impl Element { add_element_function: fn(PathQueryPushArgs, &GroveVersion) -> CostResult<(), Error>, grove_version: &GroveVersion, ) -> CostResult<(), Error> { - use crate::error::GroveDbErrorExt; + use grovedb_storage::Storage; + + use crate::{ + error::GroveDbErrorExt, + util::{compat, TxRef}, + }; check_grovedb_v0_with_cost!( "query_item", @@ -753,26 +731,36 @@ impl Element { ); let mut cost = OperationCost::default(); + let tx = TxRef::new(storage, transaction); let subtree_path: SubtreePath<_> = path.into(); if !item.is_range() { // this is a query on a key if let QueryItem::Key(key) = item { - let element_res = merk_optional_tx_internal_error!( - &mut cost, + let subtree_res = compat::merk_optional_tx( storage, subtree_path, + tx.as_ref(), None, - transaction, - subtree, grove_version, - { + ); + + if subtree_res.value().is_err() + && !matches!(subtree_res.value(), Err(Error::PathParentLayerNotFound(..))) + { + // simulating old macro's behavior by letting this particular kind of error to + // pass and to short circuit with the rest + return subtree_res.map_ok(|_| ()); + } + + let element_res = subtree_res + .flat_map_ok(|subtree| { Element::get(&subtree, key, query_options.allow_cache, grove_version) .add_context(format!("path is {}", path_as_slices_hex_to_ascii(path))) - .unwrap_add_cost(&mut cost) - } - ); + }) + .unwrap_add_cost(&mut cost); + match element_res { Ok(element) => { let (subquery_path, subquery) = @@ -829,74 +817,74 @@ impl Element { } } else { // this is a query on a range - storage_context_optional_tx!(storage, subtree_path, None, transaction, ctx, { - let ctx = ctx.unwrap_add_cost(&mut cost); - let mut iter = ctx.raw_iter(); + let ctx = storage + .get_transactional_storage_context(subtree_path, None, tx.as_ref()) + .unwrap_add_cost(&mut cost); - item.seek_for_iter(&mut iter, sized_query.query.left_to_right) - .unwrap_add_cost(&mut cost); + let mut iter = ctx.raw_iter(); + + item.seek_for_iter(&mut iter, sized_query.query.left_to_right) + .unwrap_add_cost(&mut cost); - while item - .iter_is_valid_for_type(&iter, *limit, sized_query.query.left_to_right) + while item + .iter_is_valid_for_type(&iter, *limit, sized_query.query.left_to_right) + .unwrap_add_cost(&mut cost) + { + let element = cost_return_on_error_no_add!( + cost, + raw_decode( + iter.value() + .unwrap_add_cost(&mut cost) + .expect("if key exists then value should too"), + grove_version + ) + ); + let key = iter + .key() .unwrap_add_cost(&mut cost) - { - let element = cost_return_on_error_no_add!( - &cost, - raw_decode( - iter.value() - .unwrap_add_cost(&mut cost) - .expect("if key exists then value should too"), - grove_version - ) - ); - 
let key = iter - .key() - .unwrap_add_cost(&mut cost) - .expect("key should exist"); - let (subquery_path, subquery) = - Self::subquery_paths_and_value_for_sized_query(sized_query, key); - let result_with_cost = add_element_function( - PathQueryPushArgs { - storage, - transaction, - key: Some(key), - element, - path, - subquery_path, - subquery, - left_to_right: sized_query.query.left_to_right, - query_options, - result_type, - results, - limit, - offset, - }, - grove_version, - ); - let result = result_with_cost.unwrap_add_cost(&mut cost); - match result { - Ok(x) => x, - Err(e) => { - if !query_options.error_if_intermediate_path_tree_not_present { - match e { - Error::PathKeyNotFound(_) - | Error::PathParentLayerNotFound(_) => (), - _ => return Err(e).wrap_with_cost(cost), - } - } else { - return Err(e).wrap_with_cost(cost); + .expect("key should exist"); + let (subquery_path, subquery) = + Self::subquery_paths_and_value_for_sized_query(sized_query, key); + let result_with_cost = add_element_function( + PathQueryPushArgs { + storage, + transaction, + key: Some(key), + element, + path, + subquery_path, + subquery, + left_to_right: sized_query.query.left_to_right, + query_options, + result_type, + results, + limit, + offset, + }, + grove_version, + ); + let result = result_with_cost.unwrap_add_cost(&mut cost); + match result { + Ok(x) => x, + Err(e) => { + if !query_options.error_if_intermediate_path_tree_not_present { + match e { + Error::PathKeyNotFound(_) | Error::PathParentLayerNotFound(_) => (), + _ => return Err(e).wrap_with_cost(cost), } + } else { + return Err(e).wrap_with_cost(cost); } } - if sized_query.query.left_to_right { - iter.next().unwrap_add_cost(&mut cost); - } else { - iter.prev().unwrap_add_cost(&mut cost); - } - cost.seek_count += 1; } - Ok(()) - }) + if sized_query.query.left_to_right { + iter.next().unwrap_add_cost(&mut cost); + } else { + iter.prev().unwrap_add_cost(&mut cost); + } + cost.seek_count += 1; + } + Ok(()) } .wrap_with_cost(cost) } @@ -1204,9 +1192,12 @@ mod tests { let batch = StorageBatch::new(); let storage = &db.db; + let transaction = db.start_transaction(); + let mut merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1235,6 +1226,8 @@ mod tests { .unwrap() .expect("expected successful batch commit"); + transaction.commit().unwrap(); + // Test range inclusive query let mut query = Query::new(); query.insert_range(b"a".to_vec()..b"d".to_vec()); @@ -1316,9 +1309,12 @@ mod tests { let batch = StorageBatch::new(); let storage = &db.db; + let transaction = db.start_transaction(); + let mut merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1347,6 +1343,8 @@ mod tests { .unwrap() .expect("expected successful batch commit"); + transaction.commit().unwrap(); + // Test range inclusive query let mut query = Query::new_with_direction(true); query.insert_range_inclusive(b"a".to_vec()..=b"d".to_vec()); @@ -1718,7 +1716,7 @@ impl ElementsIterator { .unwrap_add_cost(&mut cost) .zip(self.raw_iter.value().unwrap_add_cost(&mut cost)) { - let element = cost_return_on_error_no_add!(&cost, raw_decode(value, grove_version)); + let element = cost_return_on_error_no_add!(cost, raw_decode(value, grove_version)); let key_vec = key.to_vec(); self.raw_iter.next().unwrap_add_cost(&mut cost); Some((key_vec, element)) diff --git 
a/grovedb/src/element/serialize.rs b/grovedb/src/element/serialize.rs index d0974cc93..d65a0670b 100644 --- a/grovedb/src/element/serialize.rs +++ b/grovedb/src/element/serialize.rs @@ -2,7 +2,7 @@ //! Implements serialization functions in Element use bincode::config; -use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use grovedb_version::{check_grovedb_v0, version::GroveVersion}; #[cfg(any(feature = "minimal", feature = "verify"))] use crate::{Element, Error}; diff --git a/grovedb/src/estimated_costs/average_case_costs.rs b/grovedb/src/estimated_costs/average_case_costs.rs index e779d9b83..fc0da61a0 100644 --- a/grovedb/src/estimated_costs/average_case_costs.rs +++ b/grovedb/src/estimated_costs/average_case_costs.rs @@ -118,7 +118,7 @@ impl GroveDb { let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_size = cost_return_on_error_no_add!( - &cost, + cost, estimated_layer_information .estimated_layer_sizes .layered_flags_size() @@ -154,7 +154,7 @@ impl GroveDb { let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_size = cost_return_on_error_no_add!( - &cost, + cost, estimated_layer_information .estimated_layer_sizes .layered_flags_size() @@ -235,7 +235,7 @@ impl GroveDb { let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_size = cost_return_on_error_no_add!( - &cost, + cost, estimated_layer_information .estimated_layer_sizes .layered_flags_size() @@ -288,7 +288,7 @@ impl GroveDb { add_cost_case_merk_insert( &mut cost, key_len, - cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, + cost_return_on_error_no_add!(cost, value.serialized_size(grove_version)) as u32, in_tree_type, ) } @@ -344,7 +344,7 @@ impl GroveDb { let sum_item_cost_size = if value.is_sum_item() { SUM_ITEM_COST_SIZE } else { - cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32 + cost_return_on_error_no_add!(cost, value.serialized_size(grove_version)) as u32 }; let value_len = sum_item_cost_size + flags_len; add_cost_case_merk_replace_same_size(&mut cost, key_len, value_len, in_tree_type) @@ -352,7 +352,7 @@ impl GroveDb { _ => add_cost_case_merk_replace_same_size( &mut cost, key_len, - cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, + cost_return_on_error_no_add!(cost, value.serialized_size(grove_version)) as u32, in_tree_type, ), }; @@ -395,8 +395,7 @@ impl GroveDb { }); // Items need to be always the same serialized size for this to work let item_cost_size = - cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) - as u32; + cost_return_on_error_no_add!(cost, value.serialized_size(grove_version)) as u32; let value_len = item_cost_size + flags_len; add_cost_case_merk_patch( &mut cost, @@ -439,7 +438,7 @@ impl GroveDb { let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let value_size = cost_return_on_error_no_add!( - &cost, + cost, estimated_layer_information .estimated_layer_sizes .value_with_feature_and_flags_size(grove_version) @@ -638,10 +637,11 @@ mod test { let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); let mut merk = Merk::open_base( storage - .get_storage_context(EMPTY_PATH, Some(&batch)) + .get_transactional_storage_context(EMPTY_PATH, Some(&batch), &transaction) .unwrap(), 
TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -656,13 +656,15 @@ mod test { // this consumes the batch so storage contexts and merks will be dropped storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .unwrap(); // Reopen merk: this time, only root node is loaded to memory let merk = Merk::open_base( - storage.get_storage_context(EMPTY_PATH, None).unwrap(), + storage + .get_transactional_storage_context(EMPTY_PATH, None, &transaction) + .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, diff --git a/grovedb/src/estimated_costs/worst_case_costs.rs b/grovedb/src/estimated_costs/worst_case_costs.rs index c6f889f97..c5287c14c 100644 --- a/grovedb/src/estimated_costs/worst_case_costs.rs +++ b/grovedb/src/estimated_costs/worst_case_costs.rs @@ -19,9 +19,7 @@ use grovedb_merk::{ HASH_LENGTH, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; -use grovedb_version::{ - check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0, check_grovedb_v0_with_cost, version::GroveVersion}; use integer_encoding::VarInt; use crate::{ @@ -209,7 +207,7 @@ impl GroveDb { _ => add_cost_case_merk_insert( &mut cost, key_len, - cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, + cost_return_on_error_no_add!(cost, value.serialized_size(grove_version)) as u32, in_parent_tree_type, ), }; @@ -277,7 +275,7 @@ impl GroveDb { _ => add_cost_case_merk_replace( &mut cost, key_len, - cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, + cost_return_on_error_no_add!(cost, value.serialized_size(grove_version)) as u32, in_parent_tree_type, ), }; @@ -319,8 +317,7 @@ impl GroveDb { }); // Items need to be always the same serialized size for this to work let sum_item_cost_size = - cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) - as u32; + cost_return_on_error_no_add!(cost, value.serialized_size(grove_version)) as u32; let value_len = sum_item_cost_size + flags_len; add_cost_case_merk_patch( &mut cost, @@ -522,7 +519,9 @@ mod test { // Open a merk and insert 10 elements. let storage = TempStorage::new(); let batch = StorageBatch::new(); - let mut merk = empty_path_merk(&*storage, &batch, grove_version); + let transaction = storage.start_transaction(); + + let mut merk = empty_path_merk(&*storage, &transaction, &batch, grove_version); let merk_batch = make_batch_seq(1..10); merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None, grove_version) @@ -531,12 +530,12 @@ mod test { // this consumes the batch so storage contexts and merks will be dropped storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .unwrap(); // Reopen merk: this time, only root node is loaded to memory - let merk = empty_path_merk_read_only(&*storage, grove_version); + let merk = empty_path_merk_read_only(&*storage, &transaction, grove_version); // To simulate worst case, we need to pick a node that: // 1. 
Is not in memory diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 7b4e9c003..31c3dcfab 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -135,6 +135,8 @@ pub mod element; pub mod error; #[cfg(feature = "estimated_costs")] mod estimated_costs; +#[cfg(feature = "minimal")] +mod merk_cache; #[cfg(any(feature = "minimal", feature = "verify"))] pub mod operations; #[cfg(any(feature = "minimal", feature = "verify"))] @@ -197,10 +199,7 @@ use grovedb_storage::rocksdb_storage::PrefixedRocksDbImmediateStorageContext; #[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; #[cfg(feature = "minimal")] -use grovedb_storage::{ - rocksdb_storage::{PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext}, - StorageBatch, -}; +use grovedb_storage::{rocksdb_storage::PrefixedRocksDbTransactionContext, StorageBatch}; #[cfg(feature = "minimal")] use grovedb_storage::{Storage, StorageContext}; #[cfg(feature = "minimal")] @@ -213,6 +212,7 @@ pub use query::{PathQuery, SizedQuery}; use reference_path::path_from_reference_path_type; #[cfg(feature = "grovedbg")] use tokio::net::ToSocketAddrs; +use util::{compat, TxRef}; #[cfg(feature = "minimal")] use crate::element::helpers::raw_decode; @@ -221,8 +221,6 @@ pub use crate::error::Error; #[cfg(feature = "minimal")] use crate::operations::proof::util::hex_to_ascii; #[cfg(feature = "minimal")] -use crate::util::{root_merk_optional_tx, storage_context_optional_tx}; -#[cfg(feature = "minimal")] use crate::Error::MerkError; #[cfg(feature = "minimal")] @@ -298,58 +296,32 @@ impl GroveDb { where B: AsRef<[u8]> + 'b, { - let mut cost = OperationCost::default(); - - let storage = self - .db - .get_transactional_storage_context(path.clone(), batch, tx) - .unwrap_add_cost(&mut cost); - if let Some((parent_path, parent_key)) = path.derive_parent() { - let parent_storage = self - .db - .get_transactional_storage_context(parent_path.clone(), batch, tx) - .unwrap_add_cost(&mut cost); - let element = cost_return_on_error!( - &mut cost, - Element::get_from_storage(&parent_storage, parent_key, grove_version).map_err( - |e| { - Error::InvalidParentLayerPath(format!( - "could not get key {} for parent {:?} of subtree: {}", - hex::encode(parent_key), - DebugByteVectors(parent_path.to_vec()), - e - )) - } - ) - ); - if let Some((root_key, tree_type)) = element.root_key_and_tree_type_owned() { - Merk::open_layered_with_root_key( - storage, - root_key, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .add_cost(cost) - } else { - Err(Error::CorruptedPath( - "cannot open a subtree as parent exists but is not a tree".to_string(), + struct Compat; + + impl compat::OpenMerkErrorsCompat for Compat { + fn parent_key_not_found<'b, B: AsRef<[u8]>>( + e: Error, + parent_path: SubtreePath<'b, B>, + parent_key: &[u8], + ) -> Error { + Error::InvalidParentLayerPath(format!( + "could not get key {} for parent {:?} of subtree: {}", + hex::encode(parent_key), + DebugByteVectors(parent_path.to_vec()), + e )) - .wrap_with_cost(cost) } - } else { - Merk::open_base( - storage, - TreeType::NormalTree, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) - .add_cost(cost) + + fn open_base_error() -> Error { + Error::CorruptedData("cannot open a the root subtree".to_owned()) + } + + fn parent_must_be_tree() 
-> Error { + Error::CorruptedData("cannot open a subtree with given root key".to_owned()) + } } + + compat::open_merk::<_, Compat>(&self.db, path, tx, batch, grove_version) } fn open_transactional_merk_by_prefix<'db>( @@ -461,110 +433,6 @@ impl GroveDb { } } - /// Opens the non-transactional Merk at the given path. Returns CostResult. - fn open_non_transactional_merk_at_path<'db, 'b, B>( - &'db self, - path: SubtreePath<'b, B>, - batch: Option<&'db StorageBatch>, - grove_version: &GroveVersion, - ) -> CostResult>, Error> - where - B: AsRef<[u8]> + 'b, - { - let mut cost = OperationCost::default(); - - let storage = self - .db - .get_storage_context(path.clone(), batch) - .unwrap_add_cost(&mut cost); - - if let Some((parent_path, parent_key)) = path.derive_parent() { - let parent_storage = self - .db - .get_storage_context(parent_path.clone(), batch) - .unwrap_add_cost(&mut cost); - let element = cost_return_on_error!( - &mut cost, - Element::get_from_storage(&parent_storage, parent_key, grove_version).map_err( - |e| { - Error::InvalidParentLayerPath(format!( - "could not get key {} for parent {:?} of subtree: {}", - hex::encode(parent_key), - DebugByteVectors(parent_path.to_vec()), - e - )) - } - ) - ); - if let Some((root_key, tree_type)) = element.root_key_and_tree_type_owned() { - Merk::open_layered_with_root_key( - storage, - root_key, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .add_cost(cost) - } else { - Err(Error::CorruptedPath( - "cannot open a subtree as parent exists but is not a tree".to_string(), - )) - .wrap_with_cost(cost) - } - } else { - Merk::open_base( - storage, - TreeType::NormalTree, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) - .add_cost(cost) - } - } - - fn open_non_transactional_merk_by_prefix<'db>( - &'db self, - prefix: SubtreePrefix, - root_key: Option>, - tree_type: TreeType, - batch: Option<&'db StorageBatch>, - grove_version: &GroveVersion, - ) -> CostResult>, Error> { - let mut cost = OperationCost::default(); - let storage = self - .db - .get_storage_context_by_subtree_prefix(prefix, batch) - .unwrap_add_cost(&mut cost); - if root_key.is_some() { - Merk::open_layered_with_root_key( - storage, - root_key, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| { - Error::CorruptedData( - "cannot open a subtree by prefix with given root key".to_owned(), - ) - }) - .add_cost(cost) - } else { - Merk::open_base( - storage, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| Error::CorruptedData("cannot open a root subtree by prefix".to_owned())) - .add_cost(cost) - } - } - /// Creates a checkpoint pub fn create_checkpoint>(&self, path: P) -> Result<(), Error> { self.db.create_checkpoint(path).map_err(|e| e.into()) @@ -576,27 +444,21 @@ impl GroveDb { &self, transaction: TransactionArg, grove_version: &GroveVersion, - ) -> CostResult, Error> { + ) -> CostResult>, Error> { let mut cost = OperationCost { ..Default::default() }; - root_merk_optional_tx!( - &mut cost, - self.db, - None, - transaction, - subtree, - grove_version, - { - let root_key = subtree.root_key().unwrap(); - Ok(root_key).wrap_with_cost(cost) - } - ) + let tx = TxRef::new(&self.db, transaction); + + let root_merk = + 
cost_return_on_error!(&mut cost, self.open_root_merk(tx.as_ref(), grove_version)); + + let root_key = root_merk.root_key(); + Ok(root_key).wrap_with_cost(cost) } /// Returns root hash of GroveDb. - /// Will be `None` if GroveDb is empty. pub fn root_hash( &self, transaction: TransactionArg, @@ -606,18 +468,34 @@ impl GroveDb { ..Default::default() }; - root_merk_optional_tx!( - &mut cost, - self.db, - None, - transaction, - subtree, - grove_version, - { - let root_hash = subtree.root_hash().unwrap_add_cost(&mut cost); - Ok(root_hash).wrap_with_cost(cost) - } - ) + let tx = TxRef::new(&self.db, transaction); + + let root_merk = + cost_return_on_error!(&mut cost, self.open_root_merk(tx.as_ref(), grove_version)); + + root_merk.root_hash().map(Ok).add_cost(cost) + } + + fn open_root_merk<'tx, 'db>( + &'db self, + tx: &'tx Transaction<'db>, + grove_version: &GroveVersion, + ) -> CostResult>, Error> { + self.db + .get_transactional_storage_context(SubtreePath::empty(), None, tx) + .flat_map(|storage_ctx| { + grovedb_merk::Merk::open_base( + storage_ctx, + TreeType::NormalTree, + Some(Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map(|merk_res| { + merk_res.map_err(|_| { + crate::Error::CorruptedData("cannot open a subtree".to_owned()) + }) + }) + }) } /// Method to propagate updated subtree key changes one level up inside a @@ -633,7 +511,7 @@ impl GroveDb { let mut cost = OperationCost::default(); let mut child_tree = cost_return_on_error_no_add!( - &cost, + cost, merk_cache.remove(path).ok_or(Error::CorruptedCodeExecution( "Merk Cache should always contain the last path", )) @@ -688,7 +566,7 @@ impl GroveDb { let mut cost = OperationCost::default(); let mut child_tree = cost_return_on_error_no_add!( - &cost, + cost, merk_cache .remove(&path) .ok_or(Error::CorruptedCodeExecution( @@ -731,59 +609,6 @@ impl GroveDb { Ok(()).wrap_with_cost(cost) } - /// Method to propagate updated subtree key changes one level up - fn propagate_changes_without_transaction<'b, B: AsRef<[u8]>>( - &self, - mut merk_cache: HashMap, Merk>, - path: SubtreePath<'b, B>, - batch: &StorageBatch, - grove_version: &GroveVersion, - ) -> CostResult<(), Error> { - let mut cost = OperationCost::default(); - - let mut child_tree = cost_return_on_error_no_add!( - &cost, - merk_cache - .remove(&path) - .ok_or(Error::CorruptedCodeExecution( - "Merk Cache should always contain the last path", - )) - ); - - let mut current_path: SubtreePath = path; - - while let Some((parent_path, parent_key)) = current_path.derive_parent() { - let mut parent_tree: Merk = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path( - parent_path.clone(), - Some(batch), - grove_version - ) - ); - let (root_hash, root_key, sum) = cost_return_on_error!( - &mut cost, - child_tree - .root_hash_key_and_aggregate_data() - .map_err(Error::MerkError) - ); - cost_return_on_error!( - &mut cost, - Self::update_tree_item_preserve_flag( - &mut parent_tree, - parent_key, - root_key, - root_hash, - sum, - grove_version, - ) - ); - child_tree = parent_tree; - current_path = parent_path; - } - Ok(()).wrap_with_cost(cost) - } - /// Updates a tree item and preserves flags. Returns CostResult. 
pub(crate) fn update_tree_item_preserve_flag<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( parent_tree: &mut Merk, @@ -1147,186 +972,20 @@ impl GroveDb { allow_cache: bool, grove_version: &GroveVersion, ) -> Result { - if let Some(transaction) = transaction { - let root_merk = self - .open_transactional_merk_at_path( - SubtreePath::empty(), - transaction, - None, - grove_version, - ) - .unwrap()?; - self.verify_merk_and_submerks_in_transaction( - root_merk, - &SubtreePath::empty(), - None, - transaction, - verify_references, - allow_cache, - grove_version, - ) - } else { - let root_merk = self - .open_non_transactional_merk_at_path(SubtreePath::empty(), None, grove_version) - .unwrap()?; - self.verify_merk_and_submerks( - root_merk, - &SubtreePath::empty(), - None, - verify_references, - allow_cache, - grove_version, - ) - } - } - - /// Verifies that the root hash of the given merk and all submerks match - /// those of the merk and submerks at the given path. Returns any issues. - fn verify_merk_and_submerks<'db, B: AsRef<[u8]>, S: StorageContext<'db>>( - &'db self, - merk: Merk, - path: &SubtreePath, - batch: Option<&'db StorageBatch>, - verify_references: bool, - allow_cache: bool, - grove_version: &GroveVersion, - ) -> Result { - let mut all_query = Query::new(); - all_query.insert_all(); - - let mut issues = HashMap::new(); - let mut element_iterator = KVIterator::new(merk.storage.raw_iter(), &all_query).unwrap(); - - while let Some((key, element_value)) = element_iterator.next_kv().unwrap() { - let element = raw_decode(&element_value, grove_version)?; - match element { - Element::SumTree(..) - | Element::Tree(..) - | Element::BigSumTree(..) - | Element::CountTree(..) - | Element::CountSumTree(..) => { - let (kv_value, element_value_hash) = merk - .get_value_and_value_hash( - &key, - allow_cache, - None::<&fn(&[u8], &GroveVersion) -> Option>, - grove_version, - ) - .unwrap() - .map_err(MerkError)? - .ok_or(Error::CorruptedData(format!( - "expected merk to contain value at key {} for {}", - hex_to_ascii(&key), - element.type_str() - )))?; - let new_path = path.derive_owned_with_child(key); - let new_path_ref = SubtreePath::from(&new_path); - - let inner_merk = self - .open_non_transactional_merk_at_path( - new_path_ref.clone(), - batch, - grove_version, - ) - .unwrap()?; - let root_hash = inner_merk.root_hash().unwrap(); - - let actual_value_hash = value_hash(&kv_value).unwrap(); - let combined_value_hash = combine_hash(&actual_value_hash, &root_hash).unwrap(); - - if combined_value_hash != element_value_hash { - issues.insert( - new_path.to_vec(), - (root_hash, combined_value_hash, element_value_hash), - ); - } - issues.extend(self.verify_merk_and_submerks( - inner_merk, - &new_path_ref, - batch, - verify_references, - true, - grove_version, - )?); - } - Element::Item(..) | Element::SumItem(..) => { - let (kv_value, element_value_hash) = merk - .get_value_and_value_hash( - &key, - allow_cache, - None::<&fn(&[u8], &GroveVersion) -> Option>, - grove_version, - ) - .unwrap() - .map_err(MerkError)? - .ok_or(Error::CorruptedData(format!( - "expected merk to contain value at key {} for {}", - hex_to_ascii(&key), - element.type_str() - )))?; - let actual_value_hash = value_hash(&kv_value).unwrap(); - if actual_value_hash != element_value_hash { - issues.insert( - path.derive_owned_with_child(key).to_vec(), - (actual_value_hash, element_value_hash, actual_value_hash), - ); - } - } - Element::Reference(ref reference_path, ..) 
=> { - // Skip this whole check if we don't `verify_references` - if !verify_references { - continue; - } - - // Merk we're checking: - let (kv_value, element_value_hash) = merk - .get_value_and_value_hash( - &key, - allow_cache, - None::<&fn(&[u8], &GroveVersion) -> Option>, - grove_version, - ) - .unwrap() - .map_err(MerkError)? - .ok_or(Error::CorruptedData(format!( - "expected merk to contain value at key {} for reference", - hex_to_ascii(&key) - )))?; - - let referenced_value_hash = { - let full_path = path_from_reference_path_type( - reference_path.clone(), - &path.to_vec(), - Some(&key), - )?; - let item = self - .follow_reference( - (full_path.as_slice()).into(), - allow_cache, - None, - grove_version, - ) - .unwrap()?; - item.value_hash(grove_version).unwrap()? - }; - - // Take the current item (reference) hash and combine it with referenced value's - // hash - - let self_actual_value_hash = value_hash(&kv_value).unwrap(); - let combined_value_hash = - combine_hash(&self_actual_value_hash, &referenced_value_hash).unwrap(); - - if combined_value_hash != element_value_hash { - issues.insert( - path.derive_owned_with_child(key).to_vec(), - (combined_value_hash, element_value_hash, combined_value_hash), - ); - } - } - } - } - Ok(issues) + let tx = TxRef::new(&self.db, transaction); + + let root_merk = self + .open_transactional_merk_at_path(SubtreePath::empty(), tx.as_ref(), None, grove_version) + .unwrap()?; + self.verify_merk_and_submerks_in_transaction( + root_merk, + &SubtreePath::empty(), + None, + tx.as_ref(), + verify_references, + allow_cache, + grove_version, + ) } fn verify_merk_and_submerks_in_transaction<'db, B: AsRef<[u8]>, S: StorageContext<'db>>( diff --git a/grovedb/src/merk_cache.rs b/grovedb/src/merk_cache.rs new file mode 100644 index 000000000..2b737f14c --- /dev/null +++ b/grovedb/src/merk_cache.rs @@ -0,0 +1,271 @@ +//! Module dedicated to keep necessary Merks in memory. + +use std::{ + cell::{Cell, UnsafeCell}, + collections::{btree_map::Entry, BTreeMap}, +}; + +use grovedb_costs::{cost_return_on_error, CostResult, CostsExt}; +use grovedb_merk::Merk; +use grovedb_path::SubtreePathBuilder; +use grovedb_storage::{rocksdb_storage::PrefixedRocksDbTransactionContext, StorageBatch}; +use grovedb_version::version::GroveVersion; + +use crate::{Error, GroveDb, Transaction}; + +type TxMerk<'db> = Merk>; + +/// We store Merk on heap to preserve its location as well as borrow flag +/// alongside. +type CachedMerkEntry<'db> = Box<(Cell, TxMerk<'db>)>; + +/// Structure to keep subtrees open in memory for repeated access. +pub(crate) struct MerkCache<'db, 'b, B: AsRef<[u8]>> { + db: &'db GroveDb, + pub(crate) version: &'db GroveVersion, + batch: Box, + tx: &'db Transaction<'db>, + merks: UnsafeCell, CachedMerkEntry<'db>>>, +} + +impl<'db, 'b, B: AsRef<[u8]>> MerkCache<'db, 'b, B> { + /// Initialize a new `MerkCache` instance + pub(crate) fn new( + db: &'db GroveDb, + tx: &'db Transaction<'db>, + version: &'db GroveVersion, + ) -> Self { + MerkCache { + db, + tx, + version, + merks: Default::default(), + batch: Default::default(), + } + } + + /// Gets a smart pointer to a cached Merk or opens one if needed. + pub(crate) fn get_merk<'c>( + &'c self, + path: SubtreePathBuilder<'b, B>, + ) -> CostResult, Error> { + let mut cost = Default::default(); + + // SAFETY: there are no other references to `merks` memory at the same time. 
+ // Note that while it's possible to have direct references to actual Merk trees, + // outside of the scope of this function, this map (`merks`) has an + // indirect connection to them through `Box`, thus there are no overlapping + // references, and that is a requirement of `UnsafeCell` we have there. + let boxed_flag_merk = match unsafe { + self.merks + .get() + .as_mut() + .expect("`UnsafeCell` is never null") + } + .entry(path) + { + Entry::Vacant(e) => { + let merk = cost_return_on_error!( + &mut cost, + self.db.open_transactional_merk_at_path( + e.key().into(), + self.tx, + // SAFETY: batch is allocated on the heap and we use only shared + // references, so as long as the `Box` allocation + // outlives those references we're safe, + // and it will outlive because Merks are dropped first. + Some(unsafe { + (&*self.batch as *const StorageBatch) + .as_ref() + .expect("`Box` is never null") + }), + self.version + ) + ); + e.insert(Box::new((false.into(), merk))) + } + Entry::Occupied(e) => e.into_mut(), + }; + + let taken_handle_ref: *const Cell = &boxed_flag_merk.0 as *const _; + let merk_ptr: *mut TxMerk<'db> = &mut boxed_flag_merk.1 as *mut _; + + // SAFETY: `MerkHandle` contains two references to the heap allocated memory, + // and we want to be sure that the referenced data will outlive those + // references plus borrowing rules aren't violated (one `&mut` or many + // `&` with no `&mut` at a time). + // + // To make sure changes to the map won't affect existing borrows we have an + // indirection in a form of `Box`, that allows us to move and update + // `MerkCache` with new subtrees and possible reallocations without breaking + // `MerkHandle`'s references. We use `UnsafeCell` to connect lifetimes and check + // at compile time that `MerkHandle`s won't outlive the cache, even though we + // don't hold any references to it, but a `&mut` reference would make this borrow + // exclusive for the whole time of `MerkHandle`, so it shall go initially through + // a shared reference. + // + // Borrowing rules are covered using a borrow flag of each Merk: + // 1. Borrow flag's reference points to heap allocated memory and will remain + // valid. Since the reference is shared and there is no need to obtain a `&mut` + // reference, this part of the memory is covered. + // 2. For the same reason the Merk's pointer can be converted to a reference, + // because the memory behind the `Box` is valid and `MerkHandle` can't + // outlive it since we use lifetime parameters. + // 3. We can get a unique reference out of that pointer safely because of the + // borrowing flag. + Ok(unsafe { + MerkHandle { + merk: merk_ptr, + taken_handle: taken_handle_ref + .as_ref() + .expect("`Box` contents are never null"), + } + }) + .wrap_with_cost(cost) + } + + /// Consumes `MerkCache` into an accumulated batch of uncommitted operations + /// with subtrees' root hash propagation done. + pub(crate) fn into_batch(mut self) -> CostResult, Error> { + let mut cost = Default::default(); + cost_return_on_error!(&mut cost, self.propagate_subtrees()); + + // SAFETY: By this time all subtrees are taken and dropped during + // propagation, so there are no more references to the batch and it can be + // safely released into the world. + Ok(self.batch).wrap_with_cost(cost) + } + + fn propagate_subtrees(&mut self) -> CostResult<(), Error> { + let mut cost = Default::default(); + + // This relies on [SubtreePath]'s ordering implementation to put the deepest + // paths first.
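As a rough illustration of that ordering property, a minimal sketch using plain `std` types in place of `SubtreePathBuilder` (the key shape below is an assumption of this sketch, not the crate's actual ordering): when map keys sort deeper paths first, `pop_first` always yields a subtree before any of its ancestors, which is what lets a child's root hash be written into its still-cached parent before the parent is popped and propagated itself.

    fn deepest_first_order_sketch() {
        use std::{cmp::Reverse, collections::BTreeMap};

        // Hypothetical key: depth sorted in reverse, then the path segments themselves;
        // the string values stand in for cached Merk trees.
        let mut merks: BTreeMap<(Reverse<usize>, Vec<Vec<u8>>), &str> = BTreeMap::new();
        merks.insert((Reverse(0), vec![]), "root");
        merks.insert((Reverse(1), vec![b"a".to_vec()]), "parent");
        merks.insert((Reverse(2), vec![b"a".to_vec(), b"b".to_vec()]), "child");

        // Deepest entries come out first, so the parent is still in the cache when
        // the child's root hash needs to be written into it.
        assert_eq!(merks.pop_first().unwrap().1, "child");
        assert_eq!(merks.pop_first().unwrap().1, "parent");
        assert_eq!(merks.pop_first().unwrap().1, "root");
    }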
+ while let Some((path, flag_and_merk)) = self.merks.get_mut().pop_first() { + let merk = flag_and_merk.1; + if let Some((parent_path, parent_key)) = path.derive_parent_owned() { + let mut parent_merk = cost_return_on_error!(&mut cost, self.get_merk(parent_path)); + + let (root_hash, root_key, aggregate_data) = cost_return_on_error!( + &mut cost, + merk.root_hash_key_and_aggregate_data() + .map_err(Error::MerkError) + ); + cost_return_on_error!( + &mut cost, + parent_merk.for_merk(|m| GroveDb::update_tree_item_preserve_flag( + m, + parent_key, + root_key, + root_hash, + aggregate_data, + self.version, + )) + ); + } + } + + Ok(()).wrap_with_cost(cost) + } +} + +/// Wrapper over `Merk` tree to manage unqiue borrow dynamically. +#[derive(Clone)] +pub(crate) struct MerkHandle<'db, 'c> { + merk: *mut TxMerk<'db>, + taken_handle: &'c Cell, +} + +impl<'db> MerkHandle<'db, '_> { + pub(crate) fn for_merk(&mut self, f: impl FnOnce(&mut TxMerk<'db>) -> T) -> T { + if self.taken_handle.get() { + panic!("Attempt to have double &mut borrow on Merk"); + } + + self.taken_handle.set(true); + + // SAFETY: here we want to have `&mut` reference to Merk out of a pointer, there + // is a checklist for that: + // 1. Memory is valid, because `MerkHandle` can't outlive `MerkCache` and heap + // allocated Merks stay at their place for the whole `MerkCache` lifetime. + // 2. No other references exist because of `taken_handle` check above. + let result = f(unsafe { self.merk.as_mut().expect("`Box` contents are never null") }); + + self.taken_handle.set(false); + + result + } +} + +#[cfg(test)] +mod tests { + use grovedb_path::SubtreePath; + use grovedb_storage::StorageBatch; + use grovedb_version::version::GroveVersion; + + use super::MerkCache; + use crate::{ + tests::{make_deep_tree, make_test_grovedb, TEST_LEAF}, + Element, + }; + + #[test] + #[should_panic] + fn cant_borrow_twice() { + let version = GroveVersion::latest(); + let db = make_test_grovedb(&version); + let tx = db.start_transaction(); + + let cache = MerkCache::new(&db, &tx, version); + + let mut merk1 = cache + .get_merk(SubtreePath::empty().derive_owned()) + .unwrap() + .unwrap(); + let mut merk2 = cache + .get_merk(SubtreePath::empty().derive_owned()) + .unwrap() + .unwrap(); + + merk1.for_merk(|_m1| { + merk2.for_merk(|_m2| { + // this shouldn't happen + }) + }); + } + + #[test] + fn subtrees_are_propagated() { + let version = GroveVersion::latest(); + let db = make_deep_tree(&version); + let tx = db.start_transaction(); + + let path = SubtreePath::from(&[TEST_LEAF, b"innertree"]); + let item = Element::new_item(b"hello".to_vec()); + + let no_propagation_ops_count = { + let batch = StorageBatch::new(); + + let mut merk = db + .open_transactional_merk_at_path(path.clone(), &tx, Some(&batch), &version) + .unwrap() + .unwrap(); + + item.insert(&mut merk, b"k1", None, &version) + .unwrap() + .unwrap(); + + batch.len() + }; + + let cache = MerkCache::new(&db, &tx, version); + + let mut merk = cache.get_merk(path.derive_owned()).unwrap().unwrap(); + + merk.for_merk(|m| item.insert(m, b"k1", None, &version).unwrap().unwrap()); + + drop(merk); + + assert!(cache.into_batch().unwrap().unwrap().len() > no_propagation_ops_count); + } +} diff --git a/grovedb/src/operations/auxiliary.rs b/grovedb/src/operations/auxiliary.rs index 493e6e747..855960232 100644 --- a/grovedb/src/operations/auxiliary.rs +++ b/grovedb/src/operations/auxiliary.rs @@ -28,22 +28,16 @@ //! 
Auxiliary operations -#[cfg(feature = "minimal")] use grovedb_costs::{ - cost_return_on_error, cost_return_on_error_no_add, - storage_cost::key_value_cost::KeyValueStorageCost, CostResult, CostsExt, OperationCost, + cost_return_on_error, storage_cost::key_value_cost::KeyValueStorageCost, CostResult, CostsExt, + OperationCost, }; use grovedb_path::SubtreePath; -#[cfg(feature = "minimal")] -use grovedb_storage::StorageContext; -use grovedb_storage::{Storage, StorageBatch}; +use grovedb_storage::{Storage, StorageContext}; use grovedb_version::version::GroveVersion; -use crate::util::storage_context_optional_tx; -#[cfg(feature = "minimal")] -use crate::{util::meta_storage_context_optional_tx, Element, Error, GroveDb, TransactionArg}; +use crate::{util::TxRef, Element, Error, GroveDb, TransactionArg}; -#[cfg(feature = "minimal")] impl GroveDb { /// Put op for aux storage pub fn put_aux>( @@ -54,23 +48,29 @@ impl GroveDb { transaction: TransactionArg, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); - let batch = StorageBatch::new(); - - meta_storage_context_optional_tx!(self.db, Some(&batch), transaction, aux_storage, { - cost_return_on_error_no_add!( - &cost, - aux_storage - .unwrap_add_cost(&mut cost) - .put_aux(key.as_ref(), value, cost_info) - .unwrap_add_cost(&mut cost) - .map_err(|e| e.into()) - ); - }); - - self.db - .commit_multi_context_batch(batch, transaction) - .add_cost(cost) - .map_err(Into::into) + let tx = TxRef::new(&self.db, transaction); + let batch = Default::default(); + + let aux_storage = self + .db + .get_transactional_storage_context(SubtreePath::empty(), Some(&batch), tx.as_ref()) + .unwrap_add_cost(&mut cost); + + cost_return_on_error!( + &mut cost, + aux_storage + .put_aux(key.as_ref(), value, cost_info) + .map_err(Into::into) + ); + + cost_return_on_error!( + &mut cost, + self.db + .commit_multi_context_batch(batch, Some(tx.as_ref())) + .map_err(Into::into) + ); + + tx.commit_local().wrap_with_cost(cost) } /// Delete op for aux storage @@ -81,23 +81,29 @@ impl GroveDb { transaction: TransactionArg, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); - let batch = StorageBatch::new(); - - meta_storage_context_optional_tx!(self.db, Some(&batch), transaction, aux_storage, { - cost_return_on_error_no_add!( - &cost, - aux_storage - .unwrap_add_cost(&mut cost) - .delete_aux(key.as_ref(), cost_info) - .unwrap_add_cost(&mut cost) - .map_err(|e| e.into()) - ); - }); - - self.db - .commit_multi_context_batch(batch, transaction) - .add_cost(cost) - .map_err(Into::into) + let tx = TxRef::new(&self.db, transaction); + let batch = Default::default(); + + let aux_storage = self + .db + .get_transactional_storage_context(SubtreePath::empty(), Some(&batch), tx.as_ref()) + .unwrap_add_cost(&mut cost); + + cost_return_on_error!( + &mut cost, + aux_storage + .delete_aux(key.as_ref(), cost_info) + .map_err(|e| e.into()) + ); + + cost_return_on_error!( + &mut cost, + self.db + .commit_multi_context_batch(batch, Some(tx.as_ref())) + .map_err(Into::into) + ); + + tx.commit_local().wrap_with_cost(cost) } /// Get op for aux storage @@ -107,19 +113,17 @@ impl GroveDb { transaction: TransactionArg, ) -> CostResult>, Error> { let mut cost = OperationCost::default(); + let tx = TxRef::new(&self.db, transaction); + + let aux_storage = self + .db + .get_transactional_storage_context(SubtreePath::empty(), None, tx.as_ref()) + .unwrap_add_cost(&mut cost); - meta_storage_context_optional_tx!(self.db, None, transaction, aux_storage, { - let value = 
cost_return_on_error_no_add!( - &cost, - aux_storage - .unwrap_add_cost(&mut cost) - .get_aux(key) - .unwrap_add_cost(&mut cost) - .map_err(|e| e.into()) - ); - - Ok(value).wrap_with_cost(cost) - }) + aux_storage + .get_aux(key.as_ref()) + .map_err(|e| e.into()) + .add_cost(cost) } // TODO: dumb traversal should not be tolerated @@ -148,23 +152,26 @@ impl GroveDb { let mut queue: Vec>> = vec![path.to_vec()]; let mut result: Vec>> = queue.clone(); + let tx = TxRef::new(&self.db, transaction); + while let Some(q) = queue.pop() { let subtree_path: SubtreePath> = q.as_slice().into(); // Get the correct subtree with q_ref as path - storage_context_optional_tx!(self.db, subtree_path, None, transaction, storage, { - let storage = storage.unwrap_add_cost(&mut cost); - let mut raw_iter = Element::iterator(storage.raw_iter()).unwrap_add_cost(&mut cost); - while let Some((key, value)) = - cost_return_on_error!(&mut cost, raw_iter.next_element(grove_version)) - { - if value.is_any_tree() { - let mut sub_path = q.clone(); - sub_path.push(key.to_vec()); - queue.push(sub_path.clone()); - result.push(sub_path); - } + let storage = self + .db + .get_transactional_storage_context(subtree_path, None, tx.as_ref()) + .unwrap_add_cost(&mut cost); + let mut raw_iter = Element::iterator(storage.raw_iter()).unwrap_add_cost(&mut cost); + while let Some((key, value)) = + cost_return_on_error!(&mut cost, raw_iter.next_element(grove_version)) + { + if value.is_any_tree() { + let mut sub_path = q.clone(); + sub_path.push(key.to_vec()); + queue.push(sub_path.clone()); + result.push(sub_path); } - }) + } } Ok(result).wrap_with_cost(cost) } diff --git a/grovedb/src/operations/delete/average_case.rs b/grovedb/src/operations/delete/average_case.rs index 6e8b0158d..cab1e1821 100644 --- a/grovedb/src/operations/delete/average_case.rs +++ b/grovedb/src/operations/delete/average_case.rs @@ -12,9 +12,7 @@ use grovedb_merk::{ HASH_LENGTH_U32, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; use intmap::IntMap; use crate::{ @@ -68,11 +66,11 @@ impl GroveDb { estimated_element_size, tree_type, ) = cost_return_on_error_no_add!( - &cost, + cost, if height == path_len - 1 { if let Some(layer_info) = estimated_layer_info.get(height) { let estimated_value_len = cost_return_on_error_no_add!( - &cost, + cost, layer_info .estimated_layer_sizes .value_with_feature_and_flags_size(grove_version) @@ -97,7 +95,7 @@ impl GroveDb { used_path = smaller_path; if let Some(layer_info) = estimated_layer_info.get(height) { let estimated_value_len = cost_return_on_error_no_add!( - &cost, + cost, layer_info .estimated_layer_sizes .subtree_with_feature_and_flags_size(grove_version) @@ -159,7 +157,7 @@ impl GroveDb { if validate { cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_average_case_get_merk_at_path::( &mut cost, path, @@ -171,7 +169,7 @@ impl GroveDb { } if check_if_tree { cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_average_case_get_raw_cost::( &mut cost, path, diff --git a/grovedb/src/operations/delete/delete_up_tree.rs b/grovedb/src/operations/delete/delete_up_tree.rs index 2b1673745..a3854dc8f 100644 --- a/grovedb/src/operations/delete/delete_up_tree.rs +++ b/grovedb/src/operations/delete/delete_up_tree.rs @@ -7,13 +7,11 @@ use grovedb_costs::{ }; use grovedb_merk::MaybeTree; use grovedb_path::SubtreePath; -use 
grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; use crate::{ - batch::QualifiedGroveDbOp, operations::delete::DeleteOptions, ElementFlags, Error, GroveDb, - TransactionArg, + batch::QualifiedGroveDbOp, operations::delete::DeleteOptions, util::TxRef, ElementFlags, Error, + GroveDb, TransactionArg, }; #[cfg(feature = "minimal")] @@ -139,7 +137,7 @@ impl GroveDb { ); let ops = cost_return_on_error_no_add!( - &cost, + cost, if let Some(stop_path_height) = options.stop_path_height { maybe_ops.ok_or_else(|| { Error::DeleteUpTreeStopHeightMoreThanInitialPathSize(format!( @@ -223,10 +221,13 @@ impl GroveDb { return Ok(None).wrap_with_cost(cost); } } + + let tx = TxRef::new(&self.db, transaction); + if options.validate_tree_at_path_exists { cost_return_on_error!( &mut cost, - self.check_subtree_exists_path_not_found(path.clone(), transaction, grove_version) + self.check_subtree_exists_path_not_found(path.clone(), tx.as_ref(), grove_version) ); } if let Some(delete_operation_this_level) = cost_return_on_error!( @@ -237,7 +238,7 @@ impl GroveDb { &options.to_delete_options(), is_known_to_be_subtree, current_batch_operations, - transaction, + tx.as_ref(), grove_version, ) ) { diff --git a/grovedb/src/operations/delete/mod.rs b/grovedb/src/operations/delete/mod.rs index 8bf2e0ea5..e5a6c590c 100644 --- a/grovedb/src/operations/delete/mod.rs +++ b/grovedb/src/operations/delete/mod.rs @@ -25,20 +25,19 @@ use grovedb_merk::{Error as MerkError, Merk, MerkOptions}; use grovedb_path::SubtreePath; #[cfg(feature = "minimal")] use grovedb_storage::{ - rocksdb_storage::{PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext}, - Storage, StorageBatch, StorageContext, -}; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, + rocksdb_storage::PrefixedRocksDbTransactionContext, Storage, StorageBatch, StorageContext, }; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; #[cfg(feature = "minimal")] use crate::{ batch::{GroveOp, QualifiedGroveDbOp}, - util::storage_context_with_parent_optional_tx, Element, ElementFlags, Error, GroveDb, Transaction, TransactionArg, }; -use crate::{raw_decode, util::merk_optional_tx_path_not_empty}; +use crate::{ + raw_decode, + util::{compat, TxRef}, +}; #[cfg(feature = "minimal")] #[derive(Clone)] @@ -119,15 +118,20 @@ impl GroveDb { grove_version.grovedb_versions.operations.delete.delete ); + let tx = TxRef::new(&self.db, transaction); + let options = options.unwrap_or_default(); let batch = StorageBatch::new(); - let collect_costs = self - .delete_internal( + let mut cost = Default::default(); + + cost_return_on_error!( + &mut cost, + self.delete_internal_on_transaction( path.into(), key, &options, - transaction, + tx.as_ref(), &mut |_, removed_key_bytes, removed_value_bytes| { Ok(( BasicStorageRemoval(removed_key_bytes), @@ -137,13 +141,17 @@ impl GroveDb { &batch, grove_version, ) - .map_ok(|_| ()); + .map_ok(|_| ()) + ); - collect_costs.flat_map_ok(|_| { + cost_return_on_error!( + &mut cost, self.db - .commit_multi_context_batch(batch, transaction) + .commit_multi_context_batch(batch, Some(tx.as_ref())) .map_err(Into::into) - }) + ); + + tx.commit_local().wrap_with_cost(cost) } /// Delete all elements in a specified subtree @@ -187,120 +195,52 @@ impl GroveDb { .clear_subtree ); + let tx = TxRef::new(&self.db, transaction); + let subtree_path: SubtreePath = path.into(); 
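The rewritten bodies in this hunk all follow the same pattern: wrap the optional caller transaction in `util::TxRef`, operate against `tx.as_ref()`, and finish with `tx.commit_local()`. The helper's definition is not part of this hunk, so the following is only a sketch of the shape such a wrapper could take, with hypothetical names and simplified signatures rather than the crate's actual `TxRef` API:

    /// Sketch only: a borrowed-or-owned transaction wrapper.
    enum LocalTx<'a, T> {
        Borrowed(&'a T),
        Owned(T),
    }

    impl<'a, T> LocalTx<'a, T> {
        /// Borrow the caller's transaction when one is supplied, otherwise start a local one.
        fn new(start: impl FnOnce() -> T, existing: Option<&'a T>) -> Self {
            match existing {
                Some(tx) => LocalTx::Borrowed(tx),
                None => LocalTx::Owned(start()),
            }
        }

        fn as_ref(&self) -> &T {
            match self {
                LocalTx::Borrowed(tx) => tx,
                LocalTx::Owned(tx) => tx,
            }
        }

        /// Commit only a locally started transaction; a borrowed one stays with the caller.
        fn commit_local(self, commit: impl FnOnce(T) -> Result<(), String>) -> Result<(), String> {
            match self {
                LocalTx::Borrowed(_) => Ok(()),
                LocalTx::Owned(tx) => commit(tx),
            }
        }
    }

Read this way, `TxRef::new(&self.db, transaction)` would take the borrowed arm whenever the caller passes an existing transaction, and `tx.commit_local()` would be a no-op in that case, so the rewritten operations only ever commit work they started themselves.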
let mut cost = OperationCost::default(); let batch = StorageBatch::new(); let options = options.unwrap_or_default(); - if let Some(transaction) = transaction { - let mut merk_to_clear = cost_return_on_error!( - &mut cost, - self.open_transactional_merk_at_path( - subtree_path.clone(), - transaction, - Some(&batch), - grove_version, - ) - ); - - if options.check_for_subtrees { - let mut all_query = Query::new(); - all_query.insert_all(); - - let mut element_iterator = - KVIterator::new(merk_to_clear.storage.raw_iter(), &all_query).unwrap(); - - // delete all nested subtrees - while let Some((key, element_value)) = - element_iterator.next_kv().unwrap_add_cost(&mut cost) - { - let element = raw_decode(&element_value, grove_version).unwrap(); - if element.is_any_tree() { - if options.allow_deleting_subtrees { - cost_return_on_error!( - &mut cost, - self.delete( - subtree_path.clone(), - key.as_slice(), - Some(DeleteOptions { - allow_deleting_non_empty_trees: true, - deleting_non_empty_trees_returns_error: false, - ..Default::default() - }), - Some(transaction), - grove_version, - ) - ); - } else if options.trying_to_clear_with_subtrees_returns_error { - return Err(Error::ClearingTreeWithSubtreesNotAllowed( - "options do not allow to clear this merk tree as it contains \ - subtrees", - )) - .wrap_with_cost(cost); - } else { - return Ok(false).wrap_with_cost(cost); - } - } - } - } - - // delete non subtree values - cost_return_on_error!(&mut cost, merk_to_clear.clear().map_err(Error::MerkError)); - - // propagate changes - let mut merk_cache: HashMap, Merk> = - HashMap::default(); - merk_cache.insert(subtree_path.clone(), merk_to_clear); - cost_return_on_error!( - &mut cost, - self.propagate_changes_with_transaction( - merk_cache, - subtree_path.clone(), - transaction, - &batch, - grove_version, - ) - ); - } else { - let mut merk_to_clear = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path( - subtree_path.clone(), - Some(&batch), - grove_version - ) - ); + let mut merk_to_clear = cost_return_on_error!( + &mut cost, + self.open_transactional_merk_at_path( + subtree_path.clone(), + tx.as_ref(), + Some(&batch), + grove_version, + ) + ); - if options.check_for_subtrees { - let mut all_query = Query::new(); - all_query.insert_all(); + if options.check_for_subtrees { + let mut all_query = Query::new(); + all_query.insert_all(); - let mut element_iterator = - KVIterator::new(merk_to_clear.storage.raw_iter(), &all_query).unwrap(); + let mut element_iterator = + KVIterator::new(merk_to_clear.storage.raw_iter(), &all_query).unwrap(); - // delete all nested subtrees - while let Some((key, element_value)) = - element_iterator.next_kv().unwrap_add_cost(&mut cost) - { - let element = raw_decode(&element_value, grove_version).unwrap(); + // delete all nested subtrees + while let Some((key, element_value)) = + element_iterator.next_kv().unwrap_add_cost(&mut cost) + { + let element = raw_decode(&element_value, grove_version).unwrap(); + if element.is_any_tree() { if options.allow_deleting_subtrees { - if element.is_any_tree() { - cost_return_on_error!( - &mut cost, - self.delete( - subtree_path.clone(), - key.as_slice(), - Some(DeleteOptions { - allow_deleting_non_empty_trees: true, - deleting_non_empty_trees_returns_error: false, - ..Default::default() - }), - None, - grove_version, - ) - ); - } + cost_return_on_error!( + &mut cost, + self.delete( + subtree_path.clone(), + key.as_slice(), + Some(DeleteOptions { + allow_deleting_non_empty_trees: true, + 
deleting_non_empty_trees_returns_error: false, + ..Default::default() + }), + Some(tx.as_ref()), + grove_version, + ) + ); } else if options.trying_to_clear_with_subtrees_returns_error { return Err(Error::ClearingTreeWithSubtreesNotAllowed( "options do not allow to clear this merk tree as it contains subtrees", @@ -311,33 +251,34 @@ impl GroveDb { } } } + } - // delete non subtree values - cost_return_on_error!(&mut cost, merk_to_clear.clear().map_err(Error::MerkError)); + // delete non subtree values + cost_return_on_error!(&mut cost, merk_to_clear.clear().map_err(Error::MerkError)); - // propagate changes - let mut merk_cache: HashMap, Merk> = - HashMap::default(); - merk_cache.insert(subtree_path.clone(), merk_to_clear); - cost_return_on_error!( - &mut cost, - self.propagate_changes_without_transaction( - merk_cache, - subtree_path.clone(), - &batch, - grove_version, - ) - ); - } + // propagate changes + let mut merk_cache: HashMap, Merk> = + HashMap::default(); + merk_cache.insert(subtree_path.clone(), merk_to_clear); + cost_return_on_error!( + &mut cost, + self.propagate_changes_with_transaction( + merk_cache, + subtree_path.clone(), + tx.as_ref(), + &batch, + grove_version, + ) + ); cost_return_on_error!( &mut cost, self.db - .commit_multi_context_batch(batch, transaction) + .commit_multi_context_batch(batch, Some(tx.as_ref())) .map_err(Into::into) ); - Ok(true).wrap_with_cost(cost) + tx.commit_local().map(|_| true).wrap_with_cost(cost) } /// Delete element with sectional storage function @@ -366,15 +307,20 @@ impl GroveDb { .delete_with_sectional_storage_function ); + let tx = TxRef::new(&self.db, transaction); + let options = options.unwrap_or_default(); let batch = StorageBatch::new(); - let collect_costs = self - .delete_internal( + let mut cost = Default::default(); + + cost_return_on_error!( + &mut cost, + self.delete_internal_on_transaction( path, key, &options, - transaction, + tx.as_ref(), &mut |value, removed_key_bytes, removed_value_bytes| { let mut element = Element::deserialize(value.as_slice(), grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string()))?; @@ -395,13 +341,16 @@ impl GroveDb { &batch, grove_version, ) - .map_ok(|_| ()); + ); - collect_costs.flat_map_ok(|_| { + cost_return_on_error!( + &mut cost, self.db - .commit_multi_context_batch(batch, transaction) + .commit_multi_context_batch(batch, Some(tx.as_ref())) .map_err(Into::into) - }) + ); + + tx.commit_local().wrap_with_cost(cost) } /// Delete if an empty tree @@ -425,28 +374,36 @@ impl GroveDb { .delete_if_empty_tree ); + let mut cost = Default::default(); + let batch = StorageBatch::new(); + let tx = TxRef::new(&self.db, transaction); - let collect_costs = self.delete_if_empty_tree_with_sectional_storage_function( - path.into(), - key, - transaction, - &mut |_, removed_key_bytes, removed_value_bytes| { - Ok(( - BasicStorageRemoval(removed_key_bytes), - BasicStorageRemoval(removed_value_bytes), - )) - }, - &batch, - grove_version, + let result = cost_return_on_error!( + &mut cost, + self.delete_if_empty_tree_with_sectional_storage_function( + path.into(), + key, + tx.as_ref(), + &mut |_, removed_key_bytes, removed_value_bytes| { + Ok(( + BasicStorageRemoval(removed_key_bytes), + BasicStorageRemoval(removed_value_bytes), + )) + }, + &batch, + grove_version, + ) ); - collect_costs.flat_map_ok(|r| { + cost_return_on_error!( + &mut cost, self.db - .commit_multi_context_batch(batch, transaction) + .commit_multi_context_batch(batch, Some(tx.as_ref())) .map_err(Into::into) - .map_ok(|_| r) - }) 
+ ); + + tx.commit_local().map(|_| result).wrap_with_cost(cost) } /// Delete if an empty tree with section storage function @@ -454,7 +411,7 @@ impl GroveDb { &self, path: SubtreePath, key: &[u8], - transaction: TransactionArg, + transaction: &Transaction, split_removal_bytes_function: &mut impl FnMut( &mut ElementFlags, u32, // key removed bytes @@ -481,7 +438,7 @@ impl GroveDb { ..Default::default() }; - self.delete_internal( + self.delete_internal_on_transaction( path, key, &options, @@ -507,14 +464,14 @@ impl GroveDb { } /// Delete operation for delete internal - pub fn delete_operation_for_delete_internal>( + fn delete_operation_for_delete_internal>( &self, path: SubtreePath, key: &[u8], options: &DeleteOptions, is_known_to_be_subtree: Option, current_batch_operations: &[QualifiedGroveDbOp], - transaction: TransactionArg, + transaction: &Transaction, grove_version: &GroveVersion, ) -> CostResult, Error> { check_grovedb_v0_with_cost!( @@ -549,7 +506,7 @@ impl GroveDb { None => { let element = cost_return_on_error!( &mut cost, - self.get_raw(path.clone(), key.as_ref(), transaction, grove_version) + self.get_raw(path.clone(), key.as_ref(), Some(transaction), grove_version) ); element.maybe_tree_type() } @@ -574,21 +531,21 @@ impl GroveDb { _ => None, }) .collect::>(); - let mut is_empty = merk_optional_tx_path_not_empty!( + let subtree = cost_return_on_error!( &mut cost, - self.db, - SubtreePath::from(&subtree_merk_path), - None, - transaction, - subtree, - grove_version, - { - subtree - .is_empty_tree_except(batch_deleted_keys) - .unwrap_add_cost(&mut cost) - } + compat::merk_optional_tx_path_not_empty( + &self.db, + SubtreePath::from(&subtree_merk_path), + transaction, + None, + grove_version, + ) ); + let mut is_empty = subtree + .is_empty_tree_except(batch_deleted_keys) + .unwrap_add_cost(&mut cost); + // If there is any current batch operation that is inserting something in this // tree then it is not empty either is_empty &= !current_batch_operations.iter().any(|op| match op.op { @@ -628,45 +585,6 @@ impl GroveDb { } } - fn delete_internal>( - &self, - path: SubtreePath, - key: &[u8], - options: &DeleteOptions, - transaction: TransactionArg, - sectioned_removal: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - MerkError, - >, - batch: &StorageBatch, - grove_version: &GroveVersion, - ) -> CostResult { - if let Some(transaction) = transaction { - self.delete_internal_on_transaction( - path, - key, - options, - transaction, - sectioned_removal, - batch, - grove_version, - ) - } else { - self.delete_internal_without_transaction( - path, - key, - options, - sectioned_removal, - batch, - grove_version, - ) - } - } - fn delete_internal_on_transaction>( &self, path: SubtreePath, @@ -864,131 +782,6 @@ impl GroveDb { Ok(true).wrap_with_cost(cost) } - - fn delete_internal_without_transaction>( - &self, - path: SubtreePath, - key: &[u8], - options: &DeleteOptions, - sectioned_removal: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - MerkError, - >, - batch: &StorageBatch, - grove_version: &GroveVersion, - ) -> CostResult { - check_grovedb_v0_with_cost!( - "delete_internal_without_transaction", - grove_version - .grovedb_versions - .operations - .delete - .delete_internal_without_transaction - ); - - let mut cost = OperationCost::default(); - - let element = cost_return_on_error!( - &mut cost, - self.get_raw(path.clone(), key.as_ref(), None, grove_version) - ); - let mut merk_cache: 
HashMap, Merk> = - HashMap::default(); - let mut subtree_to_delete_from = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path(path.clone(), Some(batch), grove_version) - ); - let uses_sum_tree = subtree_to_delete_from.tree_type; - if element.is_any_tree() { - let subtree_merk_path = path.derive_owned_with_child(key); - let subtree_of_tree_we_are_deleting = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path( - SubtreePath::from(&subtree_merk_path), - Some(batch), - grove_version, - ) - ); - let is_empty = subtree_of_tree_we_are_deleting - .is_empty_tree() - .unwrap_add_cost(&mut cost); - - if !options.allow_deleting_non_empty_trees && !is_empty { - return if options.deleting_non_empty_trees_returns_error { - Err(Error::DeletingNonEmptyTree( - "trying to do a delete operation for a non empty tree, but options not \ - allowing this", - )) - .wrap_with_cost(cost) - } else { - Ok(false).wrap_with_cost(cost) - }; - } else { - if !is_empty { - let subtrees_paths = cost_return_on_error!( - &mut cost, - self.find_subtrees( - &SubtreePath::from(&subtree_merk_path), - None, - grove_version - ) - ); - // TODO: dumb traversal should not be tolerated - for subtree_path in subtrees_paths.into_iter().rev() { - let p: SubtreePath<_> = subtree_path.as_slice().into(); - let mut inner_subtree_to_delete_from = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path(p, Some(batch), grove_version) - ); - cost_return_on_error!( - &mut cost, - inner_subtree_to_delete_from.clear().map_err(|e| { - Error::CorruptedData(format!( - "unable to cleanup tree from storage: {e}", - )) - }) - ); - } - } - cost_return_on_error!( - &mut cost, - Element::delete_with_sectioned_removal_bytes( - &mut subtree_to_delete_from, - key, - Some(options.as_merk_options()), - true, - uses_sum_tree, - sectioned_removal, - grove_version, - ) - ); - } - } else { - cost_return_on_error!( - &mut cost, - Element::delete_with_sectioned_removal_bytes( - &mut subtree_to_delete_from, - key, - Some(options.as_merk_options()), - false, - uses_sum_tree, - sectioned_removal, - grove_version, - ) - ); - } - merk_cache.insert(path.clone(), subtree_to_delete_from); - cost_return_on_error!( - &mut cost, - self.propagate_changes_without_transaction(merk_cache, path, batch, grove_version) - ); - - Ok(true).wrap_with_cost(cost) - } } #[cfg(feature = "minimal")] @@ -1882,9 +1675,13 @@ mod tests { .unwrap() .unwrap(); assert!(!matches!(key1_tree, Element::Tree(None, _))); + + let transaction = db.start_transaction(); + let key1_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, None, grove_version, ) @@ -1946,9 +1743,12 @@ mod tests { .unwrap(); assert!(matches!(key1_tree, Element::Tree(None, _))); + let transaction = db.start_transaction(); + let key1_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, None, grove_version, ) diff --git a/grovedb/src/operations/delete/worst_case.rs b/grovedb/src/operations/delete/worst_case.rs index 894541491..e5d8b563b 100644 --- a/grovedb/src/operations/delete/worst_case.rs +++ b/grovedb/src/operations/delete/worst_case.rs @@ -8,9 +8,7 @@ use grovedb_merk::{ tree_type::TreeType, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use 
grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; use intmap::IntMap; use crate::{ @@ -62,7 +60,7 @@ impl GroveDb { max_element_size, tree_type, ) = cost_return_on_error_no_add!( - &cost, + cost, if height == path_len { if let Some((tree_type, _)) = intermediate_tree_info.get(height as u64) { Ok((used_path, key, true, 0, max_element_size, *tree_type)) @@ -134,7 +132,7 @@ impl GroveDb { if validate { cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_worst_case_get_merk_at_path::( &mut cost, path, @@ -145,7 +143,7 @@ impl GroveDb { } if check_if_tree { cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_worst_case_get_raw_cost::( &mut cost, path, diff --git a/grovedb/src/operations/get/average_case.rs b/grovedb/src/operations/get/average_case.rs index d920ad011..c70e90b61 100644 --- a/grovedb/src/operations/get/average_case.rs +++ b/grovedb/src/operations/get/average_case.rs @@ -6,7 +6,7 @@ use grovedb_costs::OperationCost; use grovedb_merk::tree_type::TreeType; #[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; -use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use grovedb_version::{check_grovedb_v0, version::GroveVersion}; use crate::Error; #[cfg(feature = "minimal")] diff --git a/grovedb/src/operations/get/mod.rs b/grovedb/src/operations/get/mod.rs index d52fe4b16..9eba55bcb 100644 --- a/grovedb/src/operations/get/mod.rs +++ b/grovedb/src/operations/get/mod.rs @@ -4,6 +4,7 @@ mod average_case; #[cfg(feature = "minimal")] mod query; +use grovedb_storage::Storage; #[cfg(feature = "minimal")] pub use query::QueryItemOrSumReturnType; #[cfg(feature = "estimated_costs")] @@ -18,16 +19,14 @@ use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; use grovedb_path::SubtreePath; #[cfg(feature = "minimal")] use grovedb_storage::StorageContext; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; #[cfg(feature = "minimal")] use crate::error::GroveDbErrorExt; +use crate::util::TxRef; #[cfg(feature = "minimal")] use crate::{ reference_path::{path_from_reference_path_type, path_from_reference_qualified_path_type}, - util::storage_context_optional_tx, Element, Error, GroveDb, Transaction, TransactionArg, }; @@ -213,17 +212,15 @@ impl GroveDb { .get_raw_caching_optional ); - if let Some(transaction) = transaction { - self.get_raw_on_transaction_caching_optional( - path, - key, - allow_cache, - transaction, - grove_version, - ) - } else { - self.get_raw_without_transaction_caching_optional(path, key, allow_cache, grove_version) - } + let tx = TxRef::new(&self.db, transaction); + + self.get_raw_on_transaction_caching_optional( + path, + key, + allow_cache, + tx.as_ref(), + grove_version, + ) } /// Get Element at specified path and key @@ -266,22 +263,15 @@ impl GroveDb { .get_raw_optional_caching_optional ); - if let Some(transaction) = transaction { - self.get_raw_optional_on_transaction_caching_optional( - path, - key, - allow_cache, - transaction, - grove_version, - ) - } else { - self.get_raw_optional_without_transaction_caching_optional( - path, - key, - allow_cache, - grove_version, - ) - } + let tx = TxRef::new(&self.db, transaction); + + self.get_raw_optional_on_transaction_caching_optional( + path, + key, + allow_cache, + tx.as_ref(), + grove_version, + ) } /// Get tree item without following references @@ -329,67 +319,7 @@ impl GroveDb { 
}) .unwrap_add_cost(&mut cost); let merk = cost_return_on_error_no_add!( - &cost, - match merk_result { - Ok(result) => Ok(Some(result)), - Err(Error::PathParentLayerNotFound(_)) | Err(Error::InvalidParentLayerPath(_)) => - Ok(None), - Err(e) => Err(e), - } - ); - - if let Some(merk_to_get_from) = merk { - Element::get_optional(&merk_to_get_from, key, allow_cache, grove_version).add_cost(cost) - } else { - Ok(None).wrap_with_cost(cost) - } - } - - /// Get tree item without following references - pub(crate) fn get_raw_without_transaction_caching_optional>( - &self, - path: SubtreePath, - key: &[u8], - allow_cache: bool, - grove_version: &GroveVersion, - ) -> CostResult { - let mut cost = OperationCost::default(); - - let merk_to_get_from = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path(path.clone(), None, grove_version) - .map_err(|e| match e { - Error::InvalidParentLayerPath(s) => { - Error::PathParentLayerNotFound(s) - } - _ => e, - }) - ); - - Element::get(&merk_to_get_from, key, allow_cache, grove_version) - .add_context(format!("path is {}", path)) - .add_cost(cost) - } - - /// Get tree item without following references - pub(crate) fn get_raw_optional_without_transaction_caching_optional>( - &self, - path: SubtreePath, - key: &[u8], - allow_cache: bool, - grove_version: &GroveVersion, - ) -> CostResult, Error> { - let mut cost = OperationCost::default(); - - let merk_result = self - .open_non_transactional_merk_at_path(path, None, grove_version) - .map_err(|e| match e { - Error::InvalidParentLayerPath(s) => Error::PathParentLayerNotFound(s), - _ => e, - }) - .unwrap_add_cost(&mut cost); - let merk = cost_return_on_error_no_add!( - &cost, + cost, match merk_result { Ok(result) => Ok(Some(result)), Err(Error::PathParentLayerNotFound(_)) | Err(Error::InvalidParentLayerPath(_)) => @@ -423,23 +353,25 @@ impl GroveDb { grove_version.grovedb_versions.operations.get.has_raw ); + let tx = TxRef::new(&self.db, transaction); + // Merk's items should be written into data storage and checked accordingly - storage_context_optional_tx!(self.db, path.into(), None, transaction, storage, { - storage.flat_map(|s| s.get(key).map_err(|e| e.into()).map_ok(|x| x.is_some())) - }) + self.db + .get_transactional_storage_context(path.into(), None, tx.as_ref()) + .flat_map(|s| s.get(key).map_err(|e| e.into()).map_ok(|x| x.is_some())) } fn check_subtree_exists>( &self, path: SubtreePath, - transaction: TransactionArg, + transaction: &Transaction, error_fn: impl FnOnce() -> Error, grove_version: &GroveVersion, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); if let Some((parent_path, parent_key)) = path.derive_parent() { - let element = if let Some(transaction) = transaction { + let element = { let merk_to_get_from = cost_return_on_error!( &mut cost, self.open_transactional_merk_at_path( @@ -450,14 +382,6 @@ impl GroveDb { ) ); - Element::get(&merk_to_get_from, parent_key, true, grove_version) - .add_context(format!("path is {}", path)) - } else { - let merk_to_get_from = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path(parent_path, None, grove_version) - ); - Element::get(&merk_to_get_from, parent_key, true, grove_version) .add_context(format!("path is {}", path)) } @@ -480,7 +404,7 @@ impl GroveDb { pub(crate) fn check_subtree_exists_path_not_found<'b, B>( &self, path: SubtreePath<'b, B>, - transaction: TransactionArg, + transaction: &Transaction, grove_version: &GroveVersion, ) -> CostResult<(), Error> where @@ -518,9 +442,11 @@ 
impl GroveDb { .check_subtree_exists_invalid_path ); + let tx = TxRef::new(&self.db, transaction); + self.check_subtree_exists( path, - transaction, + tx.as_ref(), || Error::InvalidPath("subtree doesn't exist".to_owned()), grove_version, ) diff --git a/grovedb/src/operations/get/query.rs b/grovedb/src/operations/get/query.rs index c1b754681..d82c4f035 100644 --- a/grovedb/src/operations/get/query.rs +++ b/grovedb/src/operations/get/query.rs @@ -5,9 +5,7 @@ use grovedb_costs::cost_return_on_error_default; use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, }; -use grovedb_version::{ - check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0, check_grovedb_v0_with_cost, version::GroveVersion}; #[cfg(feature = "minimal")] use integer_encoding::VarInt; @@ -140,7 +138,7 @@ where { let mut cost = OperationCost::default(); let query = cost_return_on_error_no_add!( - &cost, + cost, PathQuery::merge(path_queries.to_vec(), grove_version) ); let (result, _) = cost_return_on_error!( @@ -278,7 +276,7 @@ where { }) .collect::, Error>>(); - let results = cost_return_on_error_no_add!(&cost, results_wrapped); + let results = cost_return_on_error_no_add!(cost, results_wrapped); Ok((QueryResultElements { elements: results }, skipped)).wrap_with_cost(cost) } @@ -368,7 +366,7 @@ where { }) .collect::>, Error>>(); - let results = cost_return_on_error_no_add!(&cost, results_wrapped); + let results = cost_return_on_error_no_add!(cost, results_wrapped); Ok((results, skipped)).wrap_with_cost(cost) } @@ -488,7 +486,7 @@ where { }) .collect::, Error>>(); - let results = cost_return_on_error_no_add!(&cost, results_wrapped); + let results = cost_return_on_error_no_add!(cost, results_wrapped); Ok((results, skipped)).wrap_with_cost(cost) } @@ -574,7 +572,7 @@ where { }) .collect::, Error>>(); - let results = cost_return_on_error_no_add!(&cost, results_wrapped); + let results = cost_return_on_error_no_add!(cost, results_wrapped); Ok((results, skipped)).wrap_with_cost(cost) } @@ -639,7 +637,7 @@ where { let mut cost = OperationCost::default(); let terminal_keys = cost_return_on_error_no_add!( - &cost, + cost, path_query.terminal_keys(max_results, grove_version) ); @@ -698,7 +696,7 @@ where { let mut cost = OperationCost::default(); let terminal_keys = cost_return_on_error_no_add!( - &cost, + cost, path_query.terminal_keys(max_results, grove_version) ); diff --git a/grovedb/src/operations/get/worst_case.rs b/grovedb/src/operations/get/worst_case.rs index e6382dd81..75fabb546 100644 --- a/grovedb/src/operations/get/worst_case.rs +++ b/grovedb/src/operations/get/worst_case.rs @@ -6,7 +6,7 @@ use grovedb_costs::OperationCost; use grovedb_merk::tree_type::TreeType; #[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; -use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use grovedb_version::{check_grovedb_v0, version::GroveVersion}; use crate::Error; #[cfg(feature = "minimal")] diff --git a/grovedb/src/operations/insert/mod.rs b/grovedb/src/operations/insert/mod.rs index e362a761a..e7df42823 100644 --- a/grovedb/src/operations/insert/mod.rs +++ b/grovedb/src/operations/insert/mod.rs @@ -11,14 +11,11 @@ use grovedb_costs::{ use grovedb_merk::{tree::NULL_HASH, Merk, MerkOptions}; use grovedb_path::SubtreePath; #[cfg(feature = "minimal")] -use grovedb_storage::rocksdb_storage::{ - PrefixedRocksDbStorageContext, 
PrefixedRocksDbTransactionContext, -}; +use grovedb_storage::rocksdb_storage::PrefixedRocksDbTransactionContext; use grovedb_storage::{Storage, StorageBatch}; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; +use crate::util::TxRef; #[cfg(feature = "minimal")] use crate::{ reference_path::path_from_reference_path_type, Element, Error, GroveDb, Transaction, @@ -86,32 +83,31 @@ impl GroveDb { let subtree_path: SubtreePath = path.into(); let batch = StorageBatch::new(); - let collect_costs = if let Some(transaction) = transaction { + let tx = TxRef::new(&self.db, transaction); + + let mut cost = Default::default(); + + cost_return_on_error!( + &mut cost, self.insert_on_transaction( subtree_path, key, element, options.unwrap_or_default(), - transaction, + tx.as_ref(), &batch, grove_version, ) - } else { - self.insert_without_transaction( - subtree_path, - key, - element, - options.unwrap_or_default(), - &batch, - grove_version, - ) - }; + ); - collect_costs.flat_map_ok(|_| { + cost_return_on_error!( + &mut cost, self.db - .commit_multi_context_batch(batch, transaction) + .commit_multi_context_batch(batch, Some(tx.as_ref())) .map_err(Into::into) - }) + ); + + tx.commit_local().wrap_with_cost(cost) } fn insert_on_transaction<'db, 'b, B: AsRef<[u8]>>( @@ -165,50 +161,6 @@ impl GroveDb { Ok(()).wrap_with_cost(cost) } - fn insert_without_transaction<'b, B: AsRef<[u8]>>( - &self, - path: SubtreePath<'b, B>, - key: &[u8], - element: Element, - options: InsertOptions, - batch: &StorageBatch, - grove_version: &GroveVersion, - ) -> CostResult<(), Error> { - check_grovedb_v0_with_cost!( - "insert_without_transaction", - grove_version - .grovedb_versions - .operations - .insert - .insert_without_transaction - ); - - let mut cost = OperationCost::default(); - - let mut merk_cache: HashMap, Merk> = - HashMap::default(); - - let merk = cost_return_on_error!( - &mut cost, - self.add_element_without_transaction( - &path.to_vec(), - key, - element, - options, - batch, - grove_version - ) - ); - merk_cache.insert(path.clone(), merk); - - cost_return_on_error!( - &mut cost, - self.propagate_changes_without_transaction(merk_cache, path, batch, grove_version) - ); - - Ok(()).wrap_with_cost(cost) - } - /// Add subtree to another subtree. /// We want to add a new empty merk to another merk at a key /// first make sure other merk exist @@ -267,7 +219,7 @@ impl GroveDb { } if options.validate_insertion_does_not_override_tree { let element = cost_return_on_error_no_add!( - &cost, + cost, Element::deserialize(element_bytes.as_slice(), grove_version).map_err( |_| { Error::CorruptedData(String::from("unable to deserialize element")) @@ -355,142 +307,6 @@ impl GroveDb { Ok(subtree_to_insert_into).wrap_with_cost(cost) } - /// Add an empty tree or item to a parent tree. 
- /// We want to add a new empty merk to another merk at a key - /// first make sure other merk exist - /// if it exists, then create merk to be inserted, and get root hash - /// we only care about root hash of merk to be inserted - fn add_element_without_transaction<'db, B: AsRef<[u8]>>( - &'db self, - path: &[B], - key: &[u8], - element: Element, - options: InsertOptions, - batch: &'db StorageBatch, - grove_version: &GroveVersion, - ) -> CostResult, Error> { - check_grovedb_v0_with_cost!( - "add_element_without_transaction", - grove_version - .grovedb_versions - .operations - .insert - .add_element_without_transaction - ); - - let mut cost = OperationCost::default(); - let mut subtree_to_insert_into = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path(path.into(), Some(batch), grove_version) - ); - - if options.checks_for_override() { - let maybe_element_bytes = cost_return_on_error!( - &mut cost, - subtree_to_insert_into - .get( - key, - true, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version - ) - .map_err(|e| Error::CorruptedData(e.to_string())) - ); - if let Some(element_bytes) = maybe_element_bytes { - if options.validate_insertion_does_not_override { - return Err(Error::OverrideNotAllowed( - "insertion not allowed to override", - )) - .wrap_with_cost(cost); - } - if options.validate_insertion_does_not_override_tree { - let element = cost_return_on_error_no_add!( - &cost, - Element::deserialize(element_bytes.as_slice(), grove_version).map_err( - |_| { - Error::CorruptedData(String::from("unable to deserialize element")) - } - ) - ); - if element.is_any_tree() { - return Err(Error::OverrideNotAllowed( - "insertion not allowed to override tree", - )) - .wrap_with_cost(cost); - } - } - } - } - - match element { - Element::Reference(ref reference_path, ..) => { - let reference_path = cost_return_on_error!( - &mut cost, - path_from_reference_path_type(reference_path.clone(), path, Some(key)) - .wrap_with_cost(OperationCost::default()) - ); - let referenced_item = cost_return_on_error!( - &mut cost, - self.follow_reference( - reference_path.as_slice().into(), - false, - None, - grove_version - ) - ); - - let referenced_element_value_hash = - cost_return_on_error!(&mut cost, referenced_item.value_hash(grove_version)); - - cost_return_on_error!( - &mut cost, - element.insert_reference( - &mut subtree_to_insert_into, - key, - referenced_element_value_hash, - Some(options.as_merk_options()), - grove_version - ) - ); - } - Element::Tree(ref value, _) - | Element::SumTree(ref value, ..) - | Element::BigSumTree(ref value, ..) - | Element::CountTree(ref value, ..) => { - if value.is_some() { - return Err(Error::InvalidCodeExecution( - "a tree should be empty at the moment of insertion when not using batches", - )) - .wrap_with_cost(cost); - } else { - cost_return_on_error!( - &mut cost, - element.insert_subtree( - &mut subtree_to_insert_into, - key, - NULL_HASH, - Some(options.as_merk_options()), - grove_version - ) - ); - } - } - _ => { - cost_return_on_error!( - &mut cost, - element.insert( - &mut subtree_to_insert_into, - key, - Some(options.as_merk_options()), - grove_version - ) - ); - } - } - - Ok(subtree_to_insert_into).wrap_with_cost(cost) - } - /// Insert if not exists /// Insert if not exists /// diff --git a/grovedb/src/operations/is_empty_tree.rs b/grovedb/src/operations/is_empty_tree.rs index 4dec3abf3..f1357fe32 100644 --- a/grovedb/src/operations/is_empty_tree.rs +++ b/grovedb/src/operations/is_empty_tree.rs @@ -1,17 +1,14 @@ //! 
Check if empty tree operations -#[cfg(feature = "minimal")] -use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; -use grovedb_merk::tree_type::TreeType; +use grovedb_costs::{cost_return_on_error, CostResult, OperationCost}; use grovedb_path::SubtreePath; -#[cfg(feature = "minimal")] -use grovedb_version::error::GroveVersionError; use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; -#[cfg(feature = "minimal")] -use crate::{util::merk_optional_tx, Element, Error, GroveDb, TransactionArg}; +use crate::{ + util::{compat, TxRef}, + Error, GroveDb, TransactionArg, +}; -#[cfg(feature = "minimal")] impl GroveDb { /// Check if it's an empty tree pub fn is_empty_tree<'b, B, P>( @@ -31,19 +28,17 @@ impl GroveDb { let mut cost = OperationCost::default(); let path: SubtreePath = path.into(); + let tx = TxRef::new(&self.db, transaction); + cost_return_on_error!( &mut cost, - self.check_subtree_exists_path_not_found(path.clone(), transaction, grove_version) + self.check_subtree_exists_path_not_found(path.clone(), tx.as_ref(), grove_version) ); - merk_optional_tx!( + let subtree = cost_return_on_error!( &mut cost, - self.db, - path, - None, - transaction, - subtree, - grove_version, - { Ok(subtree.is_empty_tree().unwrap_add_cost(&mut cost)).wrap_with_cost(cost) } - ) + compat::merk_optional_tx(&self.db, path, tx.as_ref(), None, grove_version) + ); + + subtree.is_empty_tree().add_cost(cost).map(Ok) } } diff --git a/grovedb/src/operations/proof/generate.rs b/grovedb/src/operations/proof/generate.rs index aff486379..7fe50c6eb 100644 --- a/grovedb/src/operations/proof/generate.rs +++ b/grovedb/src/operations/proof/generate.rs @@ -12,9 +12,7 @@ use grovedb_merk::{ Merk, ProofWithoutEncodingResult, }; use grovedb_storage::StorageContext; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; #[cfg(feature = "proof_debug")] use crate::query_result_type::QueryResultType; @@ -89,7 +87,7 @@ impl GroveDb { .with_big_endian() .with_no_limit(); let encoded_proof = cost_return_on_error_no_add!( - &cost, + cost, bincode::encode_to_vec(proof, config) .map_err(|e| Error::CorruptedData(format!("unable to encode proof {}", e))) ); @@ -192,8 +190,10 @@ impl GroveDb { ) -> CostResult { let mut cost = OperationCost::default(); + let tx = self.start_transaction(); + let query = cost_return_on_error_no_add!( - &cost, + cost, path_query .query_items_at_path(path.as_slice(), grove_version) .and_then(|query_items| { @@ -210,7 +210,7 @@ impl GroveDb { let subtree = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(path.as_slice().into(), None, grove_version) + self.open_transactional_merk_at_path(path.as_slice().into(), &tx, None, grove_version) ); let limit = if path.len() < path_query.path.len() { diff --git a/grovedb/src/operations/proof/verify.rs b/grovedb/src/operations/proof/verify.rs index dca00bb93..80cec1cfe 100644 --- a/grovedb/src/operations/proof/verify.rs +++ b/grovedb/src/operations/proof/verify.rs @@ -9,8 +9,7 @@ use grovedb_merk::{ CryptoHash, }; use grovedb_version::{ - check_grovedb_v0, error::GroveVersionError, version::GroveVersion, TryFromVersioned, - TryIntoVersioned, + check_grovedb_v0, version::GroveVersion, TryFromVersioned, TryIntoVersioned, }; #[cfg(feature = "proof_debug")] diff --git a/grovedb/src/query/mod.rs b/grovedb/src/query/mod.rs index 890da1dbc..01bf8439c 100644 --- a/grovedb/src/query/mod.rs +++ 
b/grovedb/src/query/mod.rs @@ -12,7 +12,7 @@ use grovedb_merk::proofs::query::query_item::QueryItem; use grovedb_merk::proofs::query::{Key, SubqueryBranch}; #[cfg(any(feature = "minimal", feature = "verify"))] use grovedb_merk::proofs::Query; -use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use grovedb_version::{check_grovedb_v0, version::GroveVersion}; use indexmap::IndexMap; use crate::operations::proof::util::hex_to_ascii; diff --git a/grovedb/src/reference_path.rs b/grovedb/src/reference_path.rs index afd852600..91d7198db 100644 --- a/grovedb/src/reference_path.rs +++ b/grovedb/src/reference_path.rs @@ -2,15 +2,26 @@ #[cfg(any(feature = "minimal", feature = "verify"))] use std::fmt; +use std::{collections::HashSet, iter}; use bincode::{Decode, Encode}; +use grovedb_costs::{cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt}; +use grovedb_merk::CryptoHash; #[cfg(feature = "minimal")] +use grovedb_path::{SubtreePath, SubtreePathBuilder}; +use grovedb_version::check_grovedb_v0_with_cost; +#[cfg(feature = "full")] use grovedb_visualize::visualize_to_vec; #[cfg(feature = "minimal")] use integer_encoding::VarInt; #[cfg(any(feature = "minimal", feature = "verify"))] use crate::Error; +use crate::{ + merk_cache::{MerkCache, MerkHandle}, + operations::MAX_REFERENCE_HOPS, + Element, +}; #[cfg(any(feature = "minimal", feature = "verify"))] #[cfg_attr(not(any(feature = "minimal", feature = "visualize")), derive(Debug))] @@ -59,10 +70,89 @@ pub enum ReferencePathType { SiblingReference(Vec), } +impl ReferencePathType { + /// Get an inverted reference + pub(crate) fn invert>(&self, path: SubtreePath, key: &[u8]) -> Option { + Some(match self { + // Absolute path shall point to a fully qualified path of the reference's origin + ReferencePathType::AbsolutePathReference(_) => { + let mut qualified_path = path.to_vec(); + qualified_path.push(key.to_vec()); + ReferencePathType::AbsolutePathReference(qualified_path) + } + // Since both reference origin and path share N first segments, the backward reference + // can do the same, key we shall persist for a qualified path as the output + ReferencePathType::UpstreamRootHeightReference(n, _) => { + let relative_path: Vec<_> = path + .to_vec() + .into_iter() + .skip(*n as usize) + .chain(iter::once(key.to_vec())) + .collect(); + ReferencePathType::UpstreamRootHeightReference(*n, relative_path) + } + // Since it uses some parent information it get's complicated, so falling back to the + // preivous type of reference + ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference(n, _) => { + let relative_path: Vec<_> = path + .to_vec() + .into_iter() + .skip(*n as usize) + .chain(iter::once(key.to_vec())) + .collect(); + ReferencePathType::UpstreamRootHeightReference(*n, relative_path) + } + // Discarding N latest segments is relative to the previously appended path, so it would + // be easier to discard appended paths both ways and have a shared prefix. 
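// Worked example for the UpstreamRootHeightReference arm above; a sketch in the
// spirit of the `inverted_upstream_root_height` test near the end of this file,
// not itself part of the patch.
#[test]
fn invert_upstream_root_height_example() {
    // A reference stored at path [a, b, c, d] under key e that keeps the first
    // two segments resolves to the qualified path [a, b, m, n].
    let stored_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into();
    let reference =
        ReferencePathType::UpstreamRootHeightReference(2, vec![b"m".to_vec(), b"n".to_vec()]);

    // Its inverse keeps the same two shared segments and appends the rest of
    // the origin's qualified path, so from [a, b, m] it points back to
    // [a, b, c, d, e].
    assert_eq!(
        reference.invert(stored_path, b"e"),
        Some(ReferencePathType::UpstreamRootHeightReference(
            2,
            vec![b"c".to_vec(), b"d".to_vec(), b"e".to_vec()]
        ))
    );
}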
+ ReferencePathType::UpstreamFromElementHeightReference(n, append_path) => { + let mut relative_path: Vec> = path + .into_reverse_iter() + .take(*n as usize) + .map(|x| x.to_vec()) + .collect(); + relative_path.reverse(); + relative_path.push(key.to_vec()); + ReferencePathType::UpstreamFromElementHeightReference( + append_path.len() as u8 - 1, + relative_path, + ) + } + // Cousin is relative to cousin, key will remain the same + ReferencePathType::CousinReference(_) => ReferencePathType::CousinReference( + path.into_reverse_iter().next().map(|x| x.to_vec())?, + ), + // Here since any number of segments could've been added we need to resort to a more + // specific option + ReferencePathType::RemovedCousinReference(append_path) => { + let mut relative_path = + vec![path.into_reverse_iter().next().map(|x| x.to_vec())?]; + relative_path.push(key.to_vec()); + ReferencePathType::UpstreamFromElementHeightReference( + append_path.len() as u8, + relative_path, + ) + } + // The closest way back would be just to use the key + ReferencePathType::SiblingReference(_) => { + ReferencePathType::SiblingReference(key.to_vec()) + } + }) + } +} + // Helper function to display paths fn display_path(path: &[Vec]) -> String { path.iter() - .map(hex::encode) + .map(|bytes| { + let mut hx = hex::encode(bytes); + if let Ok(s) = String::from_utf8(bytes.clone()) { + hx.push('('); + hx.push_str(&s); + hx.push(')'); + } + + hx + }) .collect::>() .join("/") } @@ -132,6 +222,132 @@ impl ReferencePathType { ) -> Result>, Error> { path_from_reference_path_type(self, current_path, current_key) } + + /// TODO: deprecate the rest + pub fn absolute_qualified_path<'b, B: AsRef<[u8]>>( + self, + mut current_path: SubtreePathBuilder<'b, B>, + current_key: &[u8], + ) -> Result, Error> { + match self { + ReferencePathType::AbsolutePathReference(path) => { + Ok(SubtreePathBuilder::owned_from_iter(path)) + } + + ReferencePathType::UpstreamRootHeightReference(no_of_elements_to_keep, append_path) => { + let len = current_path.len(); + if no_of_elements_to_keep as usize > len { + return Err(Error::InvalidInput( + "reference stored path cannot satisfy reference constraints", + )); + } + let n_to_remove = len - no_of_elements_to_keep as usize; + + let referenced_path = (0..n_to_remove).fold(current_path, |p, _| { + p.derive_parent_owned() + .expect("lenghts were checked above") + .0 + }); + let referenced_path = append_path.into_iter().fold(referenced_path, |mut p, s| { + p.push_segment(&s); + p + }); + + Ok(referenced_path) + } + + ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference( + no_of_elements_to_keep, + append_path, + ) => { + let len = current_path.len(); + if no_of_elements_to_keep as usize > len || len < 1 { + return Err(Error::InvalidInput( + "reference stored path cannot satisfy reference constraints", + )); + } + + let parent_key = current_path + .reverse_iter() + .next() + .expect("lengths were checked above") + .to_vec(); + + let n_to_remove = len - no_of_elements_to_keep as usize; + + let referenced_path = (0..n_to_remove).fold(current_path, |p, _| { + p.derive_parent_owned() + .expect("lenghts were checked above") + .0 + }); + let mut referenced_path = + append_path.into_iter().fold(referenced_path, |mut p, s| { + p.push_segment(&s); + p + }); + referenced_path.push_segment(&parent_key); + + Ok(referenced_path) + } + + // Discard the last n elements from current path, append new path to subpath + ReferencePathType::UpstreamFromElementHeightReference( + no_of_elements_to_discard_from_end, + append_path, 
+ ) => { + let mut referenced_path = current_path; + for _ in 0..no_of_elements_to_discard_from_end { + if let Some((path, _)) = referenced_path.derive_parent_owned() { + referenced_path = path; + } else { + return Err(Error::InvalidInput( + "reference stored path cannot satisfy reference constraints", + )); + } + } + + let referenced_path = append_path.into_iter().fold(referenced_path, |mut p, s| { + p.push_segment(&s); + p + }); + + Ok(referenced_path) + } + + ReferencePathType::CousinReference(cousin_key) => { + let Some((mut referred_path, _)) = current_path.derive_parent_owned() else { + return Err(Error::InvalidInput( + "reference stored path cannot satisfy reference constraints", + )); + }; + + referred_path.push_segment(&cousin_key); + referred_path.push_segment(current_key); + + Ok(referred_path) + } + + ReferencePathType::RemovedCousinReference(cousin_path) => { + let Some((mut referred_path, _)) = current_path.derive_parent_owned() else { + return Err(Error::InvalidInput( + "reference stored path cannot satisfy reference constraints", + )); + }; + + cousin_path + .into_iter() + .for_each(|s| referred_path.push_segment(&s)); + referred_path.push_segment(current_key); + + Ok(referred_path) + } + + ReferencePathType::SiblingReference(sibling_key) => { + current_path.push_segment(&sibling_key); + Ok(current_path) + } + } + } } #[cfg(any(feature = "minimal", feature = "visualize"))] @@ -320,10 +536,157 @@ impl ReferencePathType { } } +pub(crate) struct ResolvedReference<'db, 'b, 'c, B> { + pub target_merk: MerkHandle<'db, 'c>, + pub target_path: SubtreePathBuilder<'b, B>, + pub target_key: Vec, + pub target_element: Element, + pub target_node_value_hash: CryptoHash, +} + +pub(crate) fn follow_reference<'db, 'b, 'c, B: AsRef<[u8]>>( + merk_cache: &'c MerkCache<'db, 'b, B>, + path: SubtreePathBuilder<'b, B>, + key: &[u8], + ref_path: ReferencePathType, +) -> CostResult, Error> { + // TODO: this is a new version of follow reference + check_grovedb_v0_with_cost!( + "follow_reference", + merk_cache + .version + .grovedb_versions + .operations + .get + .follow_reference + ); + + let mut cost = Default::default(); + + let mut hops_left = MAX_REFERENCE_HOPS; + let mut visited = HashSet::new(); + + let mut qualified_path = path.clone(); + qualified_path.push_segment(key); + + visited.insert(qualified_path); + + let mut current_path = path; + let mut current_key = key.to_vec(); + let mut current_ref = ref_path; + + while hops_left > 0 { + let referred_qualified_path = cost_return_on_error_no_add!( + cost, + current_ref.absolute_qualified_path(current_path, ¤t_key) + ); + + if !visited.insert(referred_qualified_path.clone()) { + return Err(Error::CyclicReference).wrap_with_cost(cost); + } + + let Some((referred_path, referred_key)) = referred_qualified_path.derive_parent_owned() + else { + return Err(Error::InvalidCodeExecution("empty reference")).wrap_with_cost(cost); + }; + + let mut referred_merk = + cost_return_on_error!(&mut cost, merk_cache.get_merk(referred_path.clone())); + let (element, value_hash) = cost_return_on_error!( + &mut cost, + referred_merk + .for_merk(|m| { + Element::get_with_value_hash(m, &referred_key, true, merk_cache.version) + }) + .map_err(|e| match e { + Error::PathKeyNotFound(s) => Error::CorruptedReferencePathKeyNotFound(s), + e => e, + }) + ); + + match element { + Element::Reference(ref_path, ..) 
=> { + current_path = referred_path; + current_key = referred_key; + current_ref = ref_path; + hops_left -= 1; + } + e => { + return Ok(ResolvedReference { + target_merk: referred_merk, + target_path: referred_path, + target_key: referred_key, + target_element: e, + target_node_value_hash: value_hash, + }) + .wrap_with_cost(cost) + } + } + } + + Err(Error::ReferenceLimit).wrap_with_cost(cost) +} + +/// Follow references stopping at the immediate element without following +/// further. +pub(crate) fn follow_reference_once<'db, 'b, 'c, B: AsRef<[u8]>>( + merk_cache: &'c MerkCache<'db, 'b, B>, + path: SubtreePathBuilder<'b, B>, + key: &[u8], + ref_path: ReferencePathType, +) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "follow_reference_once", + merk_cache + .version + .grovedb_versions + .operations + .get + .follow_reference_once + ); + + let mut cost = Default::default(); + + let referred_qualified_path = + cost_return_on_error_no_add!(cost, ref_path.absolute_qualified_path(path.clone(), key)); + + let Some((referred_path, referred_key)) = referred_qualified_path.derive_parent_owned() else { + return Err(Error::InvalidCodeExecution("empty reference")).wrap_with_cost(cost); + }; + + if path == referred_path && key == referred_key { + return Err(Error::CyclicReference).wrap_with_cost(cost); + } + + let mut referred_merk = + cost_return_on_error!(&mut cost, merk_cache.get_merk(referred_path.clone())); + let (element, value_hash) = cost_return_on_error!( + &mut cost, + referred_merk + .for_merk(|m| { + Element::get_with_value_hash(m, &referred_key, true, merk_cache.version) + }) + .map_err(|e| match e { + Error::PathKeyNotFound(s) => Error::CorruptedReferencePathKeyNotFound(s), + e => e, + }) + ); + + Ok(ResolvedReference { + target_merk: referred_merk, + target_path: referred_path, + target_key: referred_key, + target_element: element, + target_node_value_hash: value_hash, + }) + .wrap_with_cost(cost) +} + #[cfg(feature = "minimal")] #[cfg(test)] mod tests { use grovedb_merk::proofs::Query; + use grovedb_path::{SubtreePath, SubtreePathBuilder}; use grovedb_version::version::GroveVersion; use crate::{ @@ -345,6 +708,20 @@ mod tests { ); } + #[test] + fn test_upstream_root_height_reference_path_lib() { + let stored_path: SubtreePathBuilder<&[u8]> = + SubtreePathBuilder::owned_from_iter([b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]); + // selects the first 2 elements from the stored path and appends the new path. + let ref1 = + ReferencePathType::UpstreamRootHeightReference(2, vec![b"c".to_vec(), b"d".to_vec()]); + let final_path = ref1.absolute_qualified_path(stored_path, b"").unwrap(); + assert_eq!( + final_path.to_vec(), + vec![b"a".to_vec(), b"b".to_vec(), b"c".to_vec(), b"d".to_vec()] + ); + } + #[test] fn test_upstream_root_height_with_parent_addition_reference() { let stored_path = vec![b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]; @@ -366,6 +743,28 @@ mod tests { ); } + #[test] + fn test_upstream_root_height_with_parent_addition_reference_path_lib() { + let stored_path: SubtreePathBuilder<&[u8]> = + SubtreePathBuilder::owned_from_iter([b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]); + // selects the first 2 elements from the stored path and appends the new path. 
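// Standalone sketch (plain std Rust, not grovedb code) of the traversal policy
// `follow_reference` above uses: a hop budget in the spirit of
// MAX_REFERENCE_HOPS plus a visited set of qualified paths for cycle detection.
// `follow_reference_once` above performs a single hop instead and only rejects
// a direct self-reference.
use std::collections::HashSet;

fn follow<F>(
    start: Vec<Vec<u8>>,
    max_hops: usize,
    mut resolve: F,
) -> Result<Vec<Vec<u8>>, &'static str>
where
    // Returns Some(next qualified path) while the current target is still a
    // reference, or None once a concrete element has been reached.
    F: FnMut(&[Vec<u8>]) -> Option<Vec<Vec<u8>>>,
{
    let mut visited: HashSet<Vec<Vec<u8>>> = HashSet::new();
    visited.insert(start.clone());
    let mut current = start;
    for _ in 0..max_hops {
        match resolve(&current) {
            // Reached a non-reference element: resolution is complete.
            None => return Ok(current),
            Some(next) => {
                // Seeing the same qualified path twice means a cycle.
                if !visited.insert(next.clone()) {
                    return Err("cyclic reference");
                }
                current = next;
            }
        }
    }
    // The chain is longer than the allowed number of hops.
    Err("reference limit reached")
}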
+ let ref1 = ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference( + 2, + vec![b"c".to_vec(), b"d".to_vec()], + ); + let final_path = ref1.absolute_qualified_path(stored_path, b"").unwrap(); + assert_eq!( + final_path.to_vec(), + vec![ + b"a".to_vec(), + b"b".to_vec(), + b"c".to_vec(), + b"d".to_vec(), + b"m".to_vec() + ] + ); + } + #[test] fn test_upstream_from_element_height_reference() { let stored_path = vec![b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]; @@ -381,6 +780,22 @@ mod tests { ); } + #[test] + fn test_upstream_from_element_height_reference_path_lib() { + let stored_path: SubtreePathBuilder<&[u8]> = + SubtreePathBuilder::owned_from_iter([b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]); + // discards the last element from the stored_path + let ref1 = ReferencePathType::UpstreamFromElementHeightReference( + 1, + vec![b"c".to_vec(), b"d".to_vec()], + ); + let final_path = ref1.absolute_qualified_path(stored_path, b"").unwrap(); + assert_eq!( + final_path.to_vec(), + vec![b"a".to_vec(), b"b".to_vec(), b"c".to_vec(), b"d".to_vec()] + ); + } + #[test] fn test_cousin_reference_no_key() { let stored_path = vec![b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]; @@ -403,6 +818,20 @@ mod tests { ); } + #[test] + fn test_cousin_reference_path_lib() { + let stored_path: SubtreePathBuilder<&[u8]> = + SubtreePathBuilder::owned_from_iter([b"a".as_ref(), b"b".as_ref()]); + let key = b"m".as_ref(); + // Replaces the immediate parent (in this case b) with the given key (c) + let ref1 = ReferencePathType::CousinReference(b"c".to_vec()); + let final_path = ref1.absolute_qualified_path(stored_path, key).unwrap(); + assert_eq!( + final_path.to_vec(), + vec![b"a".to_vec(), b"c".to_vec(), b"m".to_vec()] + ); + } + #[test] fn test_removed_cousin_reference_no_key() { let stored_path = vec![b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]; @@ -425,6 +854,20 @@ mod tests { ); } + #[test] + fn test_removed_cousin_reference_path_lib() { + let stored_path: SubtreePathBuilder<&[u8]> = + SubtreePathBuilder::owned_from_iter([b"a".as_ref(), b"b".as_ref()]); + let key = b"m".as_ref(); + // Replaces the immediate parent (in this case b) with the given key (c) + let ref1 = ReferencePathType::RemovedCousinReference(vec![b"c".to_vec(), b"d".to_vec()]); + let final_path = ref1.absolute_qualified_path(stored_path, key).unwrap(); + assert_eq!( + final_path.to_vec(), + vec![b"a".to_vec(), b"c".to_vec(), b"d".to_vec(), b"m".to_vec()] + ); + } + #[test] fn test_sibling_reference() { let stored_path = vec![b"a".as_ref(), b"b".as_ref()]; @@ -437,6 +880,19 @@ mod tests { ); } + #[test] + fn test_sibling_reference_path_lib() { + let stored_path: SubtreePathBuilder<&[u8]> = + SubtreePathBuilder::owned_from_iter([b"a".as_ref(), b"b".as_ref()]); + let key = b"m".as_ref(); + let ref1 = ReferencePathType::SiblingReference(b"c".to_vec()); + let final_path = ref1.absolute_qualified_path(stored_path, key).unwrap(); + assert_eq!( + final_path.to_vec(), + vec![b"a".to_vec(), b"b".to_vec(), b"c".to_vec()] + ); + } + #[test] fn test_query_many_with_different_reference_types() { let grove_version = GroveVersion::latest(); @@ -515,4 +971,251 @@ mod tests { assert_eq!(hash, db.root_hash(None, grove_version).unwrap().unwrap()); assert_eq!(result.len(), 5); } + + #[test] + fn inverted_absolute_path() { + let current_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into(); + let current_key = b"e"; + let current_qualified_path = { + let mut p = current_path.to_vec(); + p.push(current_key.to_vec()); + p + }; + + let reference = + 
ReferencePathType::AbsolutePathReference(vec![b"m".to_vec(), b"n".to_vec()]); + + let pointed_to_qualified_path = reference + .clone() + .absolute_path(¤t_path.to_vec(), Some(current_key)) + .unwrap(); + + let (pointed_to_key, pointed_to_path) = pointed_to_qualified_path.split_last().unwrap(); + + let inverse = reference.invert(current_path.clone(), current_key).unwrap(); + + assert_ne!(reference, inverse); + + assert_eq!( + reference, + inverse + .invert(pointed_to_path.into(), pointed_to_key) + .unwrap() + ); + assert_eq!( + inverse + .absolute_path(&pointed_to_path, Some(pointed_to_key)) + .unwrap(), + current_qualified_path + ); + } + + #[test] + fn inverted_upstream_root_height() { + let current_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into(); + let current_key = b"e"; + let current_qualified_path = { + let mut p = current_path.to_vec(); + p.push(current_key.to_vec()); + p + }; + + let reference = + ReferencePathType::UpstreamRootHeightReference(2, vec![b"m".to_vec(), b"n".to_vec()]); + + let pointed_to_qualified_path = reference + .clone() + .absolute_path(¤t_path.to_vec(), None) + .unwrap(); + let (pointed_to_key, pointed_to_path) = pointed_to_qualified_path.split_last().unwrap(); + + let inverse = reference.invert(current_path.clone(), current_key).unwrap(); + + assert_ne!(reference, inverse); + + assert_eq!( + reference, + inverse + .invert(pointed_to_path.into(), pointed_to_key) + .unwrap() + ); + assert_eq!( + inverse + .absolute_path(&pointed_to_path, Some(pointed_to_key)) + .unwrap(), + current_qualified_path.to_vec(), + ); + } + + #[test] + fn inverted_upstream_root_height_with_parent_path_addition() { + let current_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into(); + let current_key = b"e"; + let current_qualified_path = { + let mut p = current_path.to_vec(); + p.push(current_key.to_vec()); + p + }; + let reference = ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference( + 2, + vec![b"m".to_vec(), b"n".to_vec()], + ); + + let pointed_to_qualified_path = reference + .clone() + .absolute_path(¤t_path.to_vec(), Some(current_key)) + .unwrap(); + let (pointed_to_key, pointed_to_path) = pointed_to_qualified_path.split_last().unwrap(); + + let inverse = reference.invert(current_path.clone(), current_key).unwrap(); + + assert_ne!(reference, inverse); + + assert_eq!( + inverse + .absolute_path(&pointed_to_path, Some(pointed_to_key)) + .unwrap(), + current_qualified_path.to_vec(), + ); + } + + #[test] + fn inverted_upstream_from_element_height() { + { + let current_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into(); + let current_key = b"e"; + let current_qualified_path = { + let mut p = current_path.to_vec(); + p.push(current_key.to_vec()); + p + }; + let reference = ReferencePathType::UpstreamFromElementHeightReference( + 1, + vec![b"m".to_vec(), b"n".to_vec()], + ); + + let pointed_to_qualified_path = reference + .clone() + .absolute_path(¤t_path.to_vec(), Some(current_key)) + .unwrap(); + let (pointed_to_key, pointed_to_path) = pointed_to_qualified_path.split_last().unwrap(); + + let inverse = reference.invert(current_path.clone(), current_key).unwrap(); + + assert_ne!(reference, inverse); + + assert_eq!( + reference, + inverse + .invert(pointed_to_path.into(), pointed_to_key) + .unwrap() + ); + assert_eq!( + inverse + .absolute_path(&pointed_to_path, Some(pointed_to_key)) + .unwrap(), + current_qualified_path.to_vec(), + ); + } + + { + let current_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into(); + let current_key = b"e"; + let 
current_qualified_path = { + let mut p = current_path.to_vec(); + p.push(current_key.to_vec()); + p + }; + let reference = ReferencePathType::UpstreamFromElementHeightReference( + 3, + vec![b"m".to_vec(), b"n".to_vec()], + ); + + let pointed_to_qualified_path = reference + .clone() + .absolute_path(¤t_path.to_vec(), Some(current_key)) + .unwrap(); + let (pointed_to_key, pointed_to_path) = pointed_to_qualified_path.split_last().unwrap(); + + let inverse = reference.invert(current_path.clone(), current_key).unwrap(); + + assert_ne!(reference, inverse); + + assert_eq!( + reference, + inverse + .invert(pointed_to_path.into(), pointed_to_key) + .unwrap() + ); + assert_eq!( + inverse + .absolute_path(&pointed_to_path, Some(pointed_to_key)) + .unwrap(), + current_qualified_path.to_vec(), + ); + } + } + + #[test] + fn inverted_cousin_reference() { + let current_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into(); + let current_key = b"e"; + let current_qualified_path = { + let mut p = current_path.to_vec(); + p.push(current_key.to_vec()); + p + }; + let reference = + ReferencePathType::RemovedCousinReference(vec![b"m".to_vec(), b"n".to_vec()]); + + let pointed_to_qualified_path = reference + .clone() + .absolute_path(¤t_path.to_vec(), Some(current_key)) + .unwrap(); + let (pointed_to_key, pointed_to_path) = pointed_to_qualified_path.split_last().unwrap(); + + let inverse = reference.invert(current_path.clone(), current_key).unwrap(); + + assert_ne!(reference, inverse); + assert_eq!( + inverse + .absolute_path(&pointed_to_path, Some(pointed_to_key)) + .unwrap(), + current_qualified_path + ); + } + + #[test] + fn inverted_sibling_reference() { + let current_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into(); + let current_key = b"e"; + let current_qualified_path = { + let mut p = current_path.to_vec(); + p.push(current_key.to_vec()); + p + }; + let reference = ReferencePathType::SiblingReference(b"yeet".to_vec()); + + let pointed_to_qualified_path = reference + .clone() + .absolute_path(¤t_path.to_vec(), Some(current_key)) + .unwrap(); + let (pointed_to_key, pointed_to_path) = pointed_to_qualified_path.split_last().unwrap(); + + let inverse = reference.invert(current_path.clone(), current_key).unwrap(); + + assert_ne!(reference, inverse); + assert_eq!( + reference, + inverse + .invert(pointed_to_path.into(), pointed_to_key) + .unwrap() + ); + assert_eq!( + inverse + .absolute_path(&pointed_to_path, Some(pointed_to_key)) + .unwrap(), + current_qualified_path + ); + } } diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index 000f97b27..3cb237900 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -4,10 +4,10 @@ use std::pin::Pin; use grovedb_merk::{tree::hash::CryptoHash, tree_type::TreeType, ChunkProducer}; use grovedb_path::SubtreePath; -use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use grovedb_version::{check_grovedb_v0, version::GroveVersion}; pub use self::state_sync_session::MultiStateSyncSession; -use crate::{Error, GroveDb, TransactionArg}; +use crate::{util::TxRef, Error, GroveDb, TransactionArg}; /// Type alias representing a chunk identifier in the state synchronization /// process. 
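// Sketch only, not part of the patch: read paths such as `fetch_chunk` below
// (and `is_empty_tree` earlier) wrap the optional transaction the same way as
// the write paths, but never call `commit_local` because nothing is written;
// a locally started transaction is simply dropped. `root_hash` is called here
// exactly as in the hunk that follows.
fn current_root_hash<'db>(
    db: &'db GroveDb,
    transaction: TransactionArg<'db, '_>,
    grove_version: &GroveVersion,
) -> Result<[u8; 32], Error> {
    let tx = TxRef::new(&db.db, transaction);
    db.root_hash(Some(tx.as_ref()), grove_version).value
}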
@@ -74,6 +74,9 @@ impl GroveDb { "fetch_chunk", grove_version.grovedb_versions.replication.fetch_chunk ); + + let tx = TxRef::new(&self.db, transaction); + // For now, only CURRENT_STATE_SYNC_VERSION is supported if version != CURRENT_STATE_SYNC_VERSION { return Err(Error::CorruptedData( @@ -81,104 +84,57 @@ impl GroveDb { )); } - let root_app_hash = self.root_hash(transaction, grove_version).value?; + let root_app_hash = self.root_hash(Some(tx.as_ref()), grove_version).value?; let (chunk_prefix, root_key, tree_type, chunk_id) = utils::decode_global_chunk_id(global_chunk_id, &root_app_hash)?; // TODO: Refactor this by writing fetch_chunk_inner (as only merk constructor // and type are different) - if let Some(tx) = transaction { - let merk = self - .open_transactional_merk_by_prefix( - chunk_prefix, - root_key, - tree_type, - tx, - None, - grove_version, - ) - .value - .map_err(|e| { - Error::CorruptedData(format!( - "failed to open merk by prefix tx:{} with:{}", - hex::encode(chunk_prefix), - e - )) - })?; - if merk.is_empty_tree().unwrap() { - return Ok(vec![]); - } - - let mut chunk_producer = ChunkProducer::new(&merk).map_err(|e| { - Error::CorruptedData(format!( - "failed to create chunk producer by prefix tx:{} with:{}", - hex::encode(chunk_prefix), - e - )) - })?; - let (chunk, _) = chunk_producer - .chunk(&chunk_id, grove_version) - .map_err(|e| { - Error::CorruptedData(format!( - "failed to apply chunk:{} with:{}", - hex::encode(chunk_prefix), - e - )) - })?; - let op_bytes = utils::encode_vec_ops(chunk).map_err(|e| { + let merk = self + .open_transactional_merk_by_prefix( + chunk_prefix, + root_key, + tree_type, + tx.as_ref(), + None, + grove_version, + ) + .value + .map_err(|e| { Error::CorruptedData(format!( - "failed to encode chunk ops:{} with:{}", + "failed to open merk by prefix tx:{} with:{}", hex::encode(chunk_prefix), e )) })?; - Ok(op_bytes) - } else { - let merk = self - .open_non_transactional_merk_by_prefix( - chunk_prefix, - root_key, - tree_type, - None, - grove_version, - ) - .value - .map_err(|e| { - Error::CorruptedData(format!( - "failed to open merk by prefix non-tx:{} with:{}", - e, - hex::encode(chunk_prefix) - )) - })?; - if merk.is_empty_tree().unwrap() { - return Ok(vec![]); - } + if merk.is_empty_tree().unwrap() { + return Ok(vec![]); + } - let mut chunk_producer = ChunkProducer::new(&merk).map_err(|e| { - Error::CorruptedData(format!( - "failed to create chunk producer by prefix non-tx:{} with:{}", - hex::encode(chunk_prefix), - e - )) - })?; - let (chunk, _) = chunk_producer - .chunk(&chunk_id, grove_version) - .map_err(|e| { - Error::CorruptedData(format!( - "failed to apply chunk:{} with:{}", - hex::encode(chunk_prefix), - e - )) - })?; - let op_bytes = utils::encode_vec_ops(chunk).map_err(|e| { + let mut chunk_producer = ChunkProducer::new(&merk).map_err(|e| { + Error::CorruptedData(format!( + "failed to create chunk producer by prefix tx:{} with:{}", + hex::encode(chunk_prefix), + e + )) + })?; + let (chunk, _) = chunk_producer + .chunk(&chunk_id, grove_version) + .map_err(|e| { Error::CorruptedData(format!( - "failed to encode chunk ops:{} with:{}", + "failed to apply chunk:{} with:{}", hex::encode(chunk_prefix), e )) })?; - Ok(op_bytes) - } + let op_bytes = utils::encode_vec_ops(chunk).map_err(|e| { + Error::CorruptedData(format!( + "failed to encode chunk ops:{} with:{}", + hex::encode(chunk_prefix), + e + )) + })?; + Ok(op_bytes) } /// Starts a state synchronization process for a snapshot with the given diff --git 
a/grovedb/src/tests/count_sum_tree_tests.rs b/grovedb/src/tests/count_sum_tree_tests.rs index f171aee0d..7c5e4b2d8 100644 --- a/grovedb/src/tests/count_sum_tree_tests.rs +++ b/grovedb/src/tests/count_sum_tree_tests.rs @@ -109,9 +109,12 @@ mod count_sum_tree_tests { // Test aggregate data (count and sum) let batch = StorageBatch::new(); + let transaction = db.start_transaction(); + let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"count_sum_key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -168,9 +171,12 @@ mod count_sum_tree_tests { // Test aggregate data let batch = StorageBatch::new(); + let transaction = db.start_transaction(); + let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"count_sum_key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -249,9 +255,12 @@ mod count_sum_tree_tests { // Open merk and check all elements in it let batch = StorageBatch::new(); + let transaction = db.start_transaction(); + let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"count_sum_key3"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -344,11 +353,13 @@ mod count_sum_tree_tests { .expect("should insert regular tree"); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Aggregate data should be None for regular tree let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"regular_key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -407,8 +418,9 @@ mod count_sum_tree_tests { // Verify aggregate data let batch = StorageBatch::new(); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"count_sum_key4"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -452,9 +464,12 @@ mod count_sum_tree_tests { // Open the CountSumTree and verify aggregate data let batch = StorageBatch::new(); + let transaction = db.start_transaction(); + let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"count_sum_key6"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -521,11 +536,14 @@ mod count_sum_tree_tests { // Verify aggregate data of child let batch = StorageBatch::new(); + let transaction = db.start_transaction(); + let child_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"parent_count_sum", b"child_count_sum"] .as_ref() .into(), + &transaction, Some(&batch), grove_version, ) @@ -540,8 +558,9 @@ mod count_sum_tree_tests { // Verify aggregate data of parent let parent_batch = StorageBatch::new(); let parent_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"parent_count_sum"].as_ref().into(), + &transaction, Some(&parent_batch), grove_version, ) diff --git a/grovedb/src/tests/count_tree_tests.rs b/grovedb/src/tests/count_tree_tests.rs index e4dffc06c..690d9fef1 100644 --- a/grovedb/src/tests/count_tree_tests.rs +++ b/grovedb/src/tests/count_tree_tests.rs @@ -165,11 +165,13 @@ mod tests { .expect("should insert item"); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Open merk and check all elements in it let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, 
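// The test updates in this file, and in the count/sum/tree-hash test files
// further down, all apply the same mechanical change, sketched here with the
// identifiers these tests already use: start an explicit transaction and open
// the subtree through the transactional constructor instead of
// open_non_transactional_merk_at_path.
let batch = StorageBatch::new();
let transaction = db.start_transaction();
let merk = db
    .open_transactional_merk_at_path(
        [TEST_LEAF, b"count_sum_key"].as_ref().into(),
        &transaction,
        Some(&batch),
        grove_version,
    )
    .unwrap()
    .expect("should open merk");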
Some(&batch), grove_version, ) @@ -223,6 +225,8 @@ mod tests { // Perform the same test on regular trees let db = make_test_grovedb(grove_version); + let transaction = db.start_transaction(); + db.insert( [TEST_LEAF].as_ref(), b"key", @@ -255,8 +259,9 @@ mod tests { .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -306,12 +311,14 @@ mod tests { .expect("should insert tree"); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Sum should be non for non count tree // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -350,10 +357,12 @@ mod tests { ) .unwrap() .expect("should insert item"); + // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -386,8 +395,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -409,8 +419,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -442,8 +453,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -464,8 +476,9 @@ mod tests { .unwrap() .expect("expected to delete"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -572,11 +585,13 @@ mod tests { assert_eq!(count_tree.count_value_or_default(), 5); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Assert node feature types let test_leaf_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -596,8 +611,9 @@ mod tests { assert_matches!(root_tree_feature_type, BasicMerkNode); let parent_count_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"count_key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -616,8 +632,9 @@ mod tests { assert_matches!(count_tree_feature_type, CountedMerkNode(4)); let child_count_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"count_key", b"tree2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -702,9 +719,12 @@ mod tests { .expect("should apply batch"); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); + let count_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -748,8 +768,9 @@ mod tests { let batch = StorageBatch::new(); let count_tree = db - 
.open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -837,8 +858,9 @@ mod tests { let batch = StorageBatch::new(); let count_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) diff --git a/grovedb/src/tests/mod.rs b/grovedb/src/tests/mod.rs index a7f01eb7b..1b961a7e4 100644 --- a/grovedb/src/tests/mod.rs +++ b/grovedb/src/tests/mod.rs @@ -1409,12 +1409,17 @@ mod tests { fn test_root_tree_leaves_are_noted() { let grove_version = GroveVersion::latest(); let db = make_test_grovedb(grove_version); - db.check_subtree_exists_path_not_found([TEST_LEAF].as_ref().into(), None, grove_version) - .unwrap() - .expect("should exist"); + let transaction = db.start_transaction(); + db.check_subtree_exists_path_not_found( + [TEST_LEAF].as_ref().into(), + &transaction, + grove_version, + ) + .unwrap() + .expect("should exist"); db.check_subtree_exists_path_not_found( [ANOTHER_TEST_LEAF].as_ref().into(), - None, + &transaction, grove_version, ) .unwrap() @@ -3120,10 +3125,15 @@ mod tests { // let mut iter = db // .elements_iterator([TEST_LEAF, b"subtree1"].as_ref(), None) // .expect("cannot create iterator"); + let transaction = db.grove_db.start_transaction(); let storage_context = db .grove_db .db - .get_storage_context([TEST_LEAF, b"subtree1"].as_ref().into(), None) + .get_transactional_storage_context( + [TEST_LEAF, b"subtree1"].as_ref().into(), + None, + &transaction, + ) .unwrap(); let mut iter = Element::iterator(storage_context.raw_iter()).unwrap(); assert_eq!( @@ -3212,7 +3222,12 @@ mod tests { fn test_root_subtree_has_root_key() { let grove_version = GroveVersion::latest(); let db = make_test_grovedb(grove_version); - let storage = db.db.get_storage_context(EMPTY_PATH, None).unwrap(); + let transaction = db.start_transaction(); + + let storage = db + .db + .get_transactional_storage_context(EMPTY_PATH, None, &transaction) + .unwrap(); let root_merk = Merk::open_base( storage, TreeType::NormalTree, @@ -3312,10 +3327,16 @@ mod tests { // Retrieve subtree instance // Check if it returns the same instance that was inserted { + let transaction = db.grove_db.start_transaction(); + let subtree_storage = db .grove_db .db - .get_storage_context([TEST_LEAF, b"key1", b"key2"].as_ref().into(), None) + .get_transactional_storage_context( + [TEST_LEAF, b"key1", b"key2"].as_ref().into(), + None, + &transaction, + ) .unwrap(); let subtree = Merk::open_layered_with_root_key( subtree_storage, @@ -3330,6 +3351,11 @@ mod tests { .unwrap() .unwrap(); assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); + + db.grove_db + .commit_transaction(transaction) + .unwrap() + .unwrap(); } // Insert a new tree with transaction let transaction = db.start_transaction(); @@ -3384,7 +3410,11 @@ mod tests { let subtree_storage = db .grove_db .db - .get_storage_context([TEST_LEAF, b"key1", b"key2"].as_ref().into(), None) + .get_transactional_storage_context( + [TEST_LEAF, b"key1", b"key2"].as_ref().into(), + None, + &transaction, + ) .unwrap(); let subtree = Merk::open_layered_with_root_key( subtree_storage, diff --git a/grovedb/src/tests/sum_tree_tests.rs b/grovedb/src/tests/sum_tree_tests.rs index 777fcb453..4aa96ee8e 100644 --- a/grovedb/src/tests/sum_tree_tests.rs +++ b/grovedb/src/tests/sum_tree_tests.rs @@ -268,11 +268,13 @@ mod tests { .expect("should insert item"); let batch = 
StorageBatch::new(); + let transaction = db.start_transaction(); // Open merk and check all elements in it let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -331,6 +333,8 @@ mod tests { // Perform the same test on regular trees let db = make_test_grovedb(grove_version); + let transaction = db.start_transaction(); + db.insert( [TEST_LEAF].as_ref(), b"key", @@ -363,8 +367,9 @@ mod tests { .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -415,11 +420,13 @@ mod tests { let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Sum should be non for non sum tree // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -460,8 +467,9 @@ mod tests { .expect("should insert item"); // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -494,8 +502,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -518,8 +527,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -552,8 +562,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -586,8 +597,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -619,11 +631,13 @@ mod tests { let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Sum should be non for non sum tree // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -664,8 +678,9 @@ mod tests { .expect("should insert item"); // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -688,8 +703,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -712,8 +728,9 @@ mod tests { .unwrap() .expect_err("should not be able to insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), 
grove_version, ) @@ -736,8 +753,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -760,8 +778,9 @@ mod tests { .unwrap() .expect_err("should not be able to insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -784,8 +803,9 @@ mod tests { .unwrap() .expect("should be able to insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -808,8 +828,9 @@ mod tests { .expect("should be able to insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -831,8 +852,9 @@ mod tests { .unwrap() .expect_err("expected not be able to delete"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -938,11 +960,13 @@ mod tests { assert_eq!(sum_tree.sum_value_or_default(), 35); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Assert node feature types let test_leaf_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -962,8 +986,9 @@ mod tests { )); let parent_sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -985,8 +1010,9 @@ mod tests { )); let child_sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key", b"tree2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1175,11 +1201,13 @@ mod tests { ); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Assert node feature types let test_leaf_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1199,8 +1227,9 @@ mod tests { )); let parent_sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"big_sum_tree"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1237,8 +1266,9 @@ mod tests { ); let child_sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"big_sum_tree", b"sum_tree_1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1295,8 +1325,9 @@ mod tests { ); let child_sum_tree_2 = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"big_sum_tree", b"sum_tree_2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1341,11 +1372,13 @@ mod tests { db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); + let transaction = db.start_transaction(); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ 
-1386,11 +1419,13 @@ mod tests { db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); + let transaction = db.start_transaction(); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1476,11 +1511,13 @@ mod tests { db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); + let transaction = db.start_transaction(); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1516,11 +1553,13 @@ mod tests { db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); + let transaction = db.start_transaction(); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1561,11 +1600,13 @@ mod tests { db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); + let transaction = db.start_transaction(); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1651,11 +1692,13 @@ mod tests { db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); + let transaction = db.start_transaction(); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) diff --git a/grovedb/src/tests/tree_hashes_tests.rs b/grovedb/src/tests/tree_hashes_tests.rs index e86b8fd0a..670b0918e 100644 --- a/grovedb/src/tests/tree_hashes_tests.rs +++ b/grovedb/src/tests/tree_hashes_tests.rs @@ -56,10 +56,12 @@ fn test_node_hashes_when_inserting_item() { .expect("successful subtree insert"); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); let test_leaf_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -129,10 +131,12 @@ fn test_tree_hashes_when_inserting_empty_tree() { .expect("successful subtree insert"); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); let test_leaf_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -173,8 +177,9 @@ fn test_tree_hashes_when_inserting_empty_tree() { .expect("value hash should be some"); let underlying_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -225,10 +230,12 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { .expect("successful subtree insert"); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); let under_top_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -236,8 +243,9 @@ fn 
test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { .expect("should open merk"); let middle_merk_key1 = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -258,8 +266,9 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { .expect("value hash should be some"); let bottom_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1", b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) diff --git a/grovedb/src/util.rs b/grovedb/src/util.rs index 20ec46d8a..c91205a2c 100644 --- a/grovedb/src/util.rs +++ b/grovedb/src/util.rs @@ -1,477 +1,39 @@ -/// Macro to execute same piece of code on different storage contexts -/// (transactional or not) using path argument. -macro_rules! storage_context_optional_tx { - ($db:expr, $path:expr, $batch:expr, $transaction:ident, $storage:ident, { $($body:tt)* }) => { - { - use ::grovedb_storage::Storage; - if let Some(tx) = $transaction { - let $storage = $db - .get_transactional_storage_context($path, $batch, tx); - $($body)* - } else { - let $storage = $db - .get_storage_context($path, $batch); - $($body)* - } - } - }; -} +pub(crate) mod compat; -/// Macro to execute same piece of code on different storage contexts -/// (transactional or not) using path argument. -macro_rules! storage_context_with_parent_optional_tx { - ( - &mut $cost:ident, - $db:expr, - $path:expr, - $batch:expr, - $transaction:ident, - $storage:ident, - $root_key:ident, - $tree_type:ident, - $grove_version:ident, - { $($body:tt)* } - ) => { - { - use ::grovedb_storage::Storage; - if let Some(tx) = $transaction { - let $storage = $db - .get_transactional_storage_context($path.clone(), $batch, tx) - .unwrap_add_cost(&mut $cost); - if let Some((parent_path, parent_key)) = $path.derive_parent() { - let parent_storage = $db - .get_transactional_storage_context(parent_path, $batch, tx) - .unwrap_add_cost(&mut $cost); - let element = cost_return_on_error!( - &mut $cost, - Element::get_from_storage(&parent_storage, parent_key, $grove_version) - .map_err(|e| { - Error::PathParentLayerNotFound( - format!( - "could not get key for parent of subtree optional on tx: {}", - e - ) - ) - }) - ); - let Some(($root_key, $tree_type)) = element.root_key_and_tree_type_owned() else - { - return Err(Error::CorruptedData( - "parent is not a tree" - .to_owned(), - )).wrap_with_cost($cost); - }; - $($body)* - } else { - return Err(Error::CorruptedData( - "path is empty".to_owned(), - )).wrap_with_cost($cost); - } - } else { - let $storage = $db - .get_storage_context($path.clone(), $batch).unwrap_add_cost(&mut $cost); - if let Some((parent_path, parent_key)) = $path.derive_parent() { - let parent_storage = $db.get_storage_context( - parent_path, $batch - ).unwrap_add_cost(&mut $cost); - let element = cost_return_on_error!( - &mut $cost, - Element::get_from_storage(&parent_storage, parent_key, $grove_version).map_err(|e| { - Error::PathParentLayerNotFound( - format!( - "could not get key for parent of subtree optional no tx: {}", - e - ) - ) - }) - ); - let Some(($root_key, $tree_type)) = element.root_key_and_tree_type_owned() else - { - return Err(Error::CorruptedData( - "parent is not a tree" - .to_owned(), - )).wrap_with_cost($cost); - }; - $($body)* - } else { - return Err(Error::CorruptedData( - "path is empty".to_owned(), - )).wrap_with_cost($cost); - } - } - } - }; -} +use 
grovedb_storage::Storage; -/// Macro to execute same piece of code on different storage contexts -/// (transactional or not) using path argument. -macro_rules! storage_context_with_parent_optional_tx_internal_error { - ( - &mut $cost:ident, - $db:expr, - $path:expr, - $batch:expr, - $transaction:ident, - $storage:ident, - $root_key:ident, - $tree_type:ident, - $grove_version:ident, - { $($body:tt)* } - ) => { - { - use ::grovedb_storage::Storage; - if let Some(tx) = $transaction { - let $storage = $db - .get_transactional_storage_context($path.clone(), $batch, tx) - .unwrap_add_cost(&mut $cost); - if let Some((parent_path, parent_key)) = $path.derive_parent() { - let parent_storage = $db - .get_transactional_storage_context(parent_path, $batch, tx) - .unwrap_add_cost(&mut $cost); - let result = Element::get_from_storage( - &parent_storage, - parent_key, - $grove_version - ).map_err(|e| { - Error::PathParentLayerNotFound( - format!( - "could not get key for parent of subtree optional on tx: {}", - e - ) - ) - }).unwrap_add_cost(&mut $cost); - match result { - Ok(element) => { - let Some(($root_key, $tree_type)) - = element.root_key_and_tree_type_owned() else - { - return Err(Error::CorruptedData( - "parent is not a tree" - .to_owned(), - )).wrap_with_cost($cost); - }; - $($body)* - }, - Err(e) => Err(e), - } - } else { - return Err(Error::CorruptedData( - "path is empty".to_owned(), - )).wrap_with_cost($cost); - } - } else { - let $storage = $db - .get_storage_context($path.clone(), $batch).unwrap_add_cost(&mut $cost); - if let Some((parent_path, parent_key)) = $path.derive_parent() { - let parent_storage = $db.get_storage_context( - parent_path, - $batch - ).unwrap_add_cost(&mut $cost); - let result = Element::get_from_storage( - &parent_storage, - parent_key, - $grove_version - ).map_err(|e| { - Error::PathParentLayerNotFound( - format!( - "could not get key for parent of subtree optional no tx: {}", - e - ) - ) - }).unwrap_add_cost(&mut $cost); - match result { - Ok(element) => { - let Some(($root_key, $tree_type)) - = element.root_key_and_tree_type_owned() else - { - return Err(Error::CorruptedData( - "parent is not a tree" - .to_owned(), - )).wrap_with_cost($cost); - }; - $($body)* - }, - Err(e) => Err(e), - } - } else { - return Err(Error::CorruptedData( - "path is empty".to_owned(), - )).wrap_with_cost($cost); - } - } - } - }; -} +use crate::{Error, RocksDbStorage, Transaction, TransactionArg}; -/// Macro to execute same piece of code on different storage contexts with -/// empty prefix. -macro_rules! meta_storage_context_optional_tx { - ($db:expr, $batch:expr, $transaction:ident, $storage:ident, { $($body:tt)* }) => { - { - use ::grovedb_storage::Storage; - if let Some(tx) = $transaction { - let $storage = $db - .get_transactional_storage_context( - ::grovedb_path::SubtreePath::empty(), - $batch, - tx - ); - $($body)* - } else { - let $storage = $db - .get_storage_context( - ::grovedb_path::SubtreePath::empty(), - $batch - ); - $($body)* - } - } - }; +pub(crate) enum TxRef<'a, 'db: 'a> { + Owned(Transaction<'db>), + Borrowed(&'a Transaction<'db>), } -/// Macro to execute same piece of code on Merk with varying storage -/// contexts. -macro_rules! 
merk_optional_tx { - ( - &mut $cost:ident, - $db:expr, - $path:expr, - $batch:expr, - $transaction:ident, - $subtree:ident, - $grove_version:ident, - { $($body:tt)* } - ) => { - if $path.is_root() { - use crate::util::storage_context_optional_tx; - storage_context_optional_tx!( - $db, - ::grovedb_path::SubtreePath::empty(), - $batch, - $transaction, - storage, - { - let $subtree = cost_return_on_error!( - &mut $cost, - ::grovedb_merk::Merk::open_base( - storage.unwrap_add_cost(&mut $cost), - TreeType::NormalTree, - Some(&Element::value_defined_cost_for_serialized_value), - $grove_version, - ).map(|merk_res| - merk_res - .map_err(|_| crate::Error::CorruptedData( - "cannot open a subtree".to_owned() - )) - ) - ); - $($body)* - }) - } else { - use crate::util::storage_context_with_parent_optional_tx; - storage_context_with_parent_optional_tx!( - &mut $cost, - $db, - $path, - $batch, - $transaction, - storage, - root_key, - tree_type, - $grove_version, - { - #[allow(unused_mut)] - let mut $subtree = cost_return_on_error!( - &mut $cost, - ::grovedb_merk::Merk::open_layered_with_root_key( - storage, - root_key, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - $grove_version, - ).map(|merk_res| - merk_res - .map_err(|_| crate::Error::CorruptedData( - "cannot open a subtree".to_owned() - )) - ) - ); - $($body)* - } - ) - } - }; -} - -/// Macro to execute same piece of code on Merk with varying storage -/// contexts. -macro_rules! merk_optional_tx_internal_error { - ( - &mut $cost:ident, - $db:expr, - $path:expr, - $batch:expr, - $transaction:ident, - $subtree:ident, - $grove_version:ident, - { $($body:tt)* } - ) => { - if $path.is_root() { - use crate::util::storage_context_optional_tx; - storage_context_optional_tx!( - $db, - ::grovedb_path::SubtreePath::empty(), - $batch, - $transaction, - storage, - { - let $subtree = cost_return_on_error!( - &mut $cost, - ::grovedb_merk::Merk::open_base( - storage.unwrap_add_cost(&mut $cost), - TreeType::NormalTree, - Some(&Element::value_defined_cost_for_serialized_value), - $grove_version - ).map(|merk_res| - merk_res - .map_err(|_| crate::Error::CorruptedData( - "cannot open a subtree".to_owned() - )) - ) - ); - $($body)* - }) - } else { - use crate::util::storage_context_with_parent_optional_tx_internal_error; - storage_context_with_parent_optional_tx_internal_error!( - &mut $cost, - $db, - $path, - $batch, - $transaction, - storage, - root_key, - tree_type, - $grove_version, - { - #[allow(unused_mut)] - let mut $subtree = cost_return_on_error!( - &mut $cost, - ::grovedb_merk::Merk::open_layered_with_root_key( - storage, - root_key, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - $grove_version, - ).map(|merk_res| - merk_res - .map_err(|_| crate::Error::CorruptedData( - "cannot open a subtree".to_owned() - )) - ) - ); - $($body)* - } - ) - } - }; -} +impl<'a, 'db> TxRef<'a, 'db> { + pub(crate) fn new(db: &'db RocksDbStorage, transaction_arg: TransactionArg<'db, 'a>) -> Self { + if let Some(tx) = transaction_arg { + Self::Borrowed(tx) + } else { + Self::Owned(db.start_transaction()) + } + } -/// Macro to execute same piece of code on Merk with varying storage -/// contexts. -macro_rules! 
merk_optional_tx_path_not_empty { - ( - &mut $cost:ident, - $db:expr, - $path:expr, - $batch:expr, - $transaction:ident, - $subtree:ident, - $grove_version:ident, - { $($body:tt)* } - ) => { - { - use crate::util::storage_context_with_parent_optional_tx; - storage_context_with_parent_optional_tx!( - &mut $cost, - $db, - $path, - $batch, - $transaction, - storage, - root_key, - tree_type, - $grove_version, - { - #[allow(unused_mut)] - let mut $subtree = cost_return_on_error!( - &mut $cost, - ::grovedb_merk::Merk::open_layered_with_root_key( - storage, - root_key, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - $grove_version, - ).map(|merk_res| - merk_res - .map_err(|_| crate::Error::CorruptedData( - "cannot open a subtree".to_owned() - )) - ) - ); - $($body)* - } - ) + /// Commit the transaction if it wasn't received from outside + pub(crate) fn commit_local(self) -> Result<(), Error> { + match self { + TxRef::Owned(tx) => tx + .commit() + .map_err(|e| grovedb_storage::Error::from(e).into()), + TxRef::Borrowed(_) => Ok(()), + } } - }; } -/// Macro to execute same piece of code on Merk with varying storage -/// contexts. -macro_rules! root_merk_optional_tx { - ( - &mut $cost:ident, - $db:expr, - $batch:expr, - $transaction:ident, - $subtree:ident, - $grove_version:ident, - { $($body:tt)* } - ) => { - { - use crate::util::storage_context_optional_tx; - storage_context_optional_tx!( - $db, - ::grovedb_path::SubtreePath::empty(), - $batch, - $transaction, - storage, - { - let $subtree = cost_return_on_error!( - &mut $cost, - ::grovedb_merk::Merk::open_base( - storage.unwrap_add_cost(&mut $cost), - TreeType::NormalTree, - Some(&Element::value_defined_cost_for_serialized_value), - $grove_version, - ).map(|merk_res| - merk_res - .map_err(|_| crate::Error::CorruptedData( - "cannot open a subtree".to_owned() - )) - ) - ); - $($body)* - }) +impl<'db> AsRef> for TxRef<'_, 'db> { + fn as_ref(&self) -> &Transaction<'db> { + match self { + TxRef::Owned(tx) => tx, + TxRef::Borrowed(tx) => tx, } - }; + } } - -pub(crate) use merk_optional_tx; -pub(crate) use merk_optional_tx_internal_error; -pub(crate) use merk_optional_tx_path_not_empty; -pub(crate) use meta_storage_context_optional_tx; -pub(crate) use root_merk_optional_tx; -pub(crate) use storage_context_optional_tx; -pub(crate) use storage_context_with_parent_optional_tx; -pub(crate) use storage_context_with_parent_optional_tx_internal_error; diff --git a/grovedb/src/util/compat.rs b/grovedb/src/util/compat.rs new file mode 100644 index 000000000..6134e61cc --- /dev/null +++ b/grovedb/src/util/compat.rs @@ -0,0 +1,131 @@ +use grovedb_costs::{cost_return_on_error, CostResult, CostsExt}; +use grovedb_merk::{Merk, TreeType}; +use grovedb_path::SubtreePath; +use grovedb_storage::{ + rocksdb_storage::{PrefixedRocksDbTransactionContext, RocksDbStorage}, + Storage, StorageBatch, +}; +use grovedb_version::version::GroveVersion; + +use crate::{Element, Error, Transaction}; + +pub(crate) trait OpenMerkErrorsCompat { + fn parent_key_not_found<'b, B: AsRef<[u8]>>( + e: Error, + parent_path: SubtreePath<'b, B>, + parent_key: &[u8], + ) -> Error; + + fn open_base_error() -> Error; + + fn parent_must_be_tree() -> Error; +} + +pub(crate) fn open_merk<'db, 'b, B, C: OpenMerkErrorsCompat>( + db: &'db RocksDbStorage, + path: SubtreePath<'b, B>, + tx: &'db Transaction, + batch: Option<&'db StorageBatch>, + grove_version: &GroveVersion, +) -> CostResult>, Error> +where + B: AsRef<[u8]> + 'b, +{ + let mut cost = Default::default(); + + let storage 
= db + .get_transactional_storage_context(path.clone(), batch, tx) + .unwrap_add_cost(&mut cost); + if let Some((parent_path, parent_key)) = path.derive_parent() { + let parent_storage = db + .get_transactional_storage_context(parent_path.clone(), batch, tx) + .unwrap_add_cost(&mut cost); + let element = cost_return_on_error!( + &mut cost, + Element::get_from_storage(&parent_storage, parent_key, grove_version) + .map_err(|e| C::parent_key_not_found(e, parent_path, parent_key)) + ); + if let Some((root_key, tree_type)) = element.root_key_and_tree_type_owned() { + Merk::open_layered_with_root_key( + storage, + root_key, + tree_type, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| C::parent_must_be_tree()) + .add_cost(cost) + } else { + Err(Error::CorruptedPath( + "cannot open a subtree as parent exists but is not a tree".to_string(), + )) + .wrap_with_cost(cost) + } + } else { + Merk::open_base( + storage, + TreeType::NormalTree, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| C::open_base_error()) + .add_cost(cost) + } +} + +/// Opens a subtree with errors returned compatible to now removed +/// `merk_optional_tx!` macro. +pub(crate) fn merk_optional_tx<'db, 'b, B>( + db: &'db RocksDbStorage, + path: SubtreePath<'b, B>, + tx: &'db Transaction, + batch: Option<&'db StorageBatch>, + grove_version: &GroveVersion, +) -> CostResult>, Error> +where + B: AsRef<[u8]> + 'b, +{ + struct Compat; + + impl OpenMerkErrorsCompat for Compat { + fn parent_key_not_found<'b, B: AsRef<[u8]>>( + e: Error, + _parent_path: SubtreePath<'b, B>, + _parent_key: &[u8], + ) -> Error { + Error::PathParentLayerNotFound(format!( + "could not get key for parent of subtree optional on tx: {}", + e + )) + } + + fn open_base_error() -> Error { + Error::CorruptedData("cannot open a subtree".to_owned()) + } + + fn parent_must_be_tree() -> Error { + Error::CorruptedData("parent is not a tree".to_owned()) + } + } + + open_merk::<_, Compat>(db, path, tx, batch, grove_version) +} + +/// Opens a subtree with errors returned compatible to now removed +/// `merk_optional_tx_path_not_empty!` macro. 
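+/// Unlike [`merk_optional_tx`], an empty (root) path is rejected with a
+/// `CorruptedData` error instead of opening the base Merk.
+///
+/// A rough usage sketch (not a compiled doctest; `db`, `tx`, `batch` and
+/// `grove_version` are assumed to be in scope):
+/// ```ignore
+/// let subtree = merk_optional_tx_path_not_empty(
+///     &db,
+///     [b"tree" as &[u8], b"subtree"].as_ref().into(),
+///     &tx,
+///     Some(&batch),
+///     grove_version,
+/// )
+/// .unwrap()
+/// .expect("cannot open subtree");
+/// ```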
+pub(crate) fn merk_optional_tx_path_not_empty<'db, 'b, B>( + db: &'db RocksDbStorage, + path: SubtreePath<'b, B>, + tx: &'db Transaction, + batch: Option<&'db StorageBatch>, + grove_version: &GroveVersion, +) -> CostResult>, Error> +where + B: AsRef<[u8]> + 'b, +{ + if path.is_root() { + Err(Error::CorruptedData("path is empty".to_owned())).wrap_with_cost(Default::default()) + } else { + merk_optional_tx(db, path, tx, batch, grove_version) + } +} diff --git a/grovedb/src/visualize.rs b/grovedb/src/visualize.rs index 8fdccc7e0..fbf79ee53 100644 --- a/grovedb/src/visualize.rs +++ b/grovedb/src/visualize.rs @@ -36,13 +36,12 @@ use bincode::{ }; use grovedb_merk::{Merk, VisualizeableMerk}; use grovedb_path::SubtreePathBuilder; -use grovedb_storage::StorageContext; +use grovedb_storage::{Storage, StorageContext}; use grovedb_version::version::GroveVersion; use grovedb_visualize::{visualize_stdout, Drawer, Visualize}; use crate::{ - element::Element, reference_path::ReferencePathType, util::storage_context_optional_tx, - GroveDb, TransactionArg, + element::Element, reference_path::ReferencePathType, util::TxRef, GroveDb, TransactionArg, }; impl Visualize for Element { @@ -225,35 +224,40 @@ impl GroveDb { ) -> Result> { drawer.down(); - storage_context_optional_tx!(self.db, (&path).into(), None, transaction, storage, { - let mut iter = Element::iterator(storage.unwrap().raw_iter()).unwrap(); - while let Some((key, element)) = iter - .next_element(grove_version) - .unwrap() - .expect("cannot get next element") - { - drawer.write(b"\n[key: ")?; - drawer = key.visualize(drawer)?; - drawer.write(b" ")?; - match element { - Element::Tree(..) => { - drawer.write(b"Merk root is: ")?; - drawer = element.visualize(drawer)?; - drawer.down(); - drawer = self.draw_subtree( - drawer, - path.derive_owned_with_child(key), - transaction, - grove_version, - )?; - drawer.up(); - } - other => { - drawer = other.visualize(drawer)?; - } + let tx = TxRef::new(&self.db, transaction); + + let storage = self + .db + .get_transactional_storage_context((&path).into(), None, tx.as_ref()) + .unwrap(); + + let mut iter = Element::iterator(storage.raw_iter()).unwrap(); + while let Some((key, element)) = iter + .next_element(grove_version) + .unwrap() + .expect("cannot get next element") + { + drawer.write(b"\n[key: ")?; + drawer = key.visualize(drawer)?; + drawer.write(b" ")?; + match element { + Element::Tree(..) => { + drawer.write(b"Merk root is: ")?; + drawer = element.visualize(drawer)?; + drawer.down(); + drawer = self.draw_subtree( + drawer, + path.derive_owned_with_child(key), + transaction, + grove_version, + )?; + drawer.up(); + } + other => { + drawer = other.visualize(drawer)?; } } - }); + } drawer.up(); Ok(drawer) diff --git a/merk/src/merk/meta.rs b/merk/src/merk/meta.rs new file mode 100644 index 000000000..a51b7acff --- /dev/null +++ b/merk/src/merk/meta.rs @@ -0,0 +1,111 @@ +//! Metadata access for Merk trees + +use std::collections::hash_map::Entry; + +use grovedb_costs::{CostResult, CostsExt}; +use grovedb_storage::StorageContext; + +use super::Merk; +use crate::Error; + +impl<'db, S: StorageContext<'db>> Merk { + /// Get metadata for the Merk under `key`. 
+ pub fn get_meta(&mut self, key: Vec) -> CostResult, Error> { + match self.meta_cache.entry(key) { + Entry::Occupied(e) => Ok(e.into_mut().as_deref()).wrap_with_cost(Default::default()), + Entry::Vacant(e) => self + .storage + .get_meta(e.key()) + .map_ok(|b| e.insert(b).as_deref()) + .map_err(Error::StorageError), + } + } + + /// Set metadata under `key`. This doesn't affect the state (root hash). + pub fn put_meta(&mut self, key: Vec, value: Vec) -> CostResult<(), Error> { + self.storage + .put_meta(&key, &value, None) + .map_ok(|_| { + self.meta_cache.insert(key, Some(value)); + }) + .map_err(Error::StorageError) + } + + /// Delete metadata under `key`. + pub fn delete_meta(&mut self, key: &[u8]) -> CostResult<(), Error> { + self.storage + .delete_meta(key, None) + .map_ok(|_| { + self.meta_cache.remove(key); + }) + .map_err(Error::StorageError) + } +} + +#[cfg(test)] +mod tests { + use grovedb_costs::OperationCost; + use grovedb_version::version::GroveVersion; + + use crate::test_utils::TempMerk; + + #[test] + fn meta_storage_data_retrieval() { + let version = GroveVersion::latest(); + let mut merk = TempMerk::new(&version); + + merk.put_meta(b"key".to_vec(), b"value".to_vec()) + .unwrap() + .unwrap(); + + let mut cost: OperationCost = Default::default(); + assert_eq!( + merk.get_meta(b"key".to_vec()) + .unwrap_add_cost(&mut cost) + .unwrap(), + Some(b"value".as_slice()) + ); + assert!(cost.is_nothing()); + } + + #[test] + fn meta_storage_works_uncommited() { + let version = GroveVersion::latest(); + let mut merk = TempMerk::new(&version); + + let mut cost_1: OperationCost = Default::default(); + assert!(merk + .get_meta(b"key".to_vec()) + .unwrap_add_cost(&mut cost_1) + .unwrap() + .is_none()); + assert!(!cost_1.is_nothing()); + + let mut cost_2: OperationCost = Default::default(); + assert!(merk + .get_meta(b"key".to_vec()) + .unwrap_add_cost(&mut cost_2) + .unwrap() + .is_none()); + assert!(cost_2.is_nothing()); + } + + #[test] + fn meta_storage_deletion() { + let version = GroveVersion::latest(); + let mut merk = TempMerk::new(&version); + + merk.put_meta(b"key".to_vec(), b"value".to_vec()) + .unwrap() + .unwrap(); + + assert_eq!( + merk.get_meta(b"key".to_vec()).unwrap().unwrap(), + Some(b"value".as_slice()) + ); + + merk.delete_meta(b"key").unwrap().unwrap(); + + assert!(merk.get_meta(b"key".to_vec()).unwrap().unwrap().is_none()); + } +} diff --git a/merk/src/merk/mod.rs b/merk/src/merk/mod.rs index 27d679c12..39eef077f 100644 --- a/merk/src/merk/mod.rs +++ b/merk/src/merk/mod.rs @@ -412,7 +412,7 @@ where // update pointer to root node cost_return_on_error_no_add!( - &inner_cost, + inner_cost, batch .put_root(ROOT_KEY_KEY, tree_key, costs) .map_err(CostsError) @@ -448,7 +448,7 @@ where for (key, maybe_sum_tree_cost, maybe_value, storage_cost) in to_batch { if let Some((value, left_size, right_size)) = maybe_value { cost_return_on_error_no_add!( - &cost, + cost, batch .put( &key, @@ -466,7 +466,7 @@ where for (key, value, storage_cost) in aux { match value { Op::Put(value, ..) 
=> cost_return_on_error_no_add!( - &cost, + cost, batch .put_aux(key, value, storage_cost.clone()) .map_err(CostsError) @@ -474,7 +474,7 @@ where Op::Delete => batch.delete_aux(key, storage_cost.clone()), _ => { cost_return_on_error_no_add!( - &cost, + cost, Err(Error::InvalidOperation( "only put and delete allowed for aux storage" )) @@ -795,7 +795,7 @@ mod test { use grovedb_path::SubtreePath; use grovedb_storage::{ - rocksdb_storage::{PrefixedRocksDbStorageContext, RocksDbStorage}, + rocksdb_storage::{PrefixedRocksDbTransactionContext, RocksDbStorage}, RawIterator, Storage, StorageBatch, StorageContext, }; use grovedb_version::version::GroveVersion; @@ -1027,9 +1027,11 @@ mod test { let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); + let transaction = storage.start_transaction(); + let mut merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1053,9 +1055,11 @@ mod test { let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); + let transaction = storage.start_transaction(); + let mut merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1074,7 +1078,7 @@ mod test { fn reopen() { let grove_version = GroveVersion::latest(); fn collect( - mut node: RefWalker>, + mut node: RefWalker>, nodes: &mut Vec>, ) { let grove_version = GroveVersion::latest(); @@ -1109,9 +1113,15 @@ mod test { let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); + let mut merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), Some(&batch)) + .get_transactional_storage_context( + SubtreePath::empty(), + Some(&batch), + &transaction, + ) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1125,12 +1135,13 @@ mod test { .unwrap(); storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .expect("cannot commit batch"); + let merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1144,14 +1155,19 @@ mod test { let mut nodes = vec![]; collect(walker, &mut nodes); + + storage.commit_transaction(transaction).unwrap().unwrap(); + nodes }; let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); + let transaction = storage.start_transaction(); + let merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1169,7 +1185,7 @@ mod test { } type PrefixedStorageIter<'db, 'ctx> = - &'ctx mut as StorageContext<'db>>::RawIterator; + &'ctx mut as 
StorageContext<'db>>::RawIterator; #[test] fn reopen_iter() { @@ -1189,9 +1205,15 @@ mod test { let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); + let mut merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), Some(&batch)) + .get_transactional_storage_context( + SubtreePath::empty(), + Some(&batch), + &transaction, + ) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1205,14 +1227,14 @@ mod test { .unwrap(); storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .expect("cannot commit batch"); let mut nodes = vec![]; let merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1221,13 +1243,18 @@ mod test { .unwrap() .expect("cannot open merk"); collect(&mut merk.storage.raw_iter(), &mut nodes); + + storage.commit_transaction(transaction).unwrap().unwrap(); + nodes }; + let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); + let transaction = storage.start_transaction(); let merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1249,9 +1276,11 @@ mod test { let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); + let mut merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), Some(&batch)) + .get_transactional_storage_context(SubtreePath::empty(), Some(&batch), &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1309,13 +1338,13 @@ mod test { assert_eq!(result, Some(b"b".to_vec())); storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .expect("cannot commit batch"); let mut merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, diff --git a/merk/src/merk/open.rs b/merk/src/merk/open.rs index a3d4c16e8..5f075157b 100644 --- a/merk/src/merk/open.rs +++ b/merk/src/merk/open.rs @@ -113,12 +113,18 @@ mod test { let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); + let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); + let test_prefix = [b"ayy"]; - let batch = StorageBatch::new(); let mut merk = Merk::open_base( storage - .get_storage_context(SubtreePath::from(test_prefix.as_ref()), Some(&batch)) + .get_transactional_storage_context( + SubtreePath::from(test_prefix.as_ref()), + Some(&batch), + &transaction, + ) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -139,13 +145,17 @@ mod test { let root_hash = merk.root_hash(); storage - .commit_multi_context_batch(batch, None) + 
.commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .expect("cannot commit batch"); let merk = Merk::open_base( storage - .get_storage_context(SubtreePath::from(test_prefix.as_ref()), None) + .get_transactional_storage_context( + SubtreePath::from(test_prefix.as_ref()), + None, + &transaction, + ) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -161,10 +171,11 @@ mod test { let grove_version = GroveVersion::latest(); let storage = TempStorage::new(); let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); let merk_fee_context = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), Some(&batch)) + .get_transactional_storage_context(SubtreePath::empty(), Some(&batch), &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -188,13 +199,13 @@ mod test { .expect("apply failed"); storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .expect("cannot commit batch"); let merk_fee_context = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, diff --git a/merk/src/merk/restore.rs b/merk/src/merk/restore.rs index 0c1784fd9..021504105 100644 --- a/merk/src/merk/restore.rs +++ b/merk/src/merk/restore.rs @@ -555,7 +555,7 @@ mod tests { use grovedb_storage::{ rocksdb_storage::{ test_utils::TempStorage, PrefixedRocksDbImmediateStorageContext, - PrefixedRocksDbStorageContext, + PrefixedRocksDbTransactionContext, }, RawIterator, Storage, }; @@ -581,7 +581,7 @@ mod tests { Op::Push(Node::KV(vec![3], vec![3])), Op::Parent, ]; - assert!(Restorer::::verify_chunk( + assert!(Restorer::::verify_chunk( non_avl_tree_proof, &[0; 32], &None @@ -593,7 +593,7 @@ mod tests { fn test_chunk_verification_only_kv_feature_and_hash() { // should not accept kv let invalid_chunk_proof = vec![Op::Push(Node::KV(vec![1], vec![1]))]; - let verification_result = Restorer::::verify_chunk( + let verification_result = Restorer::::verify_chunk( invalid_chunk_proof, &[0; 32], &None, @@ -607,7 +607,7 @@ mod tests { // should not accept kvhash let invalid_chunk_proof = vec![Op::Push(Node::KVHash([0; 32]))]; - let verification_result = Restorer::::verify_chunk( + let verification_result = Restorer::::verify_chunk( invalid_chunk_proof, &[0; 32], &None, @@ -621,7 +621,7 @@ mod tests { // should not accept kvdigest let invalid_chunk_proof = vec![Op::Push(Node::KVDigest(vec![0], [0; 32]))]; - let verification_result = Restorer::::verify_chunk( + let verification_result = Restorer::::verify_chunk( invalid_chunk_proof, &[0; 32], &None, @@ -635,7 +635,7 @@ mod tests { // should not accept kvvaluehash let invalid_chunk_proof = vec![Op::Push(Node::KVValueHash(vec![0], vec![0], [0; 32]))]; - let verification_result = Restorer::::verify_chunk( + let verification_result = Restorer::::verify_chunk( invalid_chunk_proof, &[0; 32], &None, @@ -649,7 +649,7 @@ mod tests { // should not accept kvrefvaluehash let invalid_chunk_proof = vec![Op::Push(Node::KVRefValueHash(vec![0], vec![0], [0; 32]))]; - let verification_result = Restorer::::verify_chunk( + let verification_result = Restorer::::verify_chunk( invalid_chunk_proof, &[0; 32], &None, diff --git a/merk/src/merk/tree_type.rs b/merk/src/merk/tree_type.rs new file mode 100644 index 000000000..ef845f21a --- /dev/null +++ 
b/merk/src/merk/tree_type.rs @@ -0,0 +1,78 @@ +use std::fmt; + +use crate::{merk::NodeType, Error, TreeFeatureType}; + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] +pub enum MaybeTree { + Tree(TreeType), + NotTree, +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] +pub enum TreeType { + NormalTree = 0, + SumTree = 1, + BigSumTree = 2, + CountTree = 3, + CountSumTree = 4, +} + +impl TryFrom for TreeType { + type Error = Error; + + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(TreeType::NormalTree), + 1 => Ok(TreeType::SumTree), + 2 => Ok(TreeType::BigSumTree), + 3 => Ok(TreeType::CountTree), + 4 => Ok(TreeType::CountSumTree), + n => Err(Error::UnknownTreeType(format!("got {}, max is 4", n))), // Error handling + } + } +} + +impl fmt::Display for TreeType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match *self { + TreeType::NormalTree => "Normal Tree", + TreeType::SumTree => "Sum Tree", + TreeType::BigSumTree => "Big Sum Tree", + TreeType::CountTree => "Count Tree", + TreeType::CountSumTree => "Count Sum Tree", + }; + write!(f, "{}", s) + } +} + +impl TreeType { + pub fn allows_sum_item(&self) -> bool { + match self { + TreeType::NormalTree => false, + TreeType::SumTree => true, + TreeType::BigSumTree => true, + TreeType::CountTree => false, + TreeType::CountSumTree => true, + } + } + + pub const fn inner_node_type(&self) -> NodeType { + match self { + TreeType::NormalTree => NodeType::NormalNode, + TreeType::SumTree => NodeType::SumNode, + TreeType::BigSumTree => NodeType::BigSumNode, + TreeType::CountTree => NodeType::CountNode, + TreeType::CountSumTree => NodeType::CountSumNode, + } + } + + pub fn empty_tree_feature_type(&self) -> TreeFeatureType { + match self { + TreeType::NormalTree => TreeFeatureType::BasicMerkNode, + TreeType::SumTree => TreeFeatureType::SummedMerkNode(0), + TreeType::BigSumTree => TreeFeatureType::BigSummedMerkNode(0), + TreeType::CountTree => TreeFeatureType::CountedMerkNode(0), + TreeType::CountSumTree => TreeFeatureType::CountedSummedMerkNode(0, 0), + } + } +} diff --git a/merk/src/proofs/tree.rs b/merk/src/proofs/tree.rs index dafd09aaf..8a378bd14 100644 --- a/merk/src/proofs/tree.rs +++ b/merk/src/proofs/tree.rs @@ -374,11 +374,11 @@ where } for op in ops { - match cost_return_on_error_no_add!(&cost, op) { + match cost_return_on_error_no_add!(cost, op) { Op::Parent => { let (mut parent, child) = ( - cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), - cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), ); cost_return_on_error!( &mut cost, @@ -395,8 +395,8 @@ where } Op::Child => { let (child, mut parent) = ( - cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), - cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), ); cost_return_on_error!( &mut cost, @@ -413,8 +413,8 @@ where } Op::ParentInverted => { let (mut parent, child) = ( - cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), - cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), ); cost_return_on_error!( &mut cost, @@ -431,8 +431,8 @@ where } Op::ChildInverted => { let (child, mut parent) = ( - cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), - 
cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), ); cost_return_on_error!( &mut cost, @@ -465,7 +465,7 @@ where maybe_last_key = Some(key.clone()); } - cost_return_on_error_no_add!(&cost, visit_node(&node)); + cost_return_on_error_no_add!(cost, visit_node(&node)); let tree: Tree = node.into(); stack.push(tree); @@ -488,7 +488,7 @@ where maybe_last_key = Some(key.clone()); } - cost_return_on_error_no_add!(&cost, visit_node(&node)); + cost_return_on_error_no_add!(cost, visit_node(&node)); let tree: Tree = node.into(); stack.push(tree); diff --git a/merk/src/test_utils/mod.rs b/merk/src/test_utils/mod.rs index 76eec9480..0f7f93ae9 100644 --- a/merk/src/test_utils/mod.rs +++ b/merk/src/test_utils/mod.rs @@ -311,15 +311,16 @@ pub fn make_tree_seq_with_start_key( /// Shortcut to open a Merk with a provided storage and batch pub fn empty_path_merk<'db, S>( storage: &'db S, + transaction: &'db >::Transaction, batch: &'db StorageBatch, grove_version: &GroveVersion, -) -> Merk<>::BatchStorageContext> +) -> Merk<>::BatchTransactionalStorageContext> where S: Storage<'db>, { Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), Some(batch)) + .get_transactional_storage_context(SubtreePath::empty(), Some(batch), transaction) .unwrap(), TreeType::NormalTree, None:: Option>, @@ -332,14 +333,15 @@ where /// Shortcut to open a Merk for read only pub fn empty_path_merk_read_only<'db, S>( storage: &'db S, + transaction: &'db >::Transaction, grove_version: &GroveVersion, -) -> Merk<>::BatchStorageContext> +) -> Merk<>::BatchTransactionalStorageContext> where S: Storage<'db>, { Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None:: Option>, diff --git a/merk/src/test_utils/temp_merk.rs b/merk/src/test_utils/temp_merk.rs index a9b3b26e3..dafd81d09 100644 --- a/merk/src/test_utils/temp_merk.rs +++ b/merk/src/test_utils/temp_merk.rs @@ -28,40 +28,42 @@ //! Temp merk test utils -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] use std::ops::{Deref, DerefMut}; use grovedb_path::SubtreePath; -use grovedb_storage::StorageBatch; -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] +use grovedb_storage::{rocksdb_storage::test_utils::TempStorage, Storage}; use grovedb_storage::{ - rocksdb_storage::{test_utils::TempStorage, PrefixedRocksDbStorageContext}, - Storage, + rocksdb_storage::{PrefixedRocksDbTransactionContext, RocksDbStorage}, + StorageBatch, }; use grovedb_version::version::GroveVersion; -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] use crate::Merk; -use crate::{tree::kv::ValueDefinedCostType, tree_type::TreeType}; +use crate::{tree::kv::ValueDefinedCostType, TreeType}; -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] /// Wraps a Merk instance and deletes it from disk it once it goes out of scope. pub struct TempMerk { storage: &'static TempStorage, batch: &'static StorageBatch, - merk: Merk>, + merk: Merk>, + tx: &'static >::Transaction, } -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] impl TempMerk { /// Opens a `TempMerk` at the given file path, creating a new one if it /// does not exist. 
pub fn new(grove_version: &GroveVersion) -> Self { let storage = Box::leak(Box::new(TempStorage::new())); let batch = Box::leak(Box::new(StorageBatch::new())); + let tx = Box::leak(Box::new(storage.start_transaction())); let context = storage - .get_storage_context(SubtreePath::empty(), Some(batch)) + .get_transactional_storage_context(SubtreePath::empty(), Some(batch), tx) .unwrap(); let merk = Merk::open_base( @@ -76,20 +78,32 @@ impl TempMerk { storage, merk, batch, + tx, } } /// Commits pending batch operations. pub fn commit(&mut self, grove_version: &GroveVersion) { - let batch = unsafe { Box::from_raw(self.batch as *const _ as *mut StorageBatch) }; + let batch: Box = + unsafe { Box::from_raw(self.batch as *const _ as *mut StorageBatch) }; + let tx: Box<>::Transaction> = unsafe { + Box::from_raw( + self.tx as *const _ as *mut >::Transaction, + ) + }; self.storage - .commit_multi_context_batch(*batch, None) + .commit_multi_context_batch(*batch, Some(self.tx)) .unwrap() .expect("unable to commit batch"); + self.storage + .commit_transaction(*tx) + .unwrap() + .expect("unable to commit transaction"); self.batch = Box::leak(Box::new(StorageBatch::new())); + self.tx = Box::leak(Box::new(self.storage.start_transaction())); let context = self .storage - .get_storage_context(SubtreePath::empty(), Some(self.batch)) + .get_transactional_storage_context(SubtreePath::empty(), Some(self.batch), self.tx) .unwrap(); self.merk = Merk::open_base( context, @@ -102,36 +116,42 @@ impl TempMerk { } } -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] impl Drop for TempMerk { fn drop(&mut self) { unsafe { let batch = Box::from_raw(self.batch as *const _ as *mut StorageBatch); - let _ = self.storage.commit_multi_context_batch(*batch, None); + + let tx: Box<>::Transaction> = Box::from_raw( + self.tx as *const _ as *mut >::Transaction, + ); + + let _ = self.storage.commit_multi_context_batch(*batch, Some(&tx)); + let _ = self.storage.commit_transaction(*tx).unwrap(); drop(Box::from_raw(self.storage as *const _ as *mut TempStorage)); } } } -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] impl Default for TempMerk { fn default() -> Self { Self::new(GroveVersion::latest()) } } -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] impl Deref for TempMerk { - type Target = Merk>; + type Target = Merk>; - fn deref(&self) -> &Merk> { + fn deref(&self) -> &Merk> { &self.merk } } -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] impl DerefMut for TempMerk { - fn deref_mut(&mut self) -> &mut Merk> { + fn deref_mut(&mut self) -> &mut Merk> { &mut self.merk } } diff --git a/merk/src/tree/encoding.rs b/merk/src/tree/encoding.rs index 2ef07cf5a..176f8c3be 100644 --- a/merk/src/tree/encoding.rs +++ b/merk/src/tree/encoding.rs @@ -52,7 +52,7 @@ impl TreeNode { let tree_bytes = cost_return_on_error!(&mut cost, storage.get(&key).map_err(StorageError)); let tree_opt = cost_return_on_error_no_add!( - &cost, + cost, tree_bytes .map(|x| TreeNode::decode_raw( &x, diff --git a/merk/src/tree/mod.rs b/merk/src/tree/mod.rs index 460edbce4..4c4161bbd 100644 --- a/merk/src/tree/mod.rs +++ b/merk/src/tree/mod.rs @@ -809,7 +809,7 @@ impl TreeNode { // in this case there is a possibility that the client would want to update the // element flags based on the change of values cost_return_on_error_no_add!( - &cost, + cost, self.just_in_time_tree_node_value_update( old_specialized_cost, get_temp_new_value_with_old_flags, @@ -865,7 +865,7 @@ impl TreeNode { // in this case there is a possibility that the client would want to 
update the // element flags based on the change of values cost_return_on_error_no_add!( - &cost, + cost, self.just_in_time_tree_node_value_update( old_specialized_cost, get_temp_new_value_with_old_flags, @@ -919,7 +919,7 @@ impl TreeNode { // in this case there is a possibility that the client would want to update the // element flags based on the change of values cost_return_on_error_no_add!( - &cost, + cost, self.just_in_time_tree_node_value_update( old_specialized_cost, get_temp_new_value_with_old_flags, @@ -981,7 +981,7 @@ impl TreeNode { // in this case there is a possibility that the client would want to update the // element flags based on the change of values cost_return_on_error_no_add!( - &cost, + cost, self.just_in_time_tree_node_value_update( old_specialized_cost, get_temp_new_value_with_old_flags, @@ -1064,7 +1064,7 @@ impl TreeNode { } } - cost_return_on_error_no_add!(&cost, c.write(self, old_specialized_cost,)); + cost_return_on_error_no_add!(cost, c.write(self, old_specialized_cost,)); // println!("done committing {}", std::str::from_utf8(self.key()).unwrap()); diff --git a/merk/src/tree/ops.rs b/merk/src/tree/ops.rs index 2e2cf3fd6..6eed0138a 100644 --- a/merk/src/tree/ops.rs +++ b/merk/src/tree/ops.rs @@ -514,13 +514,13 @@ where Delete => self.tree().inner.kv.value_byte_cost_size(), DeleteLayered | DeleteLayeredMaybeSpecialized => { cost_return_on_error_no_add!( - &cost, + cost, old_specialized_cost(&key_vec, value) ) } DeleteMaybeSpecialized => { cost_return_on_error_no_add!( - &cost, + cost, old_specialized_cost(&key_vec, value) ) } @@ -534,7 +534,7 @@ where prefixed_key_len + prefixed_key_len.required_space() as u32; let value = self.tree().value_ref(); cost_return_on_error_no_add!( - &cost, + cost, section_removal_bytes(value, total_key_len, old_cost) ) }; diff --git a/merk/src/tree/walk/mod.rs b/merk/src/tree/walk/mod.rs index 834643a11..aebae47d3 100644 --- a/merk/src/tree/walk/mod.rs +++ b/merk/src/tree/walk/mod.rs @@ -230,7 +230,7 @@ where ) -> CostResult { let mut cost = OperationCost::default(); cost_return_on_error_no_add!( - &cost, + cost, self.tree.own_result(|t| t .put_value( value, @@ -275,7 +275,7 @@ where ) -> CostResult { let mut cost = OperationCost::default(); cost_return_on_error_no_add!( - &cost, + cost, self.tree.own_result(|t| t .put_value_with_fixed_cost( value, @@ -321,7 +321,7 @@ where ) -> CostResult { let mut cost = OperationCost::default(); cost_return_on_error_no_add!( - &cost, + cost, self.tree.own_result(|t| t .put_value_and_reference_value_hash( value, @@ -368,7 +368,7 @@ where ) -> CostResult { let mut cost = OperationCost::default(); cost_return_on_error_no_add!( - &cost, + cost, self.tree.own_result(|t| t .put_value_with_reference_value_hash_and_value_cost( value, diff --git a/path/Cargo.toml b/path/Cargo.toml index 91738bfe5..bd1f1679f 100644 --- a/path/Cargo.toml +++ b/path/Cargo.toml @@ -10,3 +10,4 @@ repository = "https://github.com/dashpay/grovedb" [dependencies] hex = "0.4.3" +itertools = "0.13.0" diff --git a/path/src/subtree_path.rs b/path/src/subtree_path.rs index ae8cd9000..48929a174 100644 --- a/path/src/subtree_path.rs +++ b/path/src/subtree_path.rs @@ -35,7 +35,8 @@ //! subtree paths and other path references if use as generic [Into]. 
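+//!
+//! `SubtreePath` also implements `Display`: each segment is printed as hex and,
+//! when it is valid UTF-8, additionally as a string. For example, a path made of
+//! `"tree"` followed by the bytes `0xff 0xfe` renders as
+//! `[h:74726565/s:tree, h:fffe]` (see the `Display` impl below).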
use std::{ - fmt::{Display, Formatter}, + cmp, + fmt::{self, Display}, hash::{Hash, Hasher}, }; @@ -51,48 +52,31 @@ pub struct SubtreePath<'b, B> { pub(crate) ref_variant: SubtreePathInner<'b, B>, } -fn hex_to_ascii(hex_value: &[u8]) -> String { - // Define the set of allowed characters - const ALLOWED_CHARS: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\ - abcdefghijklmnopqrstuvwxyz\ - 0123456789_-/\\[]@"; - - // Check if all characters in hex_value are allowed - if hex_value.iter().all(|&c| ALLOWED_CHARS.contains(&c)) { - // Try to convert to UTF-8 - String::from_utf8(hex_value.to_vec()) - .unwrap_or_else(|_| format!("0x{}", hex::encode(hex_value))) - } else { - // Hex encode and prepend "0x" - format!("0x{}", hex::encode(hex_value)) - } -} +impl> Display for SubtreePath<'_, B> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let path = self.to_vec(); -impl<'b, B: AsRef<[u8]>> Display for SubtreePath<'b, B> { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match &self.ref_variant { - SubtreePathInner::Slice(slice) => { - let ascii_path = slice - .iter() - .map(|e| hex_to_ascii(e.as_ref())) - .collect::>() - .join("/"); - write!(f, "{}", ascii_path) - } - SubtreePathInner::SubtreePath(subtree_path) => { - let ascii_path = subtree_path - .to_vec() - .into_iter() - .map(|a| hex_to_ascii(a.as_slice())) - .collect::>() - .join("/"); - write!(f, "{}", ascii_path) - } - SubtreePathInner::SubtreePathIter(iter) => { - let ascii_path = iter.clone().map(hex_to_ascii).collect::>().join("/"); - write!(f, "{}", ascii_path) + fn fmt_segment(s: impl AsRef<[u8]>) -> String { + let bytes = s.as_ref(); + let hex_str = hex::encode(bytes); + let utf8_str = String::from_utf8(bytes.to_vec()); + let mut result = format!("h:{hex_str}"); + if let Ok(s) = utf8_str { + result.push_str("/s:"); + result.push_str(&s); } + result } + + f.write_str("[")?; + + for s in itertools::intersperse(path.into_iter().map(fmt_segment), ", ".to_owned()) { + f.write_str(&s)?; + } + + f.write_str("]")?; + + Ok(()) } } @@ -114,7 +98,7 @@ pub(crate) enum SubtreePathInner<'b, B> { SubtreePathIter(SubtreePathIter<'b, B>), } -impl<'bl, 'br, BL, BR> PartialEq> for SubtreePath<'bl, BL> +impl<'br, BL, BR> PartialEq> for SubtreePath<'_, BL> where BL: AsRef<[u8]>, BR: AsRef<[u8]>, @@ -126,7 +110,78 @@ where } } -impl<'b, B: AsRef<[u8]>> Eq for SubtreePath<'b, B> {} +/// First and foremost, the order of subtree paths is dictated by their lengths. +/// Therefore, those subtrees closer to the root will come first. The rest it +/// can guarantee is to be free of false equality; however, seemingly unrelated +/// subtrees can come one after another if they share the same length, which was +/// (not) done for performance reasons. 
+impl<'br, BL, BR> PartialOrd> for SubtreePath<'_, BL> +where + BL: AsRef<[u8]>, + BR: AsRef<[u8]>, +{ + fn partial_cmp(&self, other: &SubtreePath<'br, BR>) -> Option { + let iter_a = self.clone().into_reverse_iter(); + let iter_b = other.clone().into_reverse_iter(); + + Some( + iter_a + .len() + .cmp(&iter_b.len()) + .reverse() + .then_with(|| iter_a.cmp(iter_b)), + ) + } +} + +impl<'br, BL, BR> PartialOrd> for SubtreePathBuilder<'_, BL> +where + BL: AsRef<[u8]>, + BR: AsRef<[u8]>, +{ + fn partial_cmp(&self, other: &SubtreePathBuilder<'br, BR>) -> Option { + let iter_a = self.reverse_iter(); + let iter_b = other.reverse_iter(); + + Some( + iter_a + .len() + .cmp(&iter_b.len()) + .reverse() + .then_with(|| iter_a.cmp(iter_b)), + ) + } +} + +impl<'br, BL, BR> PartialOrd> for SubtreePath<'_, BL> +where + BL: AsRef<[u8]>, + BR: AsRef<[u8]>, +{ + fn partial_cmp(&self, other: &SubtreePathBuilder<'br, BR>) -> Option { + self.partial_cmp(&SubtreePath::from(other)) + } +} + +impl Ord for SubtreePath<'_, BL> +where + BL: AsRef<[u8]>, +{ + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.partial_cmp(other).expect("order is totally defined") + } +} + +impl Ord for SubtreePathBuilder<'_, BL> +where + BL: AsRef<[u8]>, +{ + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.partial_cmp(other).expect("order is totally defined") + } +} + +impl> Eq for SubtreePath<'_, B> {} impl<'b, B> From> for SubtreePath<'b, B> { fn from(ref_variant: SubtreePathInner<'b, B>) -> Self { @@ -156,7 +211,7 @@ impl<'s, 'b, B> From<&'s SubtreePathBuilder<'b, B>> for SubtreePath<'s, B> { /// Hash order is the same as iteration order: from most deep path segment up to /// root. -impl<'b, B: AsRef<[u8]>> Hash for SubtreePath<'b, B> { +impl> Hash for SubtreePath<'_, B> { fn hash(&self, state: &mut H) { match &self.ref_variant { SubtreePathInner::Slice(slice) => slice @@ -222,7 +277,7 @@ impl<'b, B: AsRef<[u8]>> SubtreePath<'b, B> { } /// Get a derived path with a child path segment added. 
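+    /// A rough sketch (not a compiled doctest), following the usage in the
+    /// ordering test below:
+    /// ```ignore
+    /// let base: SubtreePath<_> = (&[b"one" as &[u8], b"two"]).into();
+    /// let child = base.derive_owned_with_child(b"three"); // builder for one/two/three
+    /// ```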
- pub fn derive_owned_with_child<'s, S>(&'b self, segment: S) -> SubtreePathBuilder<'b, B> + pub fn derive_owned_with_child<'s, S>(&self, segment: S) -> SubtreePathBuilder<'b, B> where S: Into>, 's: 'b, @@ -322,4 +377,29 @@ mod tests { assert_eq!(as_vec, reference_vec); assert_eq!(parent.len(), reference_vec.len()); } + + #[test] + fn ordering() { + let path_a: SubtreePath<_> = (&[b"one" as &[u8], b"two", b"three"]).into(); + let path_b = path_a.derive_owned_with_child(b"four"); + let path_c = path_a.derive_owned_with_child(b"notfour"); + let (path_d_parent, _) = path_a.derive_parent().unwrap(); + let path_d = path_d_parent.derive_owned_with_child(b"three"); + + // Same lengths for different paths don't make them equal: + assert!(!matches!( + SubtreePath::from(&path_b).cmp(&SubtreePath::from(&path_c)), + cmp::Ordering::Equal + )); + + // Equal paths made the same way are equal: + assert!(matches!( + path_a.cmp(&SubtreePath::from(&path_d)), + cmp::Ordering::Equal + )); + + // Longer paths come first + assert!(path_a > path_b); + assert!(path_a > path_c); + } } diff --git a/path/src/subtree_path_builder.rs b/path/src/subtree_path_builder.rs index 4ef25f0a1..d834a5f32 100644 --- a/path/src/subtree_path_builder.rs +++ b/path/src/subtree_path_builder.rs @@ -46,16 +46,25 @@ pub struct SubtreePathBuilder<'b, B> { pub(crate) relative: SubtreePathRelative<'b>, } +impl Clone for SubtreePathBuilder<'_, B> { + fn clone(&self) -> Self { + SubtreePathBuilder { + base: self.base.clone(), + relative: self.relative.clone(), + } + } +} + /// Hash order is the same as iteration order: from most deep path segment up to /// root. -impl<'b, B: AsRef<[u8]>> Hash for SubtreePathBuilder<'b, B> { +impl> Hash for SubtreePathBuilder<'_, B> { fn hash(&self, state: &mut H) { self.relative.hash(state); self.base.hash(state); } } -impl<'bl, 'br, BL, BR> PartialEq> for SubtreePathBuilder<'bl, BL> +impl<'br, BL, BR> PartialEq> for SubtreePathBuilder<'_, BL> where BL: AsRef<[u8]>, BR: AsRef<[u8]>, @@ -65,7 +74,7 @@ where } } -impl<'bl, 'br, BL, BR> PartialEq> for SubtreePath<'bl, BL> +impl<'br, BL, BR> PartialEq> for SubtreePath<'_, BL> where BL: AsRef<[u8]>, BR: AsRef<[u8]>, @@ -75,7 +84,7 @@ where } } -impl<'bl, 'br, BL, BR> PartialEq> for SubtreePathBuilder<'bl, BL> +impl<'br, BL, BR> PartialEq> for SubtreePathBuilder<'_, BL> where BL: AsRef<[u8]>, BR: AsRef<[u8]>, @@ -85,7 +94,7 @@ where } } -impl<'b, B: AsRef<[u8]>> Eq for SubtreePathBuilder<'b, B> {} +impl> Eq for SubtreePathBuilder<'_, B> {} impl<'s, 'b, B> From<&'s SubtreePath<'b, B>> for SubtreePathBuilder<'b, B> { fn from(value: &'s SubtreePath<'b, B>) -> Self { @@ -97,7 +106,7 @@ impl<'s, 'b, B> From<&'s SubtreePath<'b, B>> for SubtreePathBuilder<'b, B> { } /// Derived subtree path on top of base path. -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) enum SubtreePathRelative<'r> { /// Equivalent to the base path. Empty, @@ -149,6 +158,28 @@ impl Default for SubtreePathBuilder<'static, [u8; 0]> { } } +impl SubtreePathBuilder<'_, B> { + /// Makes an owned `SubtreePathBuilder` out of iterator. + pub fn owned_from_iter>(iter: impl IntoIterator) -> Self { + let bytes = iter.into_iter().fold(CompactBytes::new(), |mut bytes, s| { + bytes.add_segment(s.as_ref()); + bytes + }); + + SubtreePathBuilder { + base: SubtreePath { + ref_variant: SubtreePathInner::Slice(&[]), + }, + relative: SubtreePathRelative::Multi(bytes), + } + } + + /// Create an owned version of `SubtreePathBuilder` from `SubtreePath`. 
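+    /// A rough sketch (not a compiled doctest):
+    /// ```ignore
+    /// let borrowed: SubtreePath<_> = (&[b"one" as &[u8], b"two"]).into();
+    /// // The returned builder owns its segments instead of borrowing them.
+    /// let owned = SubtreePathBuilder::owned_from_path(borrowed);
+    /// ```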
+ pub fn owned_from_path>(path: SubtreePath) -> Self { + Self::owned_from_iter(path.to_vec()) + } +} + impl SubtreePathBuilder<'_, B> { /// Returns the length of the subtree path. pub fn len(&self) -> usize { @@ -159,6 +190,24 @@ impl SubtreePathBuilder<'_, B> { pub fn is_empty(&self) -> bool { self.base.is_empty() && self.relative.is_empty() } + + /// Adds path segment in place. + pub fn push_segment(&mut self, segment: &[u8]) { + match &mut self.relative { + SubtreePathRelative::Empty => { + let mut bytes = CompactBytes::new(); + bytes.add_segment(segment); + self.relative = SubtreePathRelative::Multi(bytes); + } + SubtreePathRelative::Single(old_segment) => { + let mut bytes = CompactBytes::new(); + bytes.add_segment(old_segment); + bytes.add_segment(segment); + self.relative = SubtreePathRelative::Multi(bytes); + } + SubtreePathRelative::Multi(bytes) => bytes.add_segment(segment), + } + } } impl<'b, B: AsRef<[u8]>> SubtreePathBuilder<'b, B> { @@ -191,6 +240,38 @@ impl<'b, B: AsRef<[u8]>> SubtreePathBuilder<'b, B> { } } + /// Get a derived path for a parent and a chopped segment. The lifetime of + /// returned path is constrained solely by the original slice that this + /// whole path hierarchy is based upon, and the point of derivation has + /// no effect on it. + pub fn derive_parent_owned(&self) -> Option<(SubtreePathBuilder<'b, B>, Vec)> { + match &self.relative { + SubtreePathRelative::Empty => self + .base + .derive_parent() + .map(|(path, key)| (path.derive_owned(), key.to_vec())), + SubtreePathRelative::Single(relative) => { + Some((self.base.derive_owned(), relative.to_vec())) + } + SubtreePathRelative::Multi(bytes) => { + let mut new_bytes = bytes.clone(); + if let Some(key) = new_bytes.pop_segment() { + Some(( + SubtreePathBuilder { + base: self.base.clone(), + relative: SubtreePathRelative::Multi(new_bytes), + }, + key, + )) + } else { + self.base + .derive_parent() + .map(|(path, key)| (path.derive_owned(), key.to_vec())) + } + } + } + } + /// Get a derived path with a child path segment added. pub fn derive_owned_with_child<'s, S>(&'b self, segment: S) -> SubtreePathBuilder<'b, B> where @@ -203,24 +284,6 @@ impl<'b, B: AsRef<[u8]>> SubtreePathBuilder<'b, B> { } } - /// Adds path segment in place. - pub fn push_segment(&mut self, segment: &[u8]) { - match &mut self.relative { - SubtreePathRelative::Empty => { - let mut bytes = CompactBytes::new(); - bytes.add_segment(segment); - self.relative = SubtreePathRelative::Multi(bytes); - } - SubtreePathRelative::Single(old_segment) => { - let mut bytes = CompactBytes::new(); - bytes.add_segment(old_segment); - bytes.add_segment(segment); - self.relative = SubtreePathRelative::Multi(bytes); - } - SubtreePathRelative::Multi(bytes) => bytes.add_segment(segment), - } - } - /// Returns an iterator for the subtree path by path segments. pub fn reverse_iter(&'b self) -> SubtreePathIter<'b, B> { match &self.relative { diff --git a/path/src/util/compact_bytes.rs b/path/src/util/compact_bytes.rs index c44b6dd94..e20932404 100644 --- a/path/src/util/compact_bytes.rs +++ b/path/src/util/compact_bytes.rs @@ -31,7 +31,7 @@ use std::mem; /// Bytes vector wrapper to have multiple byte arrays allocated continuosuly. 
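+/// Each segment is stored with its length appended right after it, which lets
+/// `pop_segment` (added below) peel the last segment off the tail without
+/// scanning from the front.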
-#[derive(Debug, Default)] +#[derive(Debug, Default, Clone)] pub(crate) struct CompactBytes { n_segments: usize, data: Vec, @@ -64,6 +64,29 @@ impl CompactBytes { pub fn len(&self) -> usize { self.n_segments } + + pub fn pop_segment(&mut self) -> Option> { + if self.n_segments < 1 { + return None; + } + + let length_size = mem::size_of::(); + let last_segment_length = usize::from_ne_bytes( + self.data[self.data.len() - length_size..] + .try_into() + .expect("internal structure bug"), + ); + + let segment = self.data + [self.data.len() - last_segment_length - length_size..self.data.len() - length_size] + .to_vec(); + + self.data + .truncate(self.data.len() - last_segment_length - length_size); + self.n_segments -= 1; + + Some(segment) + } } #[derive(Debug, Clone, Copy)] @@ -160,4 +183,25 @@ mod tests { assert_eq!(iter.next(), None); assert_eq!(iter.next(), None); } + + #[test] + fn pop_segment() { + let mut bytes = CompactBytes::default(); + bytes.add_segment(b"ayya"); + bytes.add_segment(b"ayyb"); + bytes.add_segment(b"ayyc"); + bytes.add_segment(b"ayyd"); + + assert_eq!(bytes.pop_segment(), Some(b"ayyd".to_vec())); + assert_eq!(bytes.pop_segment(), Some(b"ayyc".to_vec())); + + let mut v: Vec<_> = bytes.reverse_iter().collect(); + v.reverse(); + assert_eq!(v, vec![b"ayya".to_vec(), b"ayyb".to_vec()]); + + assert_eq!(bytes.pop_segment(), Some(b"ayyb".to_vec())); + assert_eq!(bytes.pop_segment(), Some(b"ayya".to_vec())); + assert_eq!(bytes.pop_segment(), None); + assert_eq!(bytes.pop_segment(), None); + } } diff --git a/path/src/util/cow_like.rs b/path/src/util/cow_like.rs index 78608ec89..02a535372 100644 --- a/path/src/util/cow_like.rs +++ b/path/src/util/cow_like.rs @@ -35,7 +35,7 @@ use std::{ /// A smart pointer that follows the semantics of [Cow](std::borrow::Cow) except /// provides no means for mutability and thus doesn't require [Clone]. 
-#[derive(Debug)] +#[derive(Debug, Clone)] pub enum CowLike<'b> { Owned(Vec), Borrowed(&'b [u8]), diff --git a/storage/src/rocksdb_storage.rs b/storage/src/rocksdb_storage.rs index 14c4df5ac..2905adce0 100644 --- a/storage/src/rocksdb_storage.rs +++ b/storage/src/rocksdb_storage.rs @@ -36,7 +36,7 @@ mod tests; pub use rocksdb::{Error, WriteBatchWithTransaction}; pub use storage_context::{ PrefixedRocksDbBatch, PrefixedRocksDbImmediateStorageContext, PrefixedRocksDbRawIterator, - PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext, + PrefixedRocksDbTransactionContext, }; pub use self::storage::RocksDbStorage; diff --git a/storage/src/rocksdb_storage/storage.rs b/storage/src/rocksdb_storage/storage.rs index f43c05e2e..9a3d66ece 100644 --- a/storage/src/rocksdb_storage/storage.rs +++ b/storage/src/rocksdb_storage/storage.rs @@ -44,10 +44,7 @@ use rocksdb::{ Transaction, WriteBatchWithTransaction, DEFAULT_COLUMN_FAMILY_NAME, }; -use super::{ - PrefixedRocksDbImmediateStorageContext, PrefixedRocksDbStorageContext, - PrefixedRocksDbTransactionContext, -}; +use super::{PrefixedRocksDbImmediateStorageContext, PrefixedRocksDbTransactionContext}; use crate::{ error, error::Error::{CostError, RocksDBError}, @@ -190,7 +187,7 @@ impl RocksDbStorage { db_batch.put(&key, &value); cost.seek_count += 1; cost_return_on_error_no_add!( - &cost, + cost, pending_costs .add_key_value_storage_costs( key.len() as u32, @@ -209,7 +206,7 @@ impl RocksDbStorage { db_batch.put_cf(cf_aux(&self.db), &key, &value); cost.seek_count += 1; cost_return_on_error_no_add!( - &cost, + cost, pending_costs .add_key_value_storage_costs( key.len() as u32, @@ -230,7 +227,7 @@ impl RocksDbStorage { // We only add costs for put root if they are set, otherwise it is free if cost_info.is_some() { cost_return_on_error_no_add!( - &cost, + cost, pending_costs .add_key_value_storage_costs( key.len() as u32, @@ -250,7 +247,7 @@ impl RocksDbStorage { db_batch.put_cf(cf_meta(&self.db), &key, &value); cost.seek_count += 1; cost_return_on_error_no_add!( - &cost, + cost, pending_costs .add_key_value_storage_costs( key.len() as u32, @@ -274,7 +271,7 @@ impl RocksDbStorage { cost.seek_count += 2; // lets get the values let value_len = cost_return_on_error_no_add!( - &cost, + cost, self.db.get(&key).map_err(RocksDBError) ) .map(|x| x.len() as u32) @@ -301,7 +298,7 @@ impl RocksDbStorage { } else { cost.seek_count += 2; let value_len = cost_return_on_error_no_add!( - &cost, + cost, self.db.get_cf(cf_aux(&self.db), &key).map_err(RocksDBError) ) .map(|x| x.len() as u32) @@ -329,7 +326,7 @@ impl RocksDbStorage { } else { cost.seek_count += 2; let value_len = cost_return_on_error_no_add!( - &cost, + cost, self.db .get_cf(cf_roots(&self.db), &key) .map_err(RocksDBError) @@ -359,7 +356,7 @@ impl RocksDbStorage { } else { cost.seek_count += 2; let value_len = cost_return_on_error_no_add!( - &cost, + cost, self.db .get_cf(cf_meta(&self.db), &key) .map_err(RocksDBError) @@ -434,7 +431,6 @@ impl RocksDbStorage { } impl<'db> Storage<'db> for RocksDbStorage { - type BatchStorageContext = PrefixedRocksDbStorageContext<'db>; type BatchTransactionalStorageContext = PrefixedRocksDbTransactionContext<'db>; type ImmediateStorageContext = PrefixedRocksDbImmediateStorageContext<'db>; type Transaction = Tx<'db>; @@ -459,27 +455,6 @@ impl<'db> Storage<'db> for RocksDbStorage { self.db.flush().map_err(RocksDBError) } - fn get_storage_context<'b, B>( - &'db self, - path: SubtreePath<'b, B>, - batch: Option<&'db StorageBatch>, - ) -> CostContext - where - B: 
AsRef<[u8]> + 'b, - { - Self::build_prefix(path) - .map(|prefix| PrefixedRocksDbStorageContext::new(&self.db, prefix, batch)) - } - - fn get_storage_context_by_subtree_prefix( - &'db self, - prefix: SubtreePrefix, - batch: Option<&'db StorageBatch>, - ) -> CostContext { - PrefixedRocksDbStorageContext::new(&self.db, prefix, batch) - .wrap_with_cost(OperationCost::default()) - } - fn get_transactional_storage_context<'b, B>( &'db self, path: SubtreePath<'b, B>, @@ -621,11 +596,13 @@ mod tests { }; let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); + let left = storage - .get_storage_context(left_path.clone(), Some(&batch)) + .get_transactional_storage_context(left_path.clone(), Some(&batch), &transaction) .unwrap(); let right = storage - .get_storage_context(right_path.clone(), Some(&batch)) + .get_transactional_storage_context(right_path.clone(), Some(&batch), &transaction) .unwrap(); left.put(b"a", b"a", None, None).unwrap().unwrap(); @@ -643,10 +620,10 @@ mod tests { let batch = StorageBatch::new(); let left = storage - .get_storage_context(left_path.clone(), Some(&batch)) + .get_transactional_storage_context(left_path.clone(), Some(&batch), &transaction) .unwrap(); let right = storage - .get_storage_context(right_path.clone(), Some(&batch)) + .get_transactional_storage_context(right_path.clone(), Some(&batch), &transaction) .unwrap(); // Iterate over left subtree while right subtree contains 1 byte keys: @@ -687,7 +664,9 @@ mod tests { .unwrap() .expect("cannot commit batch"); - let left = storage.get_storage_context(left_path, None).unwrap(); + let left = storage + .get_transactional_storage_context(left_path, None, &transaction) + .unwrap(); // Iterate over left subtree once again let mut iteration_cost_after = OperationCost::default(); let mut iter = left.raw_iter(); diff --git a/storage/src/rocksdb_storage/storage_context.rs b/storage/src/rocksdb_storage/storage_context.rs index 0611d51c1..758ba16fb 100644 --- a/storage/src/rocksdb_storage/storage_context.rs +++ b/storage/src/rocksdb_storage/storage_context.rs @@ -30,13 +30,11 @@ mod batch; pub mod context_immediate; -mod context_no_tx; mod context_tx; mod raw_iterator; pub use batch::PrefixedRocksDbBatch; pub use context_immediate::PrefixedRocksDbImmediateStorageContext; -pub use context_no_tx::PrefixedRocksDbStorageContext; pub use context_tx::PrefixedRocksDbTransactionContext; pub use raw_iterator::PrefixedRocksDbRawIterator; diff --git a/storage/src/rocksdb_storage/storage_context/context_no_tx.rs b/storage/src/rocksdb_storage/storage_context/context_no_tx.rs deleted file mode 100644 index 80ad01499..000000000 --- a/storage/src/rocksdb_storage/storage_context/context_no_tx.rs +++ /dev/null @@ -1,286 +0,0 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Storage context batch implementation without a transaction - -use error::Error; -use grovedb_costs::{ - storage_cost::key_value_cost::KeyValueStorageCost, ChildrenSizesWithIsSumTree, CostResult, - CostsExt, OperationCost, -}; -use rocksdb::{ColumnFamily, DBRawIteratorWithThreadMode}; - -use super::{batch::PrefixedMultiContextBatchPart, make_prefixed_key, PrefixedRocksDbRawIterator}; -use crate::{ - error, - error::Error::RocksDBError, - rocksdb_storage::storage::{Db, SubtreePrefix, AUX_CF_NAME, META_CF_NAME, ROOTS_CF_NAME}, - StorageBatch, StorageContext, -}; - -/// Storage context with a prefix applied to be used in a subtree to be used -/// outside of transaction. -pub struct PrefixedRocksDbStorageContext<'db> { - storage: &'db Db, - prefix: SubtreePrefix, - batch: Option<&'db StorageBatch>, -} - -impl<'db> PrefixedRocksDbStorageContext<'db> { - /// Create a new prefixed storage_cost context instance - pub fn new(storage: &'db Db, prefix: SubtreePrefix, batch: Option<&'db StorageBatch>) -> Self { - PrefixedRocksDbStorageContext { - storage, - prefix, - batch, - } - } -} - -impl<'db> PrefixedRocksDbStorageContext<'db> { - /// Get auxiliary data column family - fn cf_aux(&self) -> &'db ColumnFamily { - self.storage - .cf_handle(AUX_CF_NAME) - .expect("aux column family must exist") - } - - /// Get trees roots data column family - fn cf_roots(&self) -> &'db ColumnFamily { - self.storage - .cf_handle(ROOTS_CF_NAME) - .expect("roots column family must exist") - } - - /// Get metadata column family - fn cf_meta(&self) -> &'db ColumnFamily { - self.storage - .cf_handle(META_CF_NAME) - .expect("meta column family must exist") - } -} - -impl<'db> StorageContext<'db> for PrefixedRocksDbStorageContext<'db> { - type Batch = PrefixedMultiContextBatchPart; - type RawIterator = PrefixedRocksDbRawIterator>; - - fn put>( - &self, - key: K, - value: &[u8], - children_sizes: ChildrenSizesWithIsSumTree, - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.put( - make_prefixed_key(&self.prefix, key), - value.to_vec(), - children_sizes, - cost_info, - ); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn put_aux>( - &self, - key: K, - value: &[u8], - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.put_aux( - make_prefixed_key(&self.prefix, key), - value.to_vec(), - cost_info, - ); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn put_root>( - &self, - key: K, - value: &[u8], - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.put_root( - make_prefixed_key(&self.prefix, key), - value.to_vec(), - cost_info, - ); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn put_meta>( - &self, - key: K, - value: &[u8], - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.put_meta( - make_prefixed_key(&self.prefix, key), - value.to_vec(), - 
cost_info, - ); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn delete>( - &self, - key: K, - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.delete(make_prefixed_key(&self.prefix, key), cost_info); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn delete_aux>( - &self, - key: K, - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.delete_aux(make_prefixed_key(&self.prefix, key), cost_info); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn delete_root>( - &self, - key: K, - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.delete_root(make_prefixed_key(&self.prefix, key), cost_info); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn delete_meta>( - &self, - key: K, - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.delete_meta(make_prefixed_key(&self.prefix, key), cost_info); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn get>(&self, key: K) -> CostResult>, Error> { - self.storage - .get(make_prefixed_key(&self.prefix, key)) - .map_err(RocksDBError) - .wrap_fn_cost(|value| OperationCost { - seek_count: 1, - storage_loaded_bytes: value - .as_ref() - .ok() - .and_then(Option::as_ref) - .map(|x| x.len() as u64) - .unwrap_or(0), - ..Default::default() - }) - } - - fn get_aux>(&self, key: K) -> CostResult>, Error> { - self.storage - .get_cf(self.cf_aux(), make_prefixed_key(&self.prefix, key)) - .map_err(RocksDBError) - .wrap_fn_cost(|value| OperationCost { - seek_count: 1, - storage_loaded_bytes: value - .as_ref() - .ok() - .and_then(Option::as_ref) - .map(|x| x.len() as u64) - .unwrap_or(0), - ..Default::default() - }) - } - - fn get_root>(&self, key: K) -> CostResult>, Error> { - self.storage - .get_cf(self.cf_roots(), make_prefixed_key(&self.prefix, key)) - .map_err(RocksDBError) - .wrap_fn_cost(|value| OperationCost { - seek_count: 1, - storage_loaded_bytes: value - .as_ref() - .ok() - .and_then(Option::as_ref) - .map(|x| x.len() as u64) - .unwrap_or(0), - ..Default::default() - }) - } - - fn get_meta>(&self, key: K) -> CostResult>, Error> { - self.storage - .get_cf(self.cf_meta(), make_prefixed_key(&self.prefix, key)) - .map_err(RocksDBError) - .wrap_fn_cost(|value| OperationCost { - seek_count: 1, - storage_loaded_bytes: value - .as_ref() - .ok() - .and_then(Option::as_ref) - .map(|x| x.len() as u64) - .unwrap_or(0), - ..Default::default() - }) - } - - fn new_batch(&self) -> Self::Batch { - PrefixedMultiContextBatchPart { - prefix: self.prefix, - batch: StorageBatch::new(), - } - } - - fn commit_batch(&self, batch: Self::Batch) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.merge(batch.batch); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn raw_iter(&self) -> Self::RawIterator { - PrefixedRocksDbRawIterator { - prefix: self.prefix, - raw_iterator: self.storage.raw_iterator(), - } - } -} diff --git a/storage/src/rocksdb_storage/tests.rs b/storage/src/rocksdb_storage/tests.rs index c75568cd5..46b830d83 100644 --- a/storage/src/rocksdb_storage/tests.rs +++ b/storage/src/rocksdb_storage/tests.rs @@ -555,11 +555,21 @@ mod batch_no_transaction { fn test_various_cf_methods() { let storage = TempStorage::new(); let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); + let context_ayya = 
storage - .get_storage_context([b"ayya"].as_ref().into(), Some(&batch)) + .get_transactional_storage_context( + [b"ayya"].as_ref().into(), + Some(&batch), + &transaction, + ) .unwrap(); let context_ayyb = storage - .get_storage_context([b"ayyb"].as_ref().into(), Some(&batch)) + .get_transactional_storage_context( + [b"ayyb"].as_ref().into(), + Some(&batch), + &transaction, + ) .unwrap(); context_ayya @@ -606,15 +616,15 @@ mod batch_no_transaction { assert_eq!(batch.len(), 8); storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .expect("cannot commit batch"); let context_ayya = storage - .get_storage_context([b"ayya"].as_ref().into(), None) + .get_transactional_storage_context([b"ayya"].as_ref().into(), None, &transaction) .unwrap(); let context_ayyb = storage - .get_storage_context([b"ayyb"].as_ref().into(), None) + .get_transactional_storage_context([b"ayyb"].as_ref().into(), None, &transaction) .unwrap(); assert_eq!( @@ -696,11 +706,21 @@ mod batch_no_transaction { fn test_with_db_batches() { let storage = TempStorage::new(); let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); + let context_ayya = storage - .get_storage_context([b"ayya"].as_ref().into(), Some(&batch)) + .get_transactional_storage_context( + [b"ayya"].as_ref().into(), + Some(&batch), + &transaction, + ) .unwrap(); let context_ayyb = storage - .get_storage_context([b"ayyb"].as_ref().into(), Some(&batch)) + .get_transactional_storage_context( + [b"ayyb"].as_ref().into(), + Some(&batch), + &transaction, + ) .unwrap(); context_ayya @@ -756,12 +776,12 @@ mod batch_no_transaction { .is_none()); storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .expect("cannot commit multi context batch"); let context_ayya = storage - .get_storage_context([b"ayya"].as_ref().into(), None) + .get_transactional_storage_context([b"ayya"].as_ref().into(), None, &transaction) .unwrap(); assert_eq!( context_ayya @@ -782,15 +802,24 @@ mod batch_transaction { #[test] fn test_transaction_properties() { let storage = TempStorage::new(); + let other_transaction = storage.start_transaction(); let transaction = storage.start_transaction(); let batch = StorageBatch::new(); let batch_tx = StorageBatch::new(); let context_ayya = storage - .get_storage_context([b"ayya"].as_ref().into(), Some(&batch)) + .get_transactional_storage_context( + [b"ayya"].as_ref().into(), + Some(&batch), + &other_transaction, + ) .unwrap(); let context_ayyb = storage - .get_storage_context([b"ayyb"].as_ref().into(), Some(&batch)) + .get_transactional_storage_context( + [b"ayyb"].as_ref().into(), + Some(&batch), + &other_transaction, + ) .unwrap(); let context_ayya_tx = storage .get_transactional_storage_context( @@ -945,6 +974,7 @@ mod batch_transaction { .commit_transaction(transaction) .unwrap() .expect("cannot commit transaction"); + assert_eq!( context_ayya .get_aux(b"key2") @@ -1033,11 +1063,12 @@ mod batch_transaction { ); // And still no data in the database until transaction is commited + let other_transaction = storage.start_transaction(); let context_ayya = storage - .get_storage_context([b"ayya"].as_ref().into(), None) + .get_transactional_storage_context([b"ayya"].as_ref().into(), None, &other_transaction) .unwrap(); let context_ayyb = storage - .get_storage_context([b"ayyb"].as_ref().into(), None) + .get_transactional_storage_context([b"ayyb"].as_ref().into(), None, &other_transaction) .unwrap(); let mut iter = 
context_ayya.raw_iter(); @@ -1053,11 +1084,12 @@ mod batch_transaction { .unwrap() .expect("cannot commit transaction"); + let other_transaction = storage.start_transaction(); let context_ayya = storage - .get_storage_context([b"ayya"].as_ref().into(), None) + .get_transactional_storage_context([b"ayya"].as_ref().into(), None, &other_transaction) .unwrap(); let context_ayyb = storage - .get_storage_context([b"ayyb"].as_ref().into(), None) + .get_transactional_storage_context([b"ayyb"].as_ref().into(), None, &other_transaction) .unwrap(); assert_eq!( diff --git a/storage/src/storage.rs b/storage/src/storage.rs index 196507a6a..640cff76a 100644 --- a/storage/src/storage.rs +++ b/storage/src/storage.rs @@ -51,9 +51,6 @@ pub trait Storage<'db> { /// Storage transaction type type Transaction; - /// Storage context type for mutli-tree batch operations - type BatchStorageContext: StorageContext<'db>; - /// Storage context type for multi-tree batch operations inside transaction type BatchTransactionalStorageContext: StorageContext<'db>; @@ -80,24 +77,6 @@ pub trait Storage<'db> { /// Forces data to be written fn flush(&self) -> Result<(), Error>; - /// Make storage context for a subtree with path, keeping all write - /// operations inside a `batch` if provided. - fn get_storage_context<'b, B>( - &'db self, - path: SubtreePath<'b, B>, - batch: Option<&'db StorageBatch>, - ) -> CostContext - where - B: AsRef<[u8]> + 'b; - - /// Make storage context for a subtree with prefix, keeping all write - /// operations inside a `batch` if provided. - fn get_storage_context_by_subtree_prefix( - &'db self, - prefix: SubtreePrefix, - batch: Option<&'db StorageBatch>, - ) -> CostContext; - /// Make context for a subtree on transactional data, keeping all write /// operations inside a `batch` if provided. 
fn get_transactional_storage_context<'b, B>( @@ -344,8 +323,8 @@ impl StorageBatch { } } - #[cfg(test)] - pub(crate) fn len(&self) -> usize { + /// Get batch length + pub fn len(&self) -> usize { let operations = self.operations.borrow(); operations.data.len() + operations.roots.len() @@ -353,6 +332,11 @@ impl StorageBatch { + operations.meta.len() } + /// Batch emptiness predicate + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + /// Add deferred `put` operation pub(crate) fn put( &self, From 57e356085fe8b97ca6c8686b81e1417977acc380 Mon Sep 17 00:00:00 2001 From: Evgeny Fomin Date: Fri, 17 Jan 2025 15:57:30 +0100 Subject: [PATCH 2/5] fix --- grovedb/src/lib.rs | 1 + grovedb/src/reference_path.rs | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 31c3dcfab..22c42c66f 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -212,6 +212,7 @@ pub use query::{PathQuery, SizedQuery}; use reference_path::path_from_reference_path_type; #[cfg(feature = "grovedbg")] use tokio::net::ToSocketAddrs; +#[cfg(feature = "minimal")] use util::{compat, TxRef}; #[cfg(feature = "minimal")] diff --git a/grovedb/src/reference_path.rs b/grovedb/src/reference_path.rs index 91d7198db..11d407a6c 100644 --- a/grovedb/src/reference_path.rs +++ b/grovedb/src/reference_path.rs @@ -7,16 +7,17 @@ use std::{collections::HashSet, iter}; use bincode::{Decode, Encode}; use grovedb_costs::{cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt}; use grovedb_merk::CryptoHash; -#[cfg(feature = "minimal")] +#[cfg(any(feature = "minimal", feature = "verify"))] use grovedb_path::{SubtreePath, SubtreePathBuilder}; use grovedb_version::check_grovedb_v0_with_cost; -#[cfg(feature = "full")] +#[cfg(any(feature = "minimal", feature = "visualize"))] use grovedb_visualize::visualize_to_vec; #[cfg(feature = "minimal")] use integer_encoding::VarInt; #[cfg(any(feature = "minimal", feature = "verify"))] use crate::Error; +#[cfg(feature = "minimal")] use crate::{ merk_cache::{MerkCache, MerkHandle}, operations::MAX_REFERENCE_HOPS, @@ -536,6 +537,7 @@ impl ReferencePathType { } } +#[cfg(feature = "minimal")] pub(crate) struct ResolvedReference<'db, 'b, 'c, B> { pub target_merk: MerkHandle<'db, 'c>, pub target_path: SubtreePathBuilder<'b, B>, @@ -544,6 +546,7 @@ pub(crate) struct ResolvedReference<'db, 'b, 'c, B> { pub target_node_value_hash: CryptoHash, } +#[cfg(feature = "minimal")] pub(crate) fn follow_reference<'db, 'b, 'c, B: AsRef<[u8]>>( merk_cache: &'c MerkCache<'db, 'b, B>, path: SubtreePathBuilder<'b, B>, @@ -551,6 +554,7 @@ pub(crate) fn follow_reference<'db, 'b, 'c, B: AsRef<[u8]>>( ref_path: ReferencePathType, ) -> CostResult, Error> { // TODO: this is a new version of follow reference + check_grovedb_v0_with_cost!( "follow_reference", merk_cache @@ -627,6 +631,7 @@ pub(crate) fn follow_reference<'db, 'b, 'c, B: AsRef<[u8]>>( Err(Error::ReferenceLimit).wrap_with_cost(cost) } +#[cfg(feature = "minimal")] /// Follow references stopping at the immediate element without following /// further. 
pub(crate) fn follow_reference_once<'db, 'b, 'c, B: AsRef<[u8]>>( From 338d7e7781c310d05327194483549b62110f5c4b Mon Sep 17 00:00:00 2001 From: Evgeny Fomin Date: Tue, 21 Jan 2025 12:01:04 +0100 Subject: [PATCH 3/5] revert to pub --- grovedb/src/operations/delete/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grovedb/src/operations/delete/mod.rs b/grovedb/src/operations/delete/mod.rs index e5a6c590c..c4839ac28 100644 --- a/grovedb/src/operations/delete/mod.rs +++ b/grovedb/src/operations/delete/mod.rs @@ -464,7 +464,7 @@ impl GroveDb { } /// Delete operation for delete internal - fn delete_operation_for_delete_internal>( + pub fn delete_operation_for_delete_internal>( &self, path: SubtreePath, key: &[u8], From c7e2481ed01d799e2d3cf5cfe6ea6cf55b0c8eb0 Mon Sep 17 00:00:00 2001 From: Evgeny Fomin Date: Tue, 21 Jan 2025 12:17:53 +0100 Subject: [PATCH 4/5] fix --- grovedb/src/operations/delete/delete_up_tree.rs | 2 +- grovedb/src/operations/delete/mod.rs | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/grovedb/src/operations/delete/delete_up_tree.rs b/grovedb/src/operations/delete/delete_up_tree.rs index a3854dc8f..633b7abb8 100644 --- a/grovedb/src/operations/delete/delete_up_tree.rs +++ b/grovedb/src/operations/delete/delete_up_tree.rs @@ -238,7 +238,7 @@ impl GroveDb { &options.to_delete_options(), is_known_to_be_subtree, current_batch_operations, - tx.as_ref(), + Some(tx.as_ref()), grove_version, ) ) { diff --git a/grovedb/src/operations/delete/mod.rs b/grovedb/src/operations/delete/mod.rs index c4839ac28..667a7ad8c 100644 --- a/grovedb/src/operations/delete/mod.rs +++ b/grovedb/src/operations/delete/mod.rs @@ -471,7 +471,7 @@ impl GroveDb { options: &DeleteOptions, is_known_to_be_subtree: Option, current_batch_operations: &[QualifiedGroveDbOp], - transaction: &Transaction, + transaction: TransactionArg, grove_version: &GroveVersion, ) -> CostResult, Error> { check_grovedb_v0_with_cost!( @@ -483,6 +483,8 @@ impl GroveDb { .delete_operation_for_delete_internal ); + let tx = TxRef::new(&self.db, transaction); + let mut cost = OperationCost::default(); if path.is_root() { @@ -497,7 +499,7 @@ impl GroveDb { &mut cost, self.check_subtree_exists_path_not_found( path.clone(), - transaction, + tx.as_ref(), grove_version ) ); @@ -506,7 +508,7 @@ impl GroveDb { None => { let element = cost_return_on_error!( &mut cost, - self.get_raw(path.clone(), key.as_ref(), Some(transaction), grove_version) + self.get_raw(path.clone(), key.as_ref(), Some(tx.as_ref()), grove_version) ); element.maybe_tree_type() } @@ -536,7 +538,7 @@ impl GroveDb { compat::merk_optional_tx_path_not_empty( &self.db, SubtreePath::from(&subtree_merk_path), - transaction, + tx.as_ref(), None, grove_version, ) From 5d4d504cae432f046dc3a70d83202cd65fe459ca Mon Sep 17 00:00:00 2001 From: Evgeny Fomin Date: Tue, 21 Jan 2025 13:13:48 +0100 Subject: [PATCH 5/5] address clippy warnings --- .../estimated_costs/average_case_costs.rs | 4 +- .../batch/just_in_time_reference_update.rs | 2 +- grovedb/src/batch/mod.rs | 47 ++++++-------- grovedb/src/element/delete.rs | 2 - grovedb/src/element/exists.rs | 2 +- grovedb/src/element/get.rs | 2 +- grovedb/src/element/helpers.rs | 6 +- grovedb/src/element/query.rs | 2 +- grovedb/src/lib.rs | 4 +- grovedb/src/operations/proof/verify.rs | 4 +- grovedb/src/query/mod.rs | 6 +- grovedb/src/util/compat.rs | 8 +-- .../src/estimated_costs/average_case_costs.rs | 4 +- merk/src/merk/mod.rs | 2 +- merk/src/merk/source.rs | 4 +- merk/src/proofs/chunk/chunk.rs | 6 
+- merk/src/proofs/encoding.rs | 2 +- merk/src/proofs/query/map.rs | 4 +- merk/src/proofs/query/mod.rs | 2 +- merk/src/test_utils/mod.rs | 2 +- merk/src/tree/iter.rs | 2 +- merk/src/tree/mod.rs | 2 +- merk/src/visualize.rs | 6 +- path/Cargo.toml | 1 - path/src/subtree_path.rs | 61 +++++++++++++------ path/src/subtree_path_iter.rs | 6 +- .../rocksdb_storage/storage_context/batch.rs | 2 +- .../storage_context/raw_iterator.rs | 16 +++-- 28 files changed, 107 insertions(+), 104 deletions(-) diff --git a/grovedb/src/batch/estimated_costs/average_case_costs.rs b/grovedb/src/batch/estimated_costs/average_case_costs.rs index 8e2bbbb26..13fdbd29c 100644 --- a/grovedb/src/batch/estimated_costs/average_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/average_case_costs.rs @@ -212,7 +212,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { .estimated_to_be_empty(); // Then we have to get the tree - if self.cached_merks.get(path).is_none() { + if !self.cached_merks.contains_key(path) { let layer_info = cost_return_on_error_no_add!( cost, self.paths.get(path).ok_or_else(|| { @@ -256,6 +256,8 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { Ok(([0u8; 32], None, AggregateData::NoAggregateData)).wrap_with_cost(cost) } + // Clippy's suggestion doesn't respect ownership in this case + #[allow(clippy::map_entry)] fn update_base_merk_root_key( &mut self, _root_key: Option>, diff --git a/grovedb/src/batch/just_in_time_reference_update.rs b/grovedb/src/batch/just_in_time_reference_update.rs index 21a06aab8..53f6a835f 100644 --- a/grovedb/src/batch/just_in_time_reference_update.rs +++ b/grovedb/src/batch/just_in_time_reference_update.rs @@ -60,7 +60,7 @@ where let val_hash = value_hash(&new_serialized_bytes).unwrap_add_cost(&mut cost); Ok(val_hash).wrap_with_cost(cost) } else { - let val_hash = value_hash(&serialized).unwrap_add_cost(&mut cost); + let val_hash = value_hash(serialized).unwrap_add_cost(&mut cost); Ok(val_hash).wrap_with_cost(cost) } } else { diff --git a/grovedb/src/batch/mod.rs b/grovedb/src/batch/mod.rs index 33d9bbef3..d101a3ee3 100644 --- a/grovedb/src/batch/mod.rs +++ b/grovedb/src/batch/mod.rs @@ -706,11 +706,11 @@ where /// change in the same batch. It distinguishes between two cases: /// /// 1. When the hop count is exactly 1, it tries to directly extract the - /// value hash from the reference element. + /// value hash from the reference element. /// /// 2. When the hop count is greater than 1, it retrieves the referenced - /// element and then determines the next step based on the type of the - /// element. + /// element and then determines the next step based on the type of the + /// element. /// /// # Arguments /// @@ -722,12 +722,11 @@ where /// # Returns /// /// * `Ok(CryptoHash)`: Returns the crypto hash of the referenced element - /// wrapped in the - /// associated cost, if successful. + /// wrapped in the associated cost, if successful. /// /// * `Err(Error)`: Returns an error if there is an issue with the - /// operation, such as - /// missing reference, corrupted data, or invalid batch operation. + /// operation, such as missing reference, corrupted data, or invalid batch + /// operation. /// /// # Errors /// @@ -868,8 +867,8 @@ where /// the Merk tree. /// * `Error::CorruptedData` - If the referenced element cannot be /// deserialized due to corrupted data. 
- fn get_and_deserialize_referenced_element<'a>( - &'a mut self, + fn get_and_deserialize_referenced_element( + &mut self, key: &[u8], reference_path: &[Vec], grove_version: &GroveVersion, @@ -1527,7 +1526,7 @@ where cost_return_on_error!( &mut cost, GroveDb::update_tree_item_preserve_flag_into_batch_operations( - &merk, + merk, key_info.get_key(), root_key, hash, @@ -1711,12 +1710,9 @@ where ) .map_err(|e| Error::CorruptedData(e.to_string())) ); - let r = merk - .root_hash_key_and_aggregate_data() + merk.root_hash_key_and_aggregate_data() .add_cost(cost) - .map_err(Error::MerkError); - - r + .map_err(Error::MerkError) } fn get_batch_run_mode(&self) -> BatchRunMode { @@ -1824,14 +1820,11 @@ impl GroveDb { { match ops_on_path.entry(key.clone()) { Entry::Vacant(vacant_entry) => { - vacant_entry.insert( - GroveOp::ReplaceTreeRootKey { - hash: root_hash, - root_key: calculated_root_key, - aggregate_data, - } - .into(), - ); + vacant_entry.insert(GroveOp::ReplaceTreeRootKey { + hash: root_hash, + root_key: calculated_root_key, + aggregate_data, + }); } Entry::Occupied(occupied_entry) => { let mutable_occupied_entry = occupied_entry.into_mut(); @@ -1864,7 +1857,6 @@ impl GroveDb { aggregate_data: AggregateData::NoAggregateData, } - .into(); } else if let Element::SumTree(.., flags) = element { @@ -1875,7 +1867,6 @@ impl GroveDb { flags: flags.clone(), aggregate_data, } - .into(); } else if let Element::BigSumTree(.., flags) = element { @@ -1886,7 +1877,6 @@ impl GroveDb { flags: flags.clone(), aggregate_data, } - .into(); } else if let Element::CountTree(.., flags) = element { @@ -1897,7 +1887,6 @@ impl GroveDb { flags: flags.clone(), aggregate_data, } - .into(); } else if let Element::CountSumTree(.., flags) = element { @@ -1908,7 +1897,6 @@ impl GroveDb { flags: flags.clone(), aggregate_data, } - .into(); } else { return Err(Error::InvalidBatchOperation( "insertion of element under a non tree", @@ -1956,8 +1944,7 @@ impl GroveDb { hash: root_hash, root_key: calculated_root_key, aggregate_data, - } - .into(), + }, ); let mut ops_on_level: BTreeMap< KeyInfoPath, diff --git a/grovedb/src/element/delete.rs b/grovedb/src/element/delete.rs index 9243eb398..4440d426b 100644 --- a/grovedb/src/element/delete.rs +++ b/grovedb/src/element/delete.rs @@ -1,8 +1,6 @@ //! Delete //! Implements functions in Element for deleting -#[cfg(feature = "minimal")] -use grovedb_costs::OperationCost; #[cfg(feature = "minimal")] use grovedb_costs::{storage_cost::removal::StorageRemovedBytes, CostResult, CostsExt}; #[cfg(feature = "minimal")] diff --git a/grovedb/src/element/exists.rs b/grovedb/src/element/exists.rs index b57d5c5c7..6380a7b48 100644 --- a/grovedb/src/element/exists.rs +++ b/grovedb/src/element/exists.rs @@ -1,7 +1,7 @@ //! Exists //! 
Implements in Element functions for checking if stuff exists -use grovedb_costs::{CostResult, CostsExt, OperationCost}; +use grovedb_costs::CostResult; use grovedb_merk::Merk; use grovedb_storage::StorageContext; use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; diff --git a/grovedb/src/element/get.rs b/grovedb/src/element/get.rs index 010e3738b..c5c893c51 100644 --- a/grovedb/src/element/get.rs +++ b/grovedb/src/element/get.rs @@ -41,7 +41,7 @@ impl Element { let value = result?; value.ok_or_else(|| { let key_single_byte = if key.as_ref().len() == 1 { - format!("({} in decimal) ", key.as_ref().get(0).unwrap()) + format!("({} in decimal) ", key.as_ref().first().unwrap()) } else { String::new() }; diff --git a/grovedb/src/element/helpers.rs b/grovedb/src/element/helpers.rs index 2c062935f..54e996046 100644 --- a/grovedb/src/element/helpers.rs +++ b/grovedb/src/element/helpers.rs @@ -390,7 +390,7 @@ impl Element { #[cfg(feature = "minimal")] /// Get tree costs for a key value pub fn specialized_costs_for_key_value( - key: &Vec, + key: &[u8], value: &[u8], node_type: NodeType, grove_version: &GroveVersion, @@ -497,9 +497,7 @@ impl Element { #[cfg(feature = "minimal")] /// Get the value defined cost for a serialized value pub fn value_defined_cost(&self, grove_version: &GroveVersion) -> Option { - let Some(value_cost) = self.get_specialized_cost(grove_version).ok() else { - return None; - }; + let value_cost = self.get_specialized_cost(grove_version).ok()?; let cost = value_cost + self.get_flags().as_ref().map_or(0, |flags| { diff --git a/grovedb/src/element/query.rs b/grovedb/src/element/query.rs index e3cc80798..1626ad5c4 100644 --- a/grovedb/src/element/query.rs +++ b/grovedb/src/element/query.rs @@ -169,7 +169,7 @@ fn format_subquery_branch(branch: &SubqueryBranch, indent: usize) -> String { } #[cfg(feature = "minimal")] -impl<'db, 'ctx, 'a> fmt::Display for PathQueryPushArgs<'db, 'ctx, 'a> +impl<'db, 'ctx> fmt::Display for PathQueryPushArgs<'db, 'ctx, '_> where 'db: 'ctx, { diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 22c42c66f..85a0189c5 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -300,9 +300,9 @@ impl GroveDb { struct Compat; impl compat::OpenMerkErrorsCompat for Compat { - fn parent_key_not_found<'b, B: AsRef<[u8]>>( + fn parent_key_not_found>( e: Error, - parent_path: SubtreePath<'b, B>, + parent_path: SubtreePath, parent_key: &[u8], ) -> Error { Error::InvalidParentLayerPath(format!( diff --git a/grovedb/src/operations/proof/verify.rs b/grovedb/src/operations/proof/verify.rs index 80cec1cfe..e67b7eb17 100644 --- a/grovedb/src/operations/proof/verify.rs +++ b/grovedb/src/operations/proof/verify.rs @@ -289,7 +289,7 @@ impl GroveDb { if merk_result.result_set.is_empty() { if prove_options.decrease_limit_on_empty_sub_query_result { - limit_left.as_mut().map(|limit| *limit -= 1); + limit_left.iter_mut().for_each(|limit| *limit -= 1); } } else { for proved_key_value in merk_result.result_set { @@ -370,7 +370,7 @@ impl GroveDb { } result.push(path_key_optional_value.try_into_versioned(grove_version)?); - limit_left.as_mut().map(|limit| *limit -= 1); + limit_left.iter_mut().for_each(|limit| *limit -= 1); if limit_left == &Some(0) { break; } diff --git a/grovedb/src/query/mod.rs b/grovedb/src/query/mod.rs index 01bf8439c..41c09245e 100644 --- a/grovedb/src/query/mod.rs +++ b/grovedb/src/query/mod.rs @@ -464,7 +464,7 @@ pub enum HasSubquery<'a> { } #[cfg(any(feature = "minimal", feature = "verify"))] -impl<'a> fmt::Display for 
HasSubquery<'a> { +impl fmt::Display for HasSubquery<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { HasSubquery::NoSubquery => write!(f, "NoSubquery"), @@ -480,7 +480,7 @@ impl<'a> fmt::Display for HasSubquery<'a> { } } -impl<'a> HasSubquery<'a> { +impl HasSubquery<'_> { /// Checks to see if we have a subquery on a specific key pub fn has_subquery_on_key(&self, key: &[u8]) -> bool { match self { @@ -509,7 +509,7 @@ pub struct SinglePathSubquery<'a> { } #[cfg(any(feature = "minimal", feature = "verify"))] -impl<'a> fmt::Display for SinglePathSubquery<'a> { +impl fmt::Display for SinglePathSubquery<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "InternalCowItemsQuery {{")?; writeln!(f, " items: [")?; diff --git a/grovedb/src/util/compat.rs b/grovedb/src/util/compat.rs index 6134e61cc..894861c26 100644 --- a/grovedb/src/util/compat.rs +++ b/grovedb/src/util/compat.rs @@ -10,9 +10,9 @@ use grovedb_version::version::GroveVersion; use crate::{Element, Error, Transaction}; pub(crate) trait OpenMerkErrorsCompat { - fn parent_key_not_found<'b, B: AsRef<[u8]>>( + fn parent_key_not_found>( e: Error, - parent_path: SubtreePath<'b, B>, + parent_path: SubtreePath, parent_key: &[u8], ) -> Error; @@ -88,9 +88,9 @@ where struct Compat; impl OpenMerkErrorsCompat for Compat { - fn parent_key_not_found<'b, B: AsRef<[u8]>>( + fn parent_key_not_found>( e: Error, - _parent_path: SubtreePath<'b, B>, + _parent_path: SubtreePath, _parent_key: &[u8], ) -> Error { Error::PathParentLayerNotFound(format!( diff --git a/merk/src/estimated_costs/average_case_costs.rs b/merk/src/estimated_costs/average_case_costs.rs index 3b535767a..865d66a6b 100644 --- a/merk/src/estimated_costs/average_case_costs.rs +++ b/merk/src/estimated_costs/average_case_costs.rs @@ -523,7 +523,7 @@ fn add_average_case_merk_propagate_v1( // we can get about 1 rotation, if there are more than 2 levels nodes_updated += 1; } - cost.seek_count += nodes_updated as u32; + cost.seek_count += nodes_updated; cost.hash_node_calls += nodes_updated * 2; @@ -794,7 +794,7 @@ fn add_average_case_merk_propagate_v0( // we can get about 1 rotation, if there are more than 2 levels nodes_updated += 1; } - cost.seek_count += nodes_updated as u32; + cost.seek_count += nodes_updated; cost.hash_node_calls += nodes_updated * 2; diff --git a/merk/src/merk/mod.rs b/merk/src/merk/mod.rs index 39eef077f..dc495ed8f 100644 --- a/merk/src/merk/mod.rs +++ b/merk/src/merk/mod.rs @@ -187,7 +187,7 @@ impl<'a, I: RawIterator> KVIterator<'a, I> { } // Cannot be an Iterator as it should return cost -impl<'a, I: RawIterator> KVIterator<'a, I> { +impl KVIterator<'_, I> { /// Next key-value pub fn next_kv(&mut self) -> CostContext, Vec)>> { let mut cost = OperationCost::default(); diff --git a/merk/src/merk/source.rs b/merk/src/merk/source.rs index 7c7568be8..99a1a707e 100644 --- a/merk/src/merk/source.rs +++ b/merk/src/merk/source.rs @@ -26,7 +26,7 @@ pub struct MerkSource<'s, S> { tree_type: TreeType, } -impl<'s, S> Clone for MerkSource<'s, S> { +impl Clone for MerkSource<'_, S> { fn clone(&self) -> Self { MerkSource { storage: self.storage, @@ -35,7 +35,7 @@ impl<'s, S> Clone for MerkSource<'s, S> { } } -impl<'s, 'db, S> Fetch for MerkSource<'s, S> +impl<'db, S> Fetch for MerkSource<'_, S> where S: StorageContext<'db>, { diff --git a/merk/src/proofs/chunk/chunk.rs b/merk/src/proofs/chunk/chunk.rs index 4960c53f9..f40c761c4 100644 --- a/merk/src/proofs/chunk/chunk.rs +++ b/merk/src/proofs/chunk/chunk.rs @@ -40,7 +40,7 @@ use 
crate::{ pub const LEFT: bool = true; pub const RIGHT: bool = false; -impl<'a, S> RefWalker<'a, S> +impl RefWalker<'_, S> where S: Fetch + Sized + Clone, { @@ -214,7 +214,7 @@ pub fn verify_height_proof(proof: Vec, expected_root_hash: CryptoHash) -> Re // TODO: add documentation pub fn verify_height_tree(height_proof_tree: &Tree) -> Result { - return Ok(match height_proof_tree.child(LEFT) { + Ok(match height_proof_tree.child(LEFT) { Some(child) => { if !matches!(child.tree.node, Node::KVHash(..)) { // todo deal with old chunk restoring error @@ -225,7 +225,7 @@ pub fn verify_height_tree(height_proof_tree: &Tree) -> Result { verify_height_tree(&child.tree)? + 1 } None => 1, - }); + }) } #[cfg(test)] diff --git a/merk/src/proofs/encoding.rs b/merk/src/proofs/encoding.rs index 5996e388b..745c5d1cc 100644 --- a/merk/src/proofs/encoding.rs +++ b/merk/src/proofs/encoding.rs @@ -414,7 +414,7 @@ impl<'a> Decoder<'a> { } #[cfg(any(feature = "minimal", feature = "verify"))] -impl<'a> Iterator for Decoder<'a> { +impl Iterator for Decoder<'_> { type Item = Result; fn next(&mut self) -> Option { diff --git a/merk/src/proofs/query/map.rs b/merk/src/proofs/query/map.rs index 8c3b56525..d5331548a 100644 --- a/merk/src/proofs/query/map.rs +++ b/merk/src/proofs/query/map.rs @@ -107,7 +107,7 @@ impl Map { /// of keys. If during iteration we encounter a gap in the data (e.g. the /// proof did not include all nodes within the range), the iterator will /// yield an error. - pub fn range<'a, R: RangeBounds<&'a [u8]>>(&'a self, bounds: R) -> Range { + pub fn range<'a, R: RangeBounds<&'a [u8]>>(&'a self, bounds: R) -> Range<'a> { let start_key = bound_to_inner(bounds.start_bound()).map(|x| (*x).into()); let bounds = bounds_to_vec(bounds); @@ -159,7 +159,7 @@ pub struct Range<'a> { } #[cfg(feature = "minimal")] -impl<'a> Range<'a> { +impl Range<'_> { /// Returns an error if the proof does not properly prove the end of the /// range. fn check_end_bound(&self) -> Result<(), Error> { diff --git a/merk/src/proofs/query/mod.rs b/merk/src/proofs/query/mod.rs index bd33d4b09..b6e9c0c61 100644 --- a/merk/src/proofs/query/mod.rs +++ b/merk/src/proofs/query/mod.rs @@ -702,7 +702,7 @@ impl Link { } #[cfg(feature = "minimal")] -impl<'a, S> RefWalker<'a, S> +impl RefWalker<'_, S> where S: Fetch + Sized + Clone, { diff --git a/merk/src/test_utils/mod.rs b/merk/src/test_utils/mod.rs index 0f7f93ae9..8fa8f704d 100644 --- a/merk/src/test_utils/mod.rs +++ b/merk/src/test_utils/mod.rs @@ -341,7 +341,7 @@ where { Merk::open_base( storage - .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) + .get_transactional_storage_context(SubtreePath::empty(), None, transaction) .unwrap(), TreeType::NormalTree, None:: Option>, diff --git a/merk/src/tree/iter.rs b/merk/src/tree/iter.rs index 96c04e29b..0566d044f 100644 --- a/merk/src/tree/iter.rs +++ b/merk/src/tree/iter.rs @@ -65,7 +65,7 @@ impl<'a> TreeNode { } #[cfg(feature = "minimal")] -impl<'a> Iterator for Iter<'a> { +impl Iterator for Iter<'_> { type Item = (Vec, Vec); /// Traverses to and yields the next key/value pair, in key order. 
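The hunks in this clippy cleanup commit, here and in the files that follow, all apply the same elided-lifetime rewrite: an impl header that names a lifetime only to repeat it in the self type switches to the anonymous lifetime `'_`. A minimal sketch of the before/after, using a hypothetical `Segments` type rather than anything from this codebase:

    struct Segments<'a>(&'a [Vec<u8>]);

    // Before the lint fix this would read `impl<'a> Iterator for Segments<'a>`;
    // the named lifetime adds no information, so `'_` is preferred.
    impl Iterator for Segments<'_> {
        type Item = Vec<u8>;

        fn next(&mut self) -> Option<Vec<u8>> {
            // Yield segments front to back by shrinking the borrowed slice.
            let (first, rest) = self.0.split_first()?;
            self.0 = rest;
            Some(first.clone())
        }
    }

    fn main() {
        let data = vec![b"ayya".to_vec(), b"ayyb".to_vec()];
        let collected: Vec<Vec<u8>> = Segments(&data).collect();
        assert_eq!(collected, data);
    }

Behavior is untouched; only the spelling of the lifetime changes, which is why each of these hunks is a pure signature edit.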
diff --git a/merk/src/tree/mod.rs b/merk/src/tree/mod.rs index 4c4161bbd..a1cda4d1f 100644 --- a/merk/src/tree/mod.rs +++ b/merk/src/tree/mod.rs @@ -459,7 +459,7 @@ impl TreeNode { match link.aggregate_data() { AggregateData::NoAggregateData => 0, AggregateData::Sum(s) => s.encode_var_vec().len() as u32, - AggregateData::BigSum(_) => 16 as u32, + AggregateData::BigSum(_) => 16_u32, AggregateData::Count(c) => c.encode_var_vec().len() as u32, AggregateData::CountAndSum(c, s) => { s.encode_var_vec().len() as u32 + c.encode_var_vec().len() as u32 diff --git a/merk/src/visualize.rs b/merk/src/visualize.rs index 0235f92dd..d3fe17e2a 100644 --- a/merk/src/visualize.rs +++ b/merk/src/visualize.rs @@ -65,8 +65,8 @@ impl<'a, F> VisualizableTree<'a, F> { } } -impl<'a, 'db, S: StorageContext<'db>, T: Visualize, F: Fn(&[u8]) -> T + Copy> Visualize - for VisualizeableMerk<'a, S, F> +impl<'db, S: StorageContext<'db>, T: Visualize, F: Fn(&[u8]) -> T + Copy> Visualize + for VisualizeableMerk<'_, S, F> { fn visualize(&self, mut drawer: Drawer) -> Result> { drawer.write(b"Merk root: ")?; @@ -84,7 +84,7 @@ impl<'a, 'db, S: StorageContext<'db>, T: Visualize, F: Fn(&[u8]) -> T + Copy> Vi } } -impl<'a, T: Visualize, F: Fn(&[u8]) -> T + Copy> Visualize for VisualizableTree<'a, F> { +impl T + Copy> Visualize for VisualizableTree<'_, F> { fn visualize(&self, mut drawer: Drawer) -> Result> { drawer.write(b"[key: ")?; drawer = self.tree.inner.kv.key_as_ref().visualize(drawer)?; diff --git a/path/Cargo.toml b/path/Cargo.toml index bd1f1679f..91738bfe5 100644 --- a/path/Cargo.toml +++ b/path/Cargo.toml @@ -10,4 +10,3 @@ repository = "https://github.com/dashpay/grovedb" [dependencies] hex = "0.4.3" -itertools = "0.13.0" diff --git a/path/src/subtree_path.rs b/path/src/subtree_path.rs index 48929a174..179db8f2b 100644 --- a/path/src/subtree_path.rs +++ b/path/src/subtree_path.rs @@ -53,30 +53,51 @@ pub struct SubtreePath<'b, B> { } impl> Display for SubtreePath<'_, B> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let path = self.to_vec(); - - fn fmt_segment(s: impl AsRef<[u8]>) -> String { - let bytes = s.as_ref(); - let hex_str = hex::encode(bytes); - let utf8_str = String::from_utf8(bytes.to_vec()); - let mut result = format!("h:{hex_str}"); - if let Ok(s) = utf8_str { - result.push_str("/s:"); - result.push_str(&s); + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result { + fn bytes_to_hex_or_ascii(bytes: &[u8]) -> String { + // Define the set of allowed characters + const ALLOWED_CHARS: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\ + abcdefghijklmnopqrstuvwxyz\ + 0123456789_-/\\[]@"; + + // Check if all characters in hex_value are allowed + if bytes.iter().all(|&c| ALLOWED_CHARS.contains(&c)) { + // Try to convert to UTF-8 + String::from_utf8(bytes.to_vec()) + .unwrap_or_else(|_| format!("0x{}", hex::encode(bytes))) + } else { + // Hex encode and prepend "0x" + format!("0x{}", hex::encode(bytes)) } - result } - f.write_str("[")?; - - for s in itertools::intersperse(path.into_iter().map(fmt_segment), ", ".to_owned()) { - f.write_str(&s)?; + match &self.ref_variant { + SubtreePathInner::Slice(slice) => { + let ascii_path = slice + .iter() + .map(|e| bytes_to_hex_or_ascii(e.as_ref())) + .collect::>() + .join("/"); + write!(f, "{}", ascii_path) + } + SubtreePathInner::SubtreePath(subtree_path) => { + let ascii_path = subtree_path + .to_vec() + .into_iter() + .map(|a| bytes_to_hex_or_ascii(a.as_slice())) + .collect::>() + .join("/"); + write!(f, "{}", ascii_path) + } + 
SubtreePathInner::SubtreePathIter(iter) => { + let ascii_path = iter + .clone() + .map(bytes_to_hex_or_ascii) + .collect::>() + .join("/"); + write!(f, "{}", ascii_path) + } } - - f.write_str("]")?; - - Ok(()) } } diff --git a/path/src/subtree_path_iter.rs b/path/src/subtree_path_iter.rs index 2ca658660..f5e2aeaa7 100644 --- a/path/src/subtree_path_iter.rs +++ b/path/src/subtree_path_iter.rs @@ -42,7 +42,7 @@ pub struct SubtreePathIter<'b, B> { next_subtree_path: Option<&'b SubtreePath<'b, B>>, } -impl<'b, B> Clone for SubtreePathIter<'b, B> { +impl Clone for SubtreePathIter<'_, B> { fn clone(&self) -> Self { SubtreePathIter { current_iter: self.current_iter.clone(), @@ -147,12 +147,12 @@ impl CurrentSubtreePathIter<'_, B> { } } -impl<'b, B> Clone for CurrentSubtreePathIter<'b, B> { +impl Clone for CurrentSubtreePathIter<'_, B> { fn clone(&self) -> Self { match self { CurrentSubtreePathIter::Single(x) => CurrentSubtreePathIter::Single(x), CurrentSubtreePathIter::Slice(x) => CurrentSubtreePathIter::Slice(x.clone()), - CurrentSubtreePathIter::OwnedBytes(x) => CurrentSubtreePathIter::OwnedBytes(x.clone()), + CurrentSubtreePathIter::OwnedBytes(x) => CurrentSubtreePathIter::OwnedBytes(*x), } } } diff --git a/storage/src/rocksdb_storage/storage_context/batch.rs b/storage/src/rocksdb_storage/storage_context/batch.rs index bcf583725..29ae31cad 100644 --- a/storage/src/rocksdb_storage/storage_context/batch.rs +++ b/storage/src/rocksdb_storage/storage_context/batch.rs @@ -33,7 +33,7 @@ pub struct PrefixedMultiContextBatchPart { } /// Implementation of a batch outside a transaction -impl<'db> Batch for PrefixedRocksDbBatch<'db> { +impl Batch for PrefixedRocksDbBatch<'_> { fn put>( &mut self, key: K, diff --git a/storage/src/rocksdb_storage/storage_context/raw_iterator.rs b/storage/src/rocksdb_storage/storage_context/raw_iterator.rs index 7cc2d1fef..58540edeb 100644 --- a/storage/src/rocksdb_storage/storage_context/raw_iterator.rs +++ b/storage/src/rocksdb_storage/storage_context/raw_iterator.rs @@ -46,7 +46,7 @@ pub struct PrefixedRocksDbRawIterator { pub(super) raw_iterator: I, } -impl<'a> RawIterator for PrefixedRocksDbRawIterator> { +impl RawIterator for PrefixedRocksDbRawIterator> { fn seek_to_first(&mut self) -> CostContext<()> { self.raw_iterator.seek(self.prefix); ().wrap_with_cost(OperationCost::with_seek_count(1)) @@ -90,10 +90,9 @@ impl<'a> RawIterator for PrefixedRocksDbRawIterator RawIterator for PrefixedRocksDbRawIterator
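To illustrate the new `Display` implementation for `SubtreePath` in the path/src/subtree_path.rs hunk above, here is a rough usage sketch; the `From<&[B]>` conversion mirrors how the storage tests build paths, and the expected string is inferred from the formatting code shown rather than taken from an actual test:

    use grovedb_path::SubtreePath;

    fn main() {
        // Two segments: one made only of allowed ASCII, one with arbitrary bytes.
        let segments: [&[u8]; 2] = [b"identities", &[0xff, 0x01]];
        let path: SubtreePath<'_, &[u8]> = segments.as_ref().into();
        // Allowed-ASCII segments print verbatim, anything else is hex-encoded
        // with an "0x" prefix, and segments are joined with '/'.
        assert_eq!(path.to_string(), "identities/0xff01");
    }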