From 15662c8cd2d62e4529bc7f857276b6e83706e1a8 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 18 Jan 2024 11:09:25 -0500 Subject: [PATCH 001/112] Add initial work towards DA compression crate --- Cargo.lock | 19 +++ Cargo.toml | 1 + crates/compression/Cargo.toml | 18 +++ crates/compression/README.md | 34 +++++ crates/compression/src/block.rs | 19 +++ crates/compression/src/lib.rs | 2 + crates/compression/src/registry.rs | 205 +++++++++++++++++++++++++++++ crates/compression/src/types.rs | 0 8 files changed, 298 insertions(+) create mode 100644 crates/compression/Cargo.toml create mode 100644 crates/compression/README.md create mode 100644 crates/compression/src/block.rs create mode 100644 crates/compression/src/lib.rs create mode 100644 crates/compression/src/registry.rs create mode 100644 crates/compression/src/types.rs diff --git a/Cargo.lock b/Cargo.lock index 8c69a2ab472..3e0fd3e6c06 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -747,6 +747,15 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bindgen" version = "0.65.1" @@ -2774,6 +2783,16 @@ dependencies = [ "tokio", ] +[[package]] +name = "fuel-core-compression" +version = "0.22.0" +dependencies = [ + "bincode", + "fuel-core-types", + "postcard", + "serde", +] + [[package]] name = "fuel-core-consensus-module" version = "0.22.0" diff --git a/Cargo.toml b/Cargo.toml index 94aafb99c48..81a70648e62 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ members = [ "bin/keygen", "crates/chain-config", "crates/client", + "crates/compression", "crates/database", "crates/fuel-core", "crates/keygen", diff --git a/crates/compression/Cargo.toml 
b/crates/compression/Cargo.toml new file mode 100644 index 00000000000..b350b5d516e --- /dev/null +++ b/crates/compression/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "fuel-core-compression" +version = { workspace = true } +authors = { workspace = true } +categories = ["cryptography::cryptocurrencies"] +edition = { workspace = true } +homepage = { workspace = true } +keywords = ["blockchain", "cryptocurrencies", "fuel-core", "fuel-client", "fuel-compressor"] +license = { workspace = true } +repository = { workspace = true } +description = "Compression and decompression of Fuel blocks for DA storage." + +[dependencies] +fuel-core-types = { workspace = true, features = ["serde"] } +serde = { version = "1.0", features = ["derive"] } + +postcard = { version = "1.0", features = ["use-std"] } +bincode = "1.3" diff --git a/crates/compression/README.md b/crates/compression/README.md new file mode 100644 index 00000000000..23767f0ea14 --- /dev/null +++ b/crates/compression/README.md @@ -0,0 +1,34 @@ +# Compression and decompression of fuel-types for the DA layer + +## Compressed block header + +Each compressed block begins with a single-byte version field, so that it's possible to change the format later. + +## Temporal registry + +This crate provides offchain registries for different types such as `AssetId`, `ContractId`, scripts, and predicates. Each registry is a key-value store with three-byte key. The registires are essentially compression caches. The three byte key allows cache size of 16 million values before reregistering the older values. + +The registries allow replacing repeated objects with their respective keys, so if an object +is used multiple times in a short interval (couple of months, maybe), then the full value +exists on only a single uncompressed block, + +### Fraud proofs + +Compressed block will start with 32 bytes of merkle root over all compression smts, followed by newly registered values along with their keys. 
Using an SMT provides flexibility around the algorithm we use to define keys without knowing how exactly values were chosen to be registered. + +Each registry also uses an SMT. Since the keys are three bytes long, the depth of the SMT is capped at 24 levels. + + + - More efficient for fraud proofs instead of needing to provide entire previous blocks with proofs + +## Compression of `UtxoIds` + +Since each `UtxoId` only appears once, there's no point in registering them. Instead, they are replaced with `TxPointer`s (7 bytes worst case), which are still unique. + +### Fraud proofs + +During fraud proofs we need to use the `prev_root` to prove that the referenced block height is part of the chain. + +## Other techniques + +- These techniques should be good enough for now, but there are lots of other interesting ideas for this. diff --git a/crates/compression/src/block.rs b/crates/compression/src/block.rs new file mode 100644 index 00000000000..3ba2dc8e8af --- /dev/null +++ b/crates/compression/src/block.rs @@ -0,0 +1,19 @@ +use serde::{ + Deserialize, + Serialize, +}; + +use crate::registry::Registrations; + +/// Compressed block. +/// The versioning here working depends on the serialization format, +/// but as long as we we have less than 128 variants, postcard will +/// make that a single byte. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[non_exhaustive] +pub enum CompressedBlock { + V0 { + /// Registration section of the compressed block + registrations: Registrations, + }, +} diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs new file mode 100644 index 00000000000..3d2ea355958 --- /dev/null +++ b/crates/compression/src/lib.rs @@ -0,0 +1,2 @@ +mod block; +mod registry; diff --git a/crates/compression/src/registry.rs b/crates/compression/src/registry.rs new file mode 100644 index 00000000000..11f6b1455f8 --- /dev/null +++ b/crates/compression/src/registry.rs @@ -0,0 +1,205 @@ +use core::fmt; + +use fuel_core_types::fuel_types::Bytes32; +use serde::{ + ser::SerializeTuple, + Deserialize, + Serialize, +}; + +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)] +pub struct Key([u8; Self::SIZE]); +impl Key { + pub const SIZE: usize = 3; +} +impl TryFrom for Key { + type Error = &'static str; + fn try_from(value: u32) -> Result { + let v = value.to_be_bytes(); + if v[0] != 0 { + return Err("Key must be less than 2^24"); + } + + let mut bytes = [0u8; 3]; + bytes.copy_from_slice(&v[1..]); + Ok(Self(bytes)) + } +} + +/// New registrations written to a specific table. +/// Default value is an empty write. +#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub struct WriteTo { + /// The values are inserted starting from this key + pub start_key: Key, + /// Values. 
inserted using incrementing ids starting from `start_key` + pub values: Vec, +} + +/// Custom serialization is used to omit the start_key when the sequence is empty +impl Serialize for WriteTo +where + T: Serialize, +{ + fn serialize(&self, serializer: S) -> Result { + let mut tup = serializer.serialize_tuple(2)?; + tup.serialize_element(&self.values)?; + if self.values.is_empty() { + tup.serialize_element(&())?; + } else { + tup.serialize_element(&self.start_key)?; + } + tup.end() + } +} + +impl<'de, T> Deserialize<'de> for WriteTo +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_tuple( + 2, + Self { + start_key: Key::default(), + values: Vec::new(), + }, + ) + } +} + +impl<'de, T: Deserialize<'de>> serde::de::Visitor<'de> for WriteTo { + type Value = WriteTo; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(concat!("WriteTo<", stringify!(T), "> instance")) + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: serde::de::SeqAccess<'de>, + { + let values: Vec = seq.next_element()?.ok_or( + serde::de::Error::invalid_length(0, &"WriteTo<_> with 2 elements"), + )?; + + if values.is_empty() { + let _: () = seq.next_element()?.ok_or(serde::de::Error::invalid_length( + 1, + &"WriteTo<_> with 2 elements", + ))?; + Ok(WriteTo { + start_key: Key::default(), + values, + }) + } else { + let start_key: Key = seq.next_element()?.ok_or( + serde::de::Error::invalid_length(1, &"WriteTo<_> with 2 elements"), + )?; + Ok(WriteTo { start_key, values }) + } + } +} + +macro_rules! tables { + // $index muse use increasing numbers starting from zero + ($($name:ident: $ty:ty = $index:literal),*$(,)?) => { + /// Specifies the table to use for a given key. + /// The data is separated to tables based on the data type being stored. 
+ #[allow(non_camel_case_types)] // These are going to match field names exactly + #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] + #[non_exhaustive] + #[repr(u8)] + pub enum TableId { + $($name = $index),* + } + + /// Registeration changes per table + #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] + pub struct ChangesPerTable { + $(pub $name: WriteTo<$ty>),* + } + }; +} + +tables!( + asset_id: [u8; 32] = 0, + contract_id: [u8; 32] = 1, + script_code: Vec = 2, +); + +/// Registeration section of the compressed block +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Registrations { + /// Merkle root of the registeration table merkle roots + pub tables_root: Bytes32, + /// Changes per table + pub changes: ChangesPerTable, +} + +#[cfg(test)] +mod tests { + use super::*; + use bincode::Options; + use fuel_core_types::{ + fuel_asm::op, + fuel_tx::{ + AssetId, + ContractId, + }, + fuel_types::Bytes32, + }; + + #[test] + fn test_tables() { + let original = Registrations { + tables_root: Bytes32::default(), + changes: ChangesPerTable { + asset_id: WriteTo { + start_key: Key::try_from(100).unwrap(), + values: vec![*AssetId::from([0xa0; 32]), *AssetId::from([0xa1; 32])], + }, + contract_id: WriteTo { + start_key: Key::default(), + values: vec![ + *ContractId::from([0xc0; 32]), + // *ContractId::from([0xc1; 32]), + ], + }, + script_code: WriteTo { + start_key: Key::default(), + values: vec![ + vec![op::addi(0x20, 0x20, 1), op::ret(0)] + .into_iter() + .collect(), + vec![op::muli(0x20, 0x20, 5), op::ret(1)] + .into_iter() + .collect(), + ], + }, + }, + }; + + let pc_compressed = postcard::to_stdvec(&original).unwrap(); + let pc_decompressed: Registrations = + postcard::from_bytes(&pc_compressed).unwrap(); + assert_eq!(original, pc_decompressed); + + let bc_opt = bincode::DefaultOptions::new().with_varint_encoding(); + + let bc_compressed = bc_opt.serialize(&original).unwrap(); + let bc_decompressed: 
Registrations = bc_opt.deserialize(&bc_compressed).unwrap(); + assert_eq!(original, bc_decompressed); + + println!("data: {original:?}"); + println!("postcard compressed size {}", pc_compressed.len()); + println!("bincode compressed size {}", bc_compressed.len()); + println!("postcard compressed: {:x?}", pc_compressed); + println!("bincode compressed: {:x?}", bc_compressed); + + // panic!("ok, just showing the results"); + } +} diff --git a/crates/compression/src/types.rs b/crates/compression/src/types.rs new file mode 100644 index 00000000000..e69de29bb2d From 3101be65235defa2ae7499912ad31339a41bc314 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 18 Jan 2024 11:57:44 -0500 Subject: [PATCH 002/112] Add distilled and compressed header and tx types --- crates/compression/src/block.rs | 11 +- crates/compression/src/lib.rs | 1 + crates/compression/src/types.rs | 0 crates/compression/src/types/header.rs | 21 +++ crates/compression/src/types/mod.rs | 18 +++ crates/compression/src/types/tx.rs | 179 +++++++++++++++++++++++++ 6 files changed, 228 insertions(+), 2 deletions(-) delete mode 100644 crates/compression/src/types.rs create mode 100644 crates/compression/src/types/header.rs create mode 100644 crates/compression/src/types/mod.rs create mode 100644 crates/compression/src/types/tx.rs diff --git a/crates/compression/src/block.rs b/crates/compression/src/block.rs index 3ba2dc8e8af..5b8d418d084 100644 --- a/crates/compression/src/block.rs +++ b/crates/compression/src/block.rs @@ -3,17 +3,24 @@ use serde::{ Serialize, }; -use crate::registry::Registrations; +use crate::{ + registry::Registrations, + types, +}; /// Compressed block. /// The versioning here working depends on the serialization format, /// but as long as we we have less than 128 variants, postcard will /// make that a single byte. 
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] #[non_exhaustive] pub enum CompressedBlock { V0 { /// Registration section of the compressed block registrations: Registrations, + /// Compressed block header + header: types::header::Header, + /// Compressed transactions + transactions: Vec, }, } diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 3d2ea355958..707708a2ded 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -1,2 +1,3 @@ mod block; mod registry; +mod types; diff --git a/crates/compression/src/types.rs b/crates/compression/src/types.rs deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/crates/compression/src/types/header.rs b/crates/compression/src/types/header.rs new file mode 100644 index 00000000000..4db4b311c9f --- /dev/null +++ b/crates/compression/src/types/header.rs @@ -0,0 +1,21 @@ +use fuel_core_types::{ + blockchain::primitives::DaBlockHeight, + fuel_types::{ + BlockHeight, + Bytes32, + }, + tai64::Tai64, +}; + +use serde::{ + Deserialize, + Serialize, +}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(crate) struct Header { + pub da_height: DaBlockHeight, + pub prev_root: Bytes32, + pub height: BlockHeight, + pub time: Tai64, +} diff --git a/crates/compression/src/types/mod.rs b/crates/compression/src/types/mod.rs new file mode 100644 index 00000000000..6a8e3152e4e --- /dev/null +++ b/crates/compression/src/types/mod.rs @@ -0,0 +1,18 @@ +use serde::{ + Deserialize, + Serialize, +}; + +use crate::registry::Key; + +pub(crate) mod header; +pub(crate) mod tx; + +/// For types that need an explicit flag marking them +/// references to the registry instead of raw values, +/// this enum can be used. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub enum MaybeCompressed { + Compressed(Key), + Uncompressed(T), +} diff --git a/crates/compression/src/types/tx.rs b/crates/compression/src/types/tx.rs new file mode 100644 index 00000000000..ead02a1b213 --- /dev/null +++ b/crates/compression/src/types/tx.rs @@ -0,0 +1,179 @@ +//! Compressed versions of fuel-tx types needed for DA storage. + +// TODO: remove malleabile fields + +use fuel_core_types::{ + fuel_tx::{ + self, + TxPointer, + }, + fuel_types::{ + self, + BlockHeight, + Bytes32, + Word, + }, +}; +use serde::{ + Deserialize, + Serialize, +}; + +use crate::registry::Key; + +use super::MaybeCompressed; + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[allow(clippy::large_enum_variant)] +pub(crate) enum Transaction { + Script(Script), + Create(Create), + Mint(Mint), +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(crate) struct Script { + script_gas_limit: Word, + script: MaybeCompressed>, + script_data: Vec, + policies: fuel_tx::policies::Policies, + inputs: Vec, + outputs: Vec, + witnesses: Vec, + receipts_root: Bytes32, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(crate) enum Input { + CoinSigned { + utxo_id: TxPointer, + owner: Key, + amount: Word, + asset_id: Key, + tx_pointer: TxPointer, + witness_index: u8, + maturity: BlockHeight, + }, + CoinPredicate { + utxo_id: TxPointer, + owner: Key, + amount: Word, + asset_id: Key, + tx_pointer: TxPointer, + maturity: BlockHeight, + predicate_gas_used: Word, + predicate: Vec, + predicate_data: Vec, + }, + Contract { + utxo_id: TxPointer, + balance_root: Bytes32, + state_root: Bytes32, + tx_pointer: TxPointer, + contract_id: Key, + }, + MessageCoinSigned { + sender: Key, + recipient: Key, + amount: Word, + nonce: fuel_types::Nonce, + witness_index: u8, + data: Vec, + }, + MessageCoinPredicate { + sender: Key, + recipient: Key, + amount: Word, + nonce: fuel_types::Nonce, + predicate_gas_used: Word, + predicate: Vec, + 
predicate_data: Vec, + }, + MessageDataSigned { + sender: Key, + recipient: Key, + amount: Word, + nonce: fuel_types::Nonce, + witness_index: u8, + data: Vec, + }, + MessageDataPredicate { + sender: Key, + recipient: Key, + amount: Word, + nonce: fuel_types::Nonce, + data: Vec, + predicate_gas_used: Word, + predicate: Vec, + predicate_data: Vec, + }, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(crate) enum Output { + Coin { + to: Key, + amount: Word, + asset_id: Key, + }, + + Contract { + input_index: u8, + balance_root: Bytes32, + state_root: Bytes32, + }, + + Change { + to: Key, + amount: Word, + asset_id: Key, + }, + + Variable { + to: Key, + amount: Word, + asset_id: Key, + }, + + ContractCreated { + contract_id: Key, + state_root: Bytes32, + }, +} + +#[derive(Default, Debug, Clone, Serialize, Deserialize)] +pub struct Create { + bytecode_length: Word, + bytecode_witness_index: u8, + policies: fuel_tx::policies::Policies, + storage_slots: Vec, + inputs: Vec, + outputs: Vec, + witnesses: Vec, + salt: fuel_types::Salt, +} + +#[derive(Default, Debug, Clone, Serialize, Deserialize)] +pub struct Mint { + tx_pointer: TxPointer, + input_contract: InputContract, + output_contract: OutputContract, + mint_amount: Word, + mint_asset_id: Key, +} + +#[derive(Default, Debug, Clone, Serialize, Deserialize)] +pub struct InputContract { + utxo_id: TxPointer, + balance_root: Bytes32, + state_root: Bytes32, + tx_pointer: TxPointer, + contract_id: Key, +} + +#[derive(Default, Debug, Clone, Serialize, Deserialize)] +pub struct OutputContract { + input_index: u8, + balance_root: Bytes32, + state_root: Bytes32, +} From 4a2372ff92e62f3629c19a4925a372fd159fe85b Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 18 Jan 2024 12:00:17 -0500 Subject: [PATCH 003/112] Add changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 682df211ee2..7621c2b1e97 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,9 @@ 
and this project adheres to [Semantic Versioning](http://semver.org/). Description of the upcoming release here. +### Added + +- [#1609](https://github.com/FuelLabs/fuel-core/pull/1609): Add a DA compression crate `fuel-core-compression`. ### Changed From 941e5e54112ef244afe56838a44d45dfc5d55a14 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Fri, 19 Jan 2024 02:39:41 -0500 Subject: [PATCH 004/112] Attempt nice types interface, that wont work since Rust is missing specialization --- crates/compression/Cargo.toml | 2 + crates/compression/src/compression.rs | 196 ++++++++++++++++ crates/compression/src/lib.rs | 3 + .../block_section.rs} | 86 ++----- crates/compression/src/registry/db.rs | 20 ++ crates/compression/src/registry/in_memory.rs | 58 +++++ crates/compression/src/registry/key.rs | 99 ++++++++ crates/compression/src/registry/mod.rs | 216 ++++++++++++++++++ crates/compression/src/types/mod.rs | 9 +- crates/compression/src/types/tx.rs | 53 +++-- 10 files changed, 650 insertions(+), 92 deletions(-) create mode 100644 crates/compression/src/compression.rs rename crates/compression/src/{registry.rs => registry/block_section.rs} (67%) create mode 100644 crates/compression/src/registry/db.rs create mode 100644 crates/compression/src/registry/in_memory.rs create mode 100644 crates/compression/src/registry/key.rs create mode 100644 crates/compression/src/registry/mod.rs diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index b350b5d516e..360cf991d3f 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -16,3 +16,5 @@ serde = { version = "1.0", features = ["derive"] } postcard = { version = "1.0", features = ["use-std"] } bincode = "1.3" + +paste = "1.0" diff --git a/crates/compression/src/compression.rs b/crates/compression/src/compression.rs new file mode 100644 index 00000000000..45ef22ab129 --- /dev/null +++ b/crates/compression/src/compression.rs @@ -0,0 +1,196 @@ +use crate::{ + registry::{ + db, + 
next_keys, + ChangesPerTable, + CountPerTable, + KeyPerTable, + Table, + }, + Key, +}; + +#[must_use] +pub struct CompactionContext<'a, R> { + reg: &'a mut R, + next_keys: KeyPerTable, + key_limits: CountPerTable, + changes: ChangesPerTable, +} +impl<'a, R> CompactionContext<'a, R> +where + R: db::RegistrySelectNextKey, +{ + pub fn new(reg: &'a mut R, target: &C) -> Self { + let next_keys = next_keys(reg); + let key_limits = target.count(); + + Self { + reg, + next_keys, + key_limits, + changes: Default::default(), + } + } +} + +impl<'a, R> CompactionContext<'a, R> +where + R: db::RegistryRead + db::RegistryIndex, +{ + pub fn compact(&mut self, value: T::Type) -> Key { + // Check if the registry contains this value already + if let Some(key) = self.reg.index_lookup::(&value) { + let limit = self.key_limits.by_table::(); + // Check if the value is in the possibly-overwritable range + if false { + // TODO + return key; + } + } + // Allocate a new key for this + let key = self.next_keys.mut_by_table::().take_next(); + self.changes.push::(value); + key + } +} + +impl<'a, R> CompactionContext<'a, R> +where + R: db::RegistryWrite, +{ + /// Apply all changes to the registry + pub fn apply(self) { + self.changes.apply(self.reg); + } +} + +/// Convert data to reference-based format +pub trait Compactable { + type Compact; + + /// Count max number of each key type, for upper limit of overwritten keys + fn count(&self) -> CountPerTable; + + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex; + + fn decompact(compact: Self::Compact, reg: &R) -> Self + where + R: db::RegistryRead; +} + +#[cfg(test)] +mod tests { + use fuel_core_types::fuel_types::Address; + + use crate::{ + registry::{ + db, + in_memory::InMemoryRegistry, + tables, + CountPerTable, + }, + Key, + }; + + use super::{ + Compactable, + CompactionContext, + }; + + impl Compactable for Address { + type Compact = Key; + + fn 
count(&self) -> crate::registry::CountPerTable { + CountPerTable { + Address: 1, + ..Default::default() + } + } + + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, + { + ctx.compact::(**self) + } + + fn decompact(compact: Self::Compact, reg: &R) -> Self + where + R: db::RegistryRead, + { + Address::from(reg.read::(compact)) + } + } + + #[derive(Debug, PartialEq)] + struct ManualExample { + a: Address, + b: Address, + c: u64, + } + + #[derive(Debug, PartialEq)] + struct ManualExampleCompact { + a: Key, + b: Key, + c: u64, + } + + impl Compactable for ManualExample { + type Compact = ManualExampleCompact; + + fn count(&self) -> crate::registry::CountPerTable { + CountPerTable { + Address: 2, + ..Default::default() + } + } + + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, + { + let a = ctx.compact::(*self.a); + let b = ctx.compact::(*self.b); + ManualExampleCompact { a, b, c: self.c } + } + + fn decompact(compact: Self::Compact, reg: &R) -> Self + where + R: db::RegistryRead, + { + let a = Address::from(reg.read::(compact.a)); + let b = Address::from(reg.read::(compact.b)); + Self { a, b, c: compact.c } + } + } + + fn check(target: C) + where + C::Compact: std::fmt::Debug, + { + let mut registry = InMemoryRegistry::default(); + + let key_counts = target.count(); + let mut ctx = CompactionContext::new(&mut registry, &target); + let compacted = target.compact(&mut ctx); + dbg!(®istry); + dbg!(&compacted); + let decompacted = C::decompact(compacted, ®istry); + assert_eq!(target, decompacted); + } + + #[test] + fn test_compaction_roundtrip() { + check(Address::default()); + check(Address::from([1u8; 32])); + check(ManualExample { + a: Address::from([1u8; 32]), + b: Address::from([2u8; 32]), + c: 3, + }); + } +} diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 
707708a2ded..4675758d1d7 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -1,3 +1,6 @@ mod block; +mod compression; mod registry; mod types; + +pub use registry::Key; diff --git a/crates/compression/src/registry.rs b/crates/compression/src/registry/block_section.rs similarity index 67% rename from crates/compression/src/registry.rs rename to crates/compression/src/registry/block_section.rs index 11f6b1455f8..b8b0d3f2281 100644 --- a/crates/compression/src/registry.rs +++ b/crates/compression/src/registry/block_section.rs @@ -7,39 +7,26 @@ use serde::{ Serialize, }; -#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)] -pub struct Key([u8; Self::SIZE]); -impl Key { - pub const SIZE: usize = 3; -} -impl TryFrom for Key { - type Error = &'static str; - fn try_from(value: u32) -> Result { - let v = value.to_be_bytes(); - if v[0] != 0 { - return Err("Key must be less than 2^24"); - } - - let mut bytes = [0u8; 3]; - bytes.copy_from_slice(&v[1..]); - Ok(Self(bytes)) - } -} +use super::{ + key::Key, + ChangesPerTable, + Table, +}; /// New registrations written to a specific table. /// Default value is an empty write. #[derive(Debug, Clone, PartialEq, Eq, Default)] -pub struct WriteTo { +pub struct WriteTo { /// The values are inserted starting from this key - pub start_key: Key, + pub start_key: Key, /// Values. 
inserted using incrementing ids starting from `start_key` - pub values: Vec, + pub values: Vec, } /// Custom serialization is used to omit the start_key when the sequence is empty impl Serialize for WriteTo where - T: Serialize, + T: Table + Serialize, { fn serialize(&self, serializer: S) -> Result { let mut tup = serializer.serialize_tuple(2)?; @@ -53,7 +40,7 @@ where } } -impl<'de, T> Deserialize<'de> for WriteTo +impl<'de, T: Table> Deserialize<'de> for WriteTo where T: Deserialize<'de>, { @@ -71,7 +58,7 @@ where } } -impl<'de, T: Deserialize<'de>> serde::de::Visitor<'de> for WriteTo { +impl<'de, T: Table + Deserialize<'de>> serde::de::Visitor<'de> for WriteTo { type Value = WriteTo; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { @@ -82,7 +69,7 @@ impl<'de, T: Deserialize<'de>> serde::de::Visitor<'de> for WriteTo { where A: serde::de::SeqAccess<'de>, { - let values: Vec = seq.next_element()?.ok_or( + let values: Vec = seq.next_element()?.ok_or( serde::de::Error::invalid_length(0, &"WriteTo<_> with 2 elements"), )?; @@ -96,7 +83,7 @@ impl<'de, T: Deserialize<'de>> serde::de::Visitor<'de> for WriteTo { values, }) } else { - let start_key: Key = seq.next_element()?.ok_or( + let start_key: Key = seq.next_element()?.ok_or( serde::de::Error::invalid_length(1, &"WriteTo<_> with 2 elements"), )?; Ok(WriteTo { start_key, values }) @@ -104,33 +91,6 @@ impl<'de, T: Deserialize<'de>> serde::de::Visitor<'de> for WriteTo { } } -macro_rules! tables { - // $index muse use increasing numbers starting from zero - ($($name:ident: $ty:ty = $index:literal),*$(,)?) => { - /// Specifies the table to use for a given key. - /// The data is separated to tables based on the data type being stored. 
- #[allow(non_camel_case_types)] // These are going to match field names exactly - #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] - #[non_exhaustive] - #[repr(u8)] - pub enum TableId { - $($name = $index),* - } - - /// Registeration changes per table - #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] - pub struct ChangesPerTable { - $(pub $name: WriteTo<$ty>),* - } - }; -} - -tables!( - asset_id: [u8; 32] = 0, - contract_id: [u8; 32] = 1, - script_code: Vec = 2, -); - /// Registeration section of the compressed block #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct Registrations { @@ -146,11 +106,11 @@ mod tests { use bincode::Options; use fuel_core_types::{ fuel_asm::op, - fuel_tx::{ - AssetId, - ContractId, + fuel_tx::AssetId, + fuel_types::{ + Address, + Bytes32, }, - fuel_types::Bytes32, }; #[test] @@ -158,18 +118,15 @@ mod tests { let original = Registrations { tables_root: Bytes32::default(), changes: ChangesPerTable { - asset_id: WriteTo { + AssetId: WriteTo { start_key: Key::try_from(100).unwrap(), values: vec![*AssetId::from([0xa0; 32]), *AssetId::from([0xa1; 32])], }, - contract_id: WriteTo { + Address: WriteTo { start_key: Key::default(), - values: vec![ - *ContractId::from([0xc0; 32]), - // *ContractId::from([0xc1; 32]), - ], + values: vec![*Address::from([0xc0; 32])], }, - script_code: WriteTo { + ScriptCode: WriteTo { start_key: Key::default(), values: vec![ vec![op::addi(0x20, 0x20, 1), op::ret(0)] @@ -180,6 +137,7 @@ mod tests { .collect(), ], }, + Witness: WriteTo::default(), }, }; diff --git a/crates/compression/src/registry/db.rs b/crates/compression/src/registry/db.rs new file mode 100644 index 00000000000..dd543833595 --- /dev/null +++ b/crates/compression/src/registry/db.rs @@ -0,0 +1,20 @@ +use super::{ + Key, + Table, +}; + +pub trait RegistrySelectNextKey { + fn next_key(&mut self) -> Key; +} + +pub trait RegistryRead { + fn read(&self, key: Key) -> T::Type; +} + +pub 
trait RegistryWrite { + fn batch_write(&mut self, start_key: Key, values: Vec); +} + +pub trait RegistryIndex { + fn index_lookup(&self, value: &T::Type) -> Option>; +} diff --git a/crates/compression/src/registry/in_memory.rs b/crates/compression/src/registry/in_memory.rs new file mode 100644 index 00000000000..2c025e7b319 --- /dev/null +++ b/crates/compression/src/registry/in_memory.rs @@ -0,0 +1,58 @@ +use std::collections::HashMap; + +use super::{ + db::*, + key::RawKey, + Key, + Table, +}; + +/// Simple and inefficient in-memory registry for testing purposes. +#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub struct InMemoryRegistry { + next_keys: HashMap<&'static str, RawKey>, + storage: HashMap<&'static str, HashMap>>, +} + +impl RegistrySelectNextKey for InMemoryRegistry { + fn next_key(&mut self) -> Key { + let next_key = self.next_keys.entry(T::NAME).or_default(); + let key = Key::::from_raw(*next_key); + *next_key = next_key.next(); + key + } +} + +impl RegistryRead for InMemoryRegistry { + fn read(&self, key: Key) -> T::Type { + self.storage + .get(T::NAME) + .and_then(|table| table.get(&key.raw())) + .map(|bytes| postcard::from_bytes(bytes).expect("Invalid value in registry")) + .unwrap_or_default() + } +} + +impl RegistryWrite for InMemoryRegistry { + fn batch_write(&mut self, start_key: Key, values: Vec) { + let table = self.storage.entry(T::NAME).or_default(); + let mut key = start_key.raw(); + for value in values.into_iter() { + table.insert(key, postcard::to_stdvec(&value).unwrap()); + key = key.next(); + } + } +} + +impl RegistryIndex for InMemoryRegistry { + fn index_lookup(&self, value: &T::Type) -> Option> { + let needle = postcard::to_stdvec(value).unwrap(); + let table = self.storage.get(T::NAME)?; + for (key, value) in table.iter() { + if value == &needle { + return Some(Key::from_raw(*key)); + } + } + None + } +} diff --git a/crates/compression/src/registry/key.rs b/crates/compression/src/registry/key.rs new file mode 100644 index 
00000000000..a69495fde49 --- /dev/null +++ b/crates/compression/src/registry/key.rs @@ -0,0 +1,99 @@ +use std::marker::PhantomData; + +use serde::{ + Deserialize, + Serialize, +}; + +use super::Table; + +/// Untyped key pointing to a registry table entry. +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct RawKey([u8; Self::SIZE]); +impl RawKey { + pub const SIZE: usize = 3; + pub const MIN: Self = Self([0; Self::SIZE]); + pub const MAX: Self = Self([u8::MAX; Self::SIZE]); + + /// Returns incremented key, wrapping around at limit + pub fn next(self) -> Self { + let v = u32::from_be_bytes([0, self.0[0], self.0[1], self.0[2]]) + 1; + + let mut bytes = [0u8; 3]; + bytes.copy_from_slice(&v.to_be_bytes()[1..]); + RawKey(bytes) + } +} +impl TryFrom for RawKey { + type Error = &'static str; + fn try_from(value: u32) -> Result { + let v = value.to_be_bytes(); + if v[0] != 0 { + return Err("RawKey must be less than 2^24"); + } + + let mut bytes = [0u8; 3]; + bytes.copy_from_slice(&v[1..]); + Ok(Self(bytes)) + } +} + +/// Typed key to a registry table entry. +#[derive(Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(transparent)] +pub struct Key(RawKey, PhantomData); +impl Clone for Key { + fn clone(&self) -> Self { + Self(self.0, PhantomData) + } +} +impl Copy for Key {} + +impl Key { + pub fn raw(&self) -> RawKey { + self.0 + } + + pub fn from_raw(raw: RawKey) -> Self { + Self(raw, PhantomData) + } + + pub fn next(self) -> Self { + Self(self.0.next(), PhantomData) + } + + /// Increments the key by one, and returns the previous value. 
+ pub fn take_next(&mut self) -> Self { + let result = *self; + self.0 = self.0.next(); + result + } +} + +impl TryFrom for Key { + type Error = &'static str; + fn try_from(value: u32) -> Result { + Ok(Self(RawKey::try_from(value)?, PhantomData)) + } +} + +impl Default for Key { + fn default() -> Self { + Self(RawKey::default(), PhantomData) + } +} + +#[cfg(test)] +mod tests { + use super::RawKey; + + #[test] + fn key_next() { + assert_eq!(RawKey::default().next(), RawKey([0, 0, 1])); + assert_eq!(RawKey::MIN.next().next(), RawKey([0, 0, 2])); + assert_eq!(RawKey([0, 0, 255]).next(), RawKey([0, 1, 0])); + assert_eq!(RawKey([0, 1, 255]).next(), RawKey([0, 2, 0])); + assert_eq!(RawKey([0, 255, 255]).next(), RawKey([1, 0, 0])); + assert_eq!(RawKey::MAX.next(), RawKey::MIN); + } +} diff --git a/crates/compression/src/registry/mod.rs b/crates/compression/src/registry/mod.rs new file mode 100644 index 00000000000..5b9e53d8137 --- /dev/null +++ b/crates/compression/src/registry/mod.rs @@ -0,0 +1,216 @@ +use serde::{ + Deserialize, + Serialize, +}; + +mod block_section; +pub(crate) mod db; +pub(crate) mod in_memory; +mod key; + +use self::block_section::WriteTo; +pub use self::key::Key; + +pub(crate) use self::block_section::Registrations; + +mod _private { + pub trait Seal {} +} + +pub trait Table: _private::Seal { + const NAME: &'static str; + type Type: Default + Serialize + for<'de> Deserialize<'de>; +} + +macro_rules! tables { + // $index muse use increasing numbers starting from zero + ($($name:ident: $ty:ty),*$(,)?) => { paste::paste!{ + pub mod tables { + $( + /// Specifies the table to use for a given key. + /// The data is separated to tables based on the data type being stored. 
+ #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] + pub struct $name; + + impl super::_private::Seal for $name {} + impl super::Table for $name { + const NAME: &'static str = stringify!($name); + type Type = $ty; + } + + // Type level magic + pub trait []: super::_private::Seal {} + impl [] for $name {} + )* + } + + /// One counter per table + #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] + #[allow(non_snake_case)] // The field names match table type names eactly + pub struct CountPerTable { + $(pub $name: usize),* + } + + impl CountPerTable { + pub fn by_table(&self) -> usize { + match T::NAME { + $( + stringify!($name) => self.$name, + )* + _ => unreachable!(), + } + } + } + + /// One key value per table + #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] + #[allow(non_snake_case)] // The field names match table type names eactly + pub struct KeyPerTable { + $(pub $name: Key),* + } + + impl KeyPerTable { + pub fn by_table(&self) -> Key { + match T::NAME { + $( + stringify!($name) => Key::::from_raw(self.$name.raw()), + )* + _ => unreachable!(), + } + } + + pub fn mut_by_table(&self) -> Key { + match T::NAME { + $( + stringify!($name) => Key::::from_raw(self.$name.raw()), + )* + _ => unreachable!(), + } + } + } + + pub fn next_keys(reg: &mut R) -> KeyPerTable { + KeyPerTable { + $( $name: reg.next_key(), )* + } + } + + /// Registeration changes per table + #[derive(Debug, Clone, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] + #[allow(non_snake_case)] // The field names match table type names eactly + pub struct ChangesPerTable { + $(pub $name: WriteTo),* + } + + impl ChangesPerTable { + pub fn push(&self, value: T::Type) -> &mut WriteTo { + match T::NAME { + $( + stringify!($name) => self.$name.values.push(value), + )* + _ => unreachable!(), + } + } + + pub fn apply(&self, reg: &mut impl db::RegistryWrite) { + $( + 
reg.batch_write(self.$name.start_key, self.$name.values.clone()); + )* + } + } + }}; +} + +tables!( + AssetId: [u8; 32], + Address: [u8; 32], + ScriptCode: Vec, + Witness: Vec, +); + +#[cfg(test)] +mod tests { + use fuel_core_types::fuel_types::AssetId; + use tests::key::RawKey; + + use super::*; + + use super::db::{ + RegistryIndex as _, + RegistryRead as _, + RegistryWrite as _, + }; + + #[test] + fn test_in_memory_db() { + let mut reg = in_memory::InMemoryRegistry::default(); + + // Empty + assert_eq!( + reg.read(Key::::try_from(100).unwrap()), + *AssetId::default() + ); + assert_eq!( + reg.index_lookup(&*AssetId::from([1; 32])), + None::> + ); + + // Write + reg.batch_write( + Key::::from_raw(RawKey::try_from(100u32).unwrap()), + vec![*AssetId::from([1; 32]), *AssetId::from([2; 32])], + ); + assert_eq!( + reg.read(Key::::try_from(100).unwrap()), + *AssetId::from([1; 32]) + ); + assert_eq!( + reg.read(Key::::try_from(101).unwrap()), + *AssetId::from([2; 32]) + ); + assert_eq!( + reg.read(Key::::try_from(102).unwrap()), + *AssetId::default() + ); + + // Overwrite + reg.batch_write( + Key::::from_raw(RawKey::try_from(99u32).unwrap()), + vec![*AssetId::from([10; 32]), *AssetId::from([11; 32])], + ); + assert_eq!( + reg.read(Key::::try_from(99).unwrap()), + *AssetId::from([10; 32]) + ); + assert_eq!( + reg.read(Key::::try_from(100).unwrap()), + *AssetId::from([11; 32]) + ); + + // Wrapping + reg.batch_write( + Key::::from_raw(RawKey::MAX), + vec![*AssetId::from([3; 32]), *AssetId::from([4; 32])], + ); + + assert_eq!( + reg.read(Key::::from_raw(RawKey::MAX)), + *AssetId::from([3; 32]) + ); + + assert_eq!( + reg.read(Key::::from_raw(RawKey::MIN)), + *AssetId::from([4; 32]) + ); + + assert_eq!( + reg.index_lookup(&*AssetId::from([3; 32])), + Some(Key::::from_raw(RawKey::MAX)) + ); + + assert_eq!( + reg.index_lookup(&*AssetId::from([4; 32])), + Some(Key::::from_raw(RawKey::MIN)) + ); + } +} diff --git a/crates/compression/src/types/mod.rs 
b/crates/compression/src/types/mod.rs index 6a8e3152e4e..ab76f82fc72 100644 --- a/crates/compression/src/types/mod.rs +++ b/crates/compression/src/types/mod.rs @@ -3,7 +3,10 @@ use serde::{ Serialize, }; -use crate::registry::Key; +use crate::registry::{ + Key, + Table, +}; pub(crate) mod header; pub(crate) mod tx; @@ -12,7 +15,7 @@ pub(crate) mod tx; /// references to the registry instead of raw values, /// this enum can be used. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub enum MaybeCompressed { - Compressed(Key), +pub enum MaybeCompressed { + Compressed(Key), Uncompressed(T), } diff --git a/crates/compression/src/types/tx.rs b/crates/compression/src/types/tx.rs index ead02a1b213..a2ae53043ed 100644 --- a/crates/compression/src/types/tx.rs +++ b/crates/compression/src/types/tx.rs @@ -19,7 +19,10 @@ use serde::{ Serialize, }; -use crate::registry::Key; +use crate::registry::{ + tables, + Key, +}; use super::MaybeCompressed; @@ -34,12 +37,12 @@ pub(crate) enum Transaction { #[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct Script { script_gas_limit: Word, - script: MaybeCompressed>, + script: MaybeCompressed, script_data: Vec, policies: fuel_tx::policies::Policies, inputs: Vec, outputs: Vec, - witnesses: Vec, + witnesses: Vec>, receipts_root: Bytes32, } @@ -47,18 +50,18 @@ pub(crate) struct Script { pub(crate) enum Input { CoinSigned { utxo_id: TxPointer, - owner: Key, + owner: Key, amount: Word, - asset_id: Key, + asset_id: Key, tx_pointer: TxPointer, witness_index: u8, maturity: BlockHeight, }, CoinPredicate { utxo_id: TxPointer, - owner: Key, + owner: Key, amount: Word, - asset_id: Key, + asset_id: Key, tx_pointer: TxPointer, maturity: BlockHeight, predicate_gas_used: Word, @@ -70,19 +73,19 @@ pub(crate) enum Input { balance_root: Bytes32, state_root: Bytes32, tx_pointer: TxPointer, - contract_id: Key, + asset_id: Key, }, MessageCoinSigned { - sender: Key, - recipient: Key, + sender: Key, + recipient: Key, amount: Word, 
nonce: fuel_types::Nonce, witness_index: u8, data: Vec, }, MessageCoinPredicate { - sender: Key, - recipient: Key, + sender: Key, + recipient: Key, amount: Word, nonce: fuel_types::Nonce, predicate_gas_used: Word, @@ -90,16 +93,16 @@ pub(crate) enum Input { predicate_data: Vec, }, MessageDataSigned { - sender: Key, - recipient: Key, + sender: Key, + recipient: Key, amount: Word, nonce: fuel_types::Nonce, witness_index: u8, data: Vec, }, MessageDataPredicate { - sender: Key, - recipient: Key, + sender: Key, + recipient: Key, amount: Word, nonce: fuel_types::Nonce, data: Vec, @@ -112,9 +115,9 @@ pub(crate) enum Input { #[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) enum Output { Coin { - to: Key, + to: Key, amount: Word, - asset_id: Key, + asset_id: Key, }, Contract { @@ -124,19 +127,19 @@ pub(crate) enum Output { }, Change { - to: Key, + to: Key, amount: Word, - asset_id: Key, + asset_id: Key, }, Variable { - to: Key, + to: Key, amount: Word, - asset_id: Key, + asset_id: Key, }, ContractCreated { - contract_id: Key, + contract_id: TxPointer, state_root: Bytes32, }, } @@ -159,7 +162,7 @@ pub struct Mint { input_contract: InputContract, output_contract: OutputContract, mint_amount: Word, - mint_asset_id: Key, + mint_asset_id: Key, } #[derive(Default, Debug, Clone, Serialize, Deserialize)] @@ -168,7 +171,7 @@ pub struct InputContract { balance_root: Bytes32, state_root: Bytes32, tx_pointer: TxPointer, - contract_id: Key, + contract_id: TxPointer, } #[derive(Default, Debug, Clone, Serialize, Deserialize)] From e65062d43b52f0f2a59e46b10f8d33700068e207 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Fri, 19 Jan 2024 03:57:06 -0500 Subject: [PATCH 005/112] Nevermind, type-based madness was possible and actually quite neat --- crates/compression/src/compression.rs | 85 +++++++++++++-------- crates/compression/src/registry/key.rs | 40 +++++++++- crates/compression/src/registry/mod.rs | 101 +++++++++++++++---------- 3 files changed, 152 insertions(+), 74 
deletions(-) diff --git a/crates/compression/src/compression.rs b/crates/compression/src/compression.rs index 45ef22ab129..c11aac62a97 100644 --- a/crates/compression/src/compression.rs +++ b/crates/compression/src/compression.rs @@ -1,5 +1,11 @@ use crate::{ registry::{ + access::{ + self, + *, + }, + add_keys, + block_section::WriteTo, db, next_keys, ChangesPerTable, @@ -12,25 +18,42 @@ use crate::{ #[must_use] pub struct CompactionContext<'a, R> { + /// The registry reg: &'a mut R, + /// These are the keys where writing started + start_keys: KeyPerTable, + /// The next keys to use for each table next_keys: KeyPerTable, - key_limits: CountPerTable, + /// Keys in range next_keys..safe_keys_start + /// could be overwritten by the compaction, + /// and cannot be used for new values. + safe_keys_start: KeyPerTable, changes: ChangesPerTable, } impl<'a, R> CompactionContext<'a, R> where - R: db::RegistrySelectNextKey, + R: db::RegistrySelectNextKey + + db::RegistryRead + + db::RegistryIndex + + db::RegistryWrite, { - pub fn new(reg: &'a mut R, target: &C) -> Self { - let next_keys = next_keys(reg); + pub fn run(reg: &'a mut R, target: C) -> C::Compact { + let start_keys = next_keys(reg); + let next_keys = start_keys; let key_limits = target.count(); + let safe_keys_start = add_keys(next_keys, key_limits); - Self { + let mut ctx = Self { reg, + start_keys, next_keys, - key_limits, + safe_keys_start, changes: Default::default(), - } + }; + + let compacted = target.compact(&mut ctx); + ctx.changes.apply_to_registry(ctx.reg); + compacted } } @@ -38,33 +61,33 @@ impl<'a, R> CompactionContext<'a, R> where R: db::RegistryRead + db::RegistryIndex, { - pub fn compact(&mut self, value: T::Type) -> Key { + /// Convert a value to a key + /// If necessary, store the value in the changeset and allocate a new key. 
+ pub fn to_key(&mut self, value: T::Type) -> Key + where + KeyPerTable: access::AccessCopy>, + KeyPerTable: access::AccessMut>, + ChangesPerTable: access::AccessMut>, + { // Check if the registry contains this value already if let Some(key) = self.reg.index_lookup::(&value) { - let limit = self.key_limits.by_table::(); + let start: Key = self.start_keys.value(); + let end: Key = self.safe_keys_start.value(); // Check if the value is in the possibly-overwritable range - if false { - // TODO + if !key.is_between(start, end) { return key; } } // Allocate a new key for this - let key = self.next_keys.mut_by_table::().take_next(); - self.changes.push::(value); + let key = >>::get_mut(&mut self.next_keys) + .take_next(); + >>::get_mut(&mut self.changes) + .values + .push(value); key } } -impl<'a, R> CompactionContext<'a, R> -where - R: db::RegistryWrite, -{ - /// Apply all changes to the registry - pub fn apply(self) { - self.changes.apply(self.reg); - } -} - /// Convert data to reference-based format pub trait Compactable { type Compact; @@ -114,7 +137,7 @@ mod tests { where R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, { - ctx.compact::(**self) + ctx.to_key::(**self) } fn decompact(compact: Self::Compact, reg: &R) -> Self @@ -125,7 +148,7 @@ mod tests { } } - #[derive(Debug, PartialEq)] + #[derive(Debug, Clone, PartialEq)] struct ManualExample { a: Address, b: Address, @@ -153,8 +176,8 @@ mod tests { where R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, { - let a = ctx.compact::(*self.a); - let b = ctx.compact::(*self.b); + let a = ctx.to_key::(*self.a); + let b = ctx.to_key::(*self.b); ManualExampleCompact { a, b, c: self.c } } @@ -168,17 +191,13 @@ mod tests { } } - fn check(target: C) + fn check(target: C) where C::Compact: std::fmt::Debug, { let mut registry = InMemoryRegistry::default(); - let key_counts = target.count(); - let mut ctx = CompactionContext::new(&mut registry, &target); - let compacted = target.compact(&mut ctx); - 
dbg!(®istry); - dbg!(&compacted); + let compacted = CompactionContext::run(&mut registry, target.clone()); let decompacted = C::decompact(compacted, ®istry); assert_eq!(target, decompacted); } diff --git a/crates/compression/src/registry/key.rs b/crates/compression/src/registry/key.rs index a69495fde49..811ec5d5828 100644 --- a/crates/compression/src/registry/key.rs +++ b/crates/compression/src/registry/key.rs @@ -15,14 +15,37 @@ impl RawKey { pub const MIN: Self = Self([0; Self::SIZE]); pub const MAX: Self = Self([u8::MAX; Self::SIZE]); - /// Returns incremented key, wrapping around at limit - pub fn next(self) -> Self { - let v = u32::from_be_bytes([0, self.0[0], self.0[1], self.0[2]]) + 1; + pub fn as_u32(self) -> u32 { + u32::from_be_bytes([0, self.0[0], self.0[1], self.0[2]]) + } + /// Wraps around at limit + pub fn add_u32(self, rhs: u32) -> Self { + let lhs = self.as_u32(); + let v = lhs.wrapping_add(rhs); let mut bytes = [0u8; 3]; bytes.copy_from_slice(&v.to_be_bytes()[1..]); RawKey(bytes) } + + /// Wraps around at limit + pub fn next(self) -> Self { + self.add_u32(1) + } + + /// Is `self` between `start` and `end`? i.e. in the half-open logical range `start`..`end`, + /// so that wrap-around cases are handled correctly. + pub fn is_between(self, start: Self, end: Self) -> bool { + let low = start.as_u32(); + let high = end.as_u32(); + let v = self.as_u32(); + + if high >= low { + low <= v && v < high + } else { + v < high || v >= low + } + } } impl TryFrom for RawKey { type Error = &'static str; @@ -58,10 +81,21 @@ impl Key { Self(raw, PhantomData) } + /// Wraps around at limit + pub fn add_u32(self, rhs: u32) -> Self { + Self(self.0.add_u32(rhs), PhantomData) + } + + /// Wraps around at limit pub fn next(self) -> Self { Self(self.0.next(), PhantomData) } + /// Is `self` between `start` and `end`? i.e. in the half-open logical range `start`..`end`, + /// so that wrap-around cases are handled correctly. 
+ pub fn is_between(self, start: Self, end: Self) -> bool { + self.0.is_between(start.0, end.0) + } /// Increments the key by one, and returns the previous value. pub fn take_next(&mut self) -> Self { let result = *self; diff --git a/crates/compression/src/registry/mod.rs b/crates/compression/src/registry/mod.rs index 5b9e53d8137..e8597a3cd27 100644 --- a/crates/compression/src/registry/mod.rs +++ b/crates/compression/src/registry/mod.rs @@ -3,7 +3,7 @@ use serde::{ Serialize, }; -mod block_section; +pub(crate) mod block_section; pub(crate) mod db; pub(crate) mod in_memory; mod key; @@ -22,9 +22,23 @@ pub trait Table: _private::Seal { type Type: Default + Serialize + for<'de> Deserialize<'de>; } +pub mod access { + pub trait AccessCopy { + fn value(&self) -> V; + } + + pub trait AccessRef { + fn get(&self) -> &V; + } + + pub trait AccessMut { + fn get_mut(&mut self) -> &mut V; + } +} + macro_rules! tables { // $index muse use increasing numbers starting from zero - ($($name:ident: $ty:ty),*$(,)?) => { paste::paste!{ + ($($name:ident: $ty:ty),*$(,)?) => { pub mod tables { $( /// Specifies the table to use for a given key. @@ -37,10 +51,6 @@ macro_rules! tables { const NAME: &'static str = stringify!($name); type Type = $ty; } - - // Type level magic - pub trait []: super::_private::Seal {} - impl [] for $name {} )* } @@ -51,16 +61,13 @@ macro_rules! tables { $(pub $name: usize),* } - impl CountPerTable { - pub fn by_table(&self) -> usize { - match T::NAME { - $( - stringify!($name) => self.$name, - )* - _ => unreachable!(), + $( + impl access::AccessCopy for CountPerTable { + fn value(&self) -> usize { + self.$name } } - } + )* /// One key value per table #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] @@ -69,25 +76,23 @@ macro_rules! 
tables { $(pub $name: Key),* } - impl KeyPerTable { - pub fn by_table(&self) -> Key { - match T::NAME { - $( - stringify!($name) => Key::::from_raw(self.$name.raw()), - )* - _ => unreachable!(), + $( + impl access::AccessCopy> for KeyPerTable { + fn value(&self) -> Key { + self.$name } } - - pub fn mut_by_table(&self) -> Key { - match T::NAME { - $( - stringify!($name) => Key::::from_raw(self.$name.raw()), - )* - _ => unreachable!(), + impl access::AccessRef> for KeyPerTable { + fn get(&self) -> &Key { + &self.$name } } - } + impl access::AccessMut> for KeyPerTable { + fn get_mut(&mut self) -> &mut Key { + &mut self.$name + } + } + )* pub fn next_keys(reg: &mut R) -> KeyPerTable { KeyPerTable { @@ -95,6 +100,17 @@ macro_rules! tables { } } + /// Used to add together keys and counts to deterimine possible overwrite range + pub fn add_keys(keys: KeyPerTable, counts: CountPerTable) -> KeyPerTable { + KeyPerTable { + $( + $name: keys.$name.add_u32(counts.$name.try_into() + .expect("Count too large. Shoudn't happen as we control inputs here.") + ), + )* + } + } + /// Registeration changes per table #[derive(Debug, Clone, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] #[allow(non_snake_case)] // The field names match table type names eactly @@ -103,22 +119,31 @@ macro_rules! 
tables { } impl ChangesPerTable { - pub fn push(&self, value: T::Type) -> &mut WriteTo { - match T::NAME { - $( - stringify!($name) => self.$name.values.push(value), - )* - _ => unreachable!(), - } + pub fn is_empty(&self) -> bool { + true $(&& self.$name.values.is_empty())* } - pub fn apply(&self, reg: &mut impl db::RegistryWrite) { + /// Apply changes to the registry db + pub fn apply_to_registry(self, reg: &mut R) { $( reg.batch_write(self.$name.start_key, self.$name.values.clone()); )* } } - }}; + + $( + impl access::AccessRef> for ChangesPerTable { + fn get(&self) -> &WriteTo { + &self.$name + } + } + impl access::AccessMut> for ChangesPerTable { + fn get_mut(&mut self) -> &mut WriteTo { + &mut self.$name + } + } + )* + }; } tables!( From f322d0f301d855d2913bc3c4e007d4d115e355d8 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Fri, 19 Jan 2024 12:53:28 -0500 Subject: [PATCH 006/112] Remove malleable fields --- crates/compression/src/types/tx.rs | 30 +++--------------------------- 1 file changed, 3 insertions(+), 27 deletions(-) diff --git a/crates/compression/src/types/tx.rs b/crates/compression/src/types/tx.rs index a2ae53043ed..b7f23a05e80 100644 --- a/crates/compression/src/types/tx.rs +++ b/crates/compression/src/types/tx.rs @@ -64,7 +64,6 @@ pub(crate) enum Input { asset_id: Key, tx_pointer: TxPointer, maturity: BlockHeight, - predicate_gas_used: Word, predicate: Vec, predicate_data: Vec, }, @@ -88,7 +87,6 @@ pub(crate) enum Input { recipient: Key, amount: Word, nonce: fuel_types::Nonce, - predicate_gas_used: Word, predicate: Vec, predicate_data: Vec, }, @@ -106,7 +104,6 @@ pub(crate) enum Input { amount: Word, nonce: fuel_types::Nonce, data: Vec, - predicate_gas_used: Word, predicate: Vec, predicate_data: Vec, }, @@ -122,21 +119,11 @@ pub(crate) enum Output { Contract { input_index: u8, - balance_root: Bytes32, - state_root: Bytes32, }, - Change { - to: Key, - amount: Word, - asset_id: Key, - }, + Change, - Variable { - to: Key, - amount: Word, - 
asset_id: Key, - }, + Variable, ContractCreated { contract_id: TxPointer, @@ -159,24 +146,13 @@ pub struct Create { #[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct Mint { tx_pointer: TxPointer, - input_contract: InputContract, + input_contract: TxPointer, output_contract: OutputContract, mint_amount: Word, mint_asset_id: Key, } -#[derive(Default, Debug, Clone, Serialize, Deserialize)] -pub struct InputContract { - utxo_id: TxPointer, - balance_root: Bytes32, - state_root: Bytes32, - tx_pointer: TxPointer, - contract_id: TxPointer, -} - #[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct OutputContract { input_index: u8, - balance_root: Bytes32, - state_root: Bytes32, } From 20f72697aa7e24256f786c3c7cc5b96ca9ed61df Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Sat, 27 Jan 2024 03:05:42 +0200 Subject: [PATCH 007/112] WIP --- crates/compression-derive/Cargo.toml | 20 + crates/compression-derive/src/deserialize.rs | 177 +++++++++ crates/compression-derive/src/lib.rs | 22 ++ crates/compression-derive/src/serialize.rs | 187 +++++++++ crates/compression/Cargo.toml | 4 +- crates/compression/src/lib.rs | 1 + crates/compression/src/registry/mod.rs | 16 + crates/compression/src/types/tx.rs | 387 ++++++++++++++++++- 8 files changed, 810 insertions(+), 4 deletions(-) create mode 100644 crates/compression-derive/Cargo.toml create mode 100644 crates/compression-derive/src/deserialize.rs create mode 100644 crates/compression-derive/src/lib.rs create mode 100644 crates/compression-derive/src/serialize.rs diff --git a/crates/compression-derive/Cargo.toml b/crates/compression-derive/Cargo.toml new file mode 100644 index 00000000000..96f9f03ec47 --- /dev/null +++ b/crates/compression-derive/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "fuel-core-compression-derive" +version = { workspace = true } +authors = { workspace = true } +categories = ["cryptography::cryptocurrencies"] +edition = { workspace = true } +homepage = { workspace = true } 
+keywords = ["blockchain", "cryptocurrencies", "fuel-core", "fuel-client", "fuel-compression"] +license = { workspace = true } +repository = { workspace = true } +description = "Compression and decompression derive macros for DA storage types." + +[lib] +proc-macro = true + +[dependencies] +quote = "1" +syn = { version = "2", features = ["full"] } +proc-macro2 = "1" +synstructure = "0.13" diff --git a/crates/compression-derive/src/deserialize.rs b/crates/compression-derive/src/deserialize.rs new file mode 100644 index 00000000000..0edbfb17e57 --- /dev/null +++ b/crates/compression-derive/src/deserialize.rs @@ -0,0 +1,177 @@ +use proc_macro2::TokenStream as TokenStream2; +use quote::{ + format_ident, + quote, +}; + +use crate::attribute::{ + should_skip_field, + should_skip_field_binding, + StructAttrs, +}; + +fn deserialize_struct(s: &mut synstructure::Structure) -> TokenStream2 { + assert_eq!(s.variants().len(), 1, "structs must have one variant"); + + let variant: &synstructure::VariantInfo = &s.variants()[0]; + let decode_main = variant.construct(|field, _| { + let ty = &field.ty; + if should_skip_field(&field.attrs) { + quote! { + ::core::default::Default::default() + } + } else { + quote! {{ + <#ty as ::fuel_types::canonical::Deserialize>::decode_static(buffer)? + }} + } + }); + + let decode_dynamic = variant.each(|binding| { + if should_skip_field_binding(binding) { + quote! { + *#binding = ::core::default::Default::default(); + } + } else { + quote! {{ + ::fuel_types::canonical::Deserialize::decode_dynamic(#binding, buffer)?; + }} + } + }); + + let remove_prefix = if let Some(expected_prefix) = StructAttrs::parse(s).prefix { + quote! {{ + let prefix = <_ as ::fuel_types::canonical::Deserialize>::decode_static(buffer); + if prefix != Ok(#expected_prefix) { + return ::core::result::Result::Err(::fuel_types::canonical::Error::InvalidPrefix) + } + }} + } else { + quote! {} + }; + + s.gen_impl(quote! 
{ + gen impl ::fuel_types::canonical::Deserialize for @Self { + fn decode_static(buffer: &mut I) -> ::core::result::Result { + #remove_prefix + ::core::result::Result::Ok(#decode_main) + } + + fn decode_dynamic(&mut self, buffer: &mut I) -> ::core::result::Result<(), ::fuel_types::canonical::Error> { + match self { + #decode_dynamic, + }; + ::core::result::Result::Ok(()) + } + } + }) +} + +fn deserialize_enum(s: &synstructure::Structure) -> TokenStream2 { + let _name = &s.ast().ident; + + assert!(!s.variants().is_empty(), "got invalid empty enum"); + + let mut next_discriminant = quote! { { 0u64 } }; + let enum_ident = &s.ast().ident; + let calculated_discriminants = + s.variants().iter().enumerate().map(|(index, variant)| { + if variant.ast().discriminant.is_some() { + let variant_ident = variant.ast().ident; + next_discriminant = quote! { { #enum_ident::#variant_ident as u64 } }; + } + + let const_ident = format_ident!("V{}", index); + let result = quote! { const #const_ident: ::core::primitive::u64 = #next_discriminant; }; + + next_discriminant = quote! { ( (#next_discriminant) + 1u64 ) }; + + result + }); + + let decode_static: TokenStream2 = s + .variants() + .iter() + .enumerate() + .map(|(index, variant)| { + let decode_main = variant.construct(|field, _| { + if should_skip_field(&field.attrs) { + quote! { + ::core::default::Default::default() + } + } else { + let ty = &field.ty; + quote! {{ + <#ty as ::fuel_types::canonical::Deserialize>::decode_static(buffer)? + }} + } + }); + let const_ident = format_ident!("V{}", index); + + quote! { + #const_ident => { + ::core::result::Result::Ok(#decode_main) + } + } + }) + .collect(); + + let decode_dynamic = s.variants().iter().map(|variant| { + let decode_dynamic = variant.each(|binding| { + if !should_skip_field_binding(binding) { + quote! {{ + ::fuel_types::canonical::Deserialize::decode_dynamic(#binding, buffer)?; + }} + } else { + quote! {} + } + }); + + quote! 
{ + #decode_dynamic + } + }); + + let discriminant = { + quote! { + <::core::primitive::u64 as ::fuel_types::canonical::Deserialize>::decode(buffer)? + } + }; + + s.gen_impl(quote! { + gen impl ::fuel_types::canonical::Deserialize for @Self { + fn decode_static(buffer: &mut I) -> ::core::result::Result { + #( #calculated_discriminants )* + + match #discriminant { + #decode_static + _ => ::core::result::Result::Err(::fuel_types::canonical::Error::UnknownDiscriminant), + } + } + + fn decode_dynamic(&mut self, buffer: &mut I) -> ::core::result::Result<(), ::fuel_types::canonical::Error> { + match self { + #( + #decode_dynamic + )* + _ => return ::core::result::Result::Err(::fuel_types::canonical::Error::UnknownDiscriminant), + }; + + ::core::result::Result::Ok(()) + } + } + }) +} + +/// Derives `Deserialize` trait for the given `struct` or `enum`. +pub fn deserialize_derive(mut s: synstructure::Structure) -> TokenStream2 { + s.bind_with(|_| synstructure::BindStyle::RefMut) + .add_bounds(synstructure::AddBounds::Fields) + .underscore_const(true); + + match s.ast().data { + syn::Data::Struct(_) => deserialize_struct(&mut s), + syn::Data::Enum(_) => deserialize_enum(&s), + _ => panic!("Can't derive `Deserialize` for `union`s"), + } +} diff --git a/crates/compression-derive/src/lib.rs b/crates/compression-derive/src/lib.rs new file mode 100644 index 00000000000..6c9f3d9173a --- /dev/null +++ b/crates/compression-derive/src/lib.rs @@ -0,0 +1,22 @@ +//! Derive macros for canonical type serialization and deserialization. + +#![deny(unused_must_use, missing_docs)] + +extern crate proc_macro; +mod deserialize; +mod serialize; + +use self::{ + deserialize::deserialize_derive, + serialize::serialize_derive, +}; +synstructure::decl_derive!( + [Deserialize, attributes(canonical)] => + /// Derives `Deserialize` trait for the given `struct` or `enum`. 
+ deserialize_derive +); +synstructure::decl_derive!( + [Serialize, attributes(canonical)] => + /// Derives `Serialize` trait for the given `struct` or `enum`. + serialize_derive +); diff --git a/crates/compression-derive/src/serialize.rs b/crates/compression-derive/src/serialize.rs new file mode 100644 index 00000000000..55e0d2c1059 --- /dev/null +++ b/crates/compression-derive/src/serialize.rs @@ -0,0 +1,187 @@ +use proc_macro2::TokenStream as TokenStream2; +use quote::quote; + +use crate::attribute::{ + should_skip_field_binding, + StructAttrs, +}; + +fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { + assert_eq!(s.variants().len(), 1, "structs must have one variant"); + let variant: &synstructure::VariantInfo = &s.variants()[0]; + + let name = s.ast().ident; + let compact_name = format_ident!("Compact{}", name); + + let compact_fields = variant.each(|binding| { + + }); + + s.gen_impl(quote! { + pub struct #compact_name { + #compact_fields + } + + gen impl ::fuel_core_compression::Compactable for @Self { + type Compact = #compact_name; + + fn count(&self) -> CountPerTable { + #count_per_field; + } + + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex { + #compact_per_field; + } + + fn decompact(compact: Self::Compact, reg: &R) -> Self + where + R: db::RegistryRead { + #decompact_per_field; + } + } + }) +} + +fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { + assert!(!s.variants().is_empty(), "got invalid empty enum"); + let mut next_discriminant = quote! { { 0u64 } }; + let encode_static = s.variants().iter().map(|v| { + let pat = v.pat(); + let encode_static_iter = v.bindings().iter().map(|binding| { + if should_skip_field_binding(binding) { + quote! {} + } else { + quote! 
{ + ::fuel_types::canonical::Serialize::encode_static(#binding, buffer)?; + } + } + }); + + if v.ast().discriminant.is_some() { + let variant_ident = v.ast().ident; + next_discriminant = quote! { { Self::#variant_ident as u64 } }; + } + + let encode_discriminant = quote! { + <::core::primitive::u64 as ::fuel_types::canonical::Serialize>::encode(&#next_discriminant, buffer)?; + }; + next_discriminant = quote! { ( (#next_discriminant) + 1u64 ) }; + + quote! { + #pat => { + #encode_discriminant + #( + { #encode_static_iter } + )* + } + } + }); + let encode_dynamic = s.variants().iter().map(|v| { + let encode_dynamic_iter = v.each(|binding| { + if should_skip_field_binding(binding) { + quote! {} + } else { + quote! { + ::fuel_types::canonical::Serialize::encode_dynamic(#binding, buffer)?; + } + } + }); + quote! { + #encode_dynamic_iter + } + }); + + let match_size_static: TokenStream2 = s + .variants() + .iter() + .map(|variant| { + variant.each(|binding| { + if should_skip_field_binding(binding) { + quote! {} + } else { + quote! { + size = ::fuel_types::canonical::add_sizes(size, #binding.size_static()); + } + } + }) + }) + .collect(); + let match_size_static = quote! {{ + // `repr(128)` is unstable, so because of that we can use 8 bytes. + let mut size = 8; + match self { #match_size_static } size } + }; + + let match_size_dynamic: TokenStream2 = s + .variants() + .iter() + .map(|variant| { + variant.each(|binding| { + if should_skip_field_binding(binding) { + quote! {} + } else { + quote! { + size = ::fuel_types::canonical::add_sizes(size, #binding.size_dynamic()); + } + } + }) + }) + .collect(); + let match_size_dynamic = + quote! {{ let mut size = 0; match self { #match_size_dynamic } size }}; + + let impl_code = s.gen_impl(quote! 
{ + gen impl ::fuel_types::canonical::Serialize for @Self { + #[inline(always)] + fn size_static(&self) -> usize { + #match_size_static + } + + #[inline(always)] + fn size_dynamic(&self) -> usize { + #match_size_dynamic + } + + #[inline(always)] + fn encode_static(&self, buffer: &mut O) -> ::core::result::Result<(), ::fuel_types::canonical::Error> { + match self { + #( + #encode_static + )*, + _ => return ::core::result::Result::Err(::fuel_types::canonical::Error::UnknownDiscriminant), + }; + + ::core::result::Result::Ok(()) + } + + fn encode_dynamic(&self, buffer: &mut O) -> ::core::result::Result<(), ::fuel_types::canonical::Error> { + match self { + #( + #encode_dynamic + )*, + _ => return ::core::result::Result::Err(::fuel_types::canonical::Error::UnknownDiscriminant), + }; + + ::core::result::Result::Ok(()) + } + } + }); + + quote! { + #impl_code + } +} + +/// Derives `Serialize` trait for the given `struct` or `enum`. +pub fn serialize_derive(mut s: synstructure::Structure) -> TokenStream2 { + s.add_bounds(synstructure::AddBounds::Fields) + .underscore_const(true); + + match s.ast().data { + syn::Data::Struct(_) => serialize_struct(&s), + syn::Data::Enum(_) => serialize_enum(&s), + _ => panic!("Can't derive `Serialize` for `union`s"), + } +} diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 360cf991d3f..054ada7701d 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -5,7 +5,7 @@ authors = { workspace = true } categories = ["cryptography::cryptocurrencies"] edition = { workspace = true } homepage = { workspace = true } -keywords = ["blockchain", "cryptocurrencies", "fuel-core", "fuel-client", "fuel-compressor"] +keywords = ["blockchain", "cryptocurrencies", "fuel-core", "fuel-client", "fuel-compression"] license = { workspace = true } repository = { workspace = true } description = "Compression and decompression of Fuel blocks for DA storage." 
@@ -18,3 +18,5 @@ postcard = { version = "1.0", features = ["use-std"] } bincode = "1.3" paste = "1.0" + +# fuel-core-compression-derive = { path = "../derive" } \ No newline at end of file diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 4675758d1d7..5892148ca5d 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -3,4 +3,5 @@ mod compression; mod registry; mod types; +pub use compression::Compactable; pub use registry::Key; diff --git a/crates/compression/src/registry/mod.rs b/crates/compression/src/registry/mod.rs index e8597a3cd27..51ab9e03ffa 100644 --- a/crates/compression/src/registry/mod.rs +++ b/crates/compression/src/registry/mod.rs @@ -69,6 +69,22 @@ macro_rules! tables { } )* + impl core::ops::Add for CountPerTable { + type Output = Self; + + fn add(self, rhs: CountPerTable) -> Self::Output { + Self { + $($name: self.$name + rhs.$name),* + } + } + } + + impl core::ops::AddAssign for CountPerTable { + fn add_assign(&mut self, rhs: CountPerTable) { + $(self.$name += rhs.$name);* + } + } + /// One key value per table #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] #[allow(non_snake_case)] // The field names match table type names eactly diff --git a/crates/compression/src/types/tx.rs b/crates/compression/src/types/tx.rs index b7f23a05e80..3109106f98d 100644 --- a/crates/compression/src/types/tx.rs +++ b/crates/compression/src/types/tx.rs @@ -24,8 +24,6 @@ use crate::registry::{ Key, }; -use super::MaybeCompressed; - #[derive(Debug, Clone, Serialize, Deserialize)] #[allow(clippy::large_enum_variant)] pub(crate) enum Transaction { @@ -37,7 +35,7 @@ pub(crate) enum Transaction { #[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct Script { script_gas_limit: Word, - script: MaybeCompressed, + script: Key, script_data: Vec, policies: fuel_tx::policies::Policies, inputs: Vec, @@ -156,3 +154,386 @@ pub struct Mint { pub struct OutputContract { 
input_index: u8, } + +mod compaction { + // This could be done using a derive macro as well. Not sure if that's worth it. + + use fuel_core_types::fuel_tx::field::{ + Inputs, + *, + }; + + use crate::{ + compression::CompactionContext, + registry::{ + db, + CountPerTable, + }, + Compactable, + }; + + use super::*; + + impl Compactable for fuel_tx::Transaction { + type Compact = super::Transaction; + + fn count(&self) -> CountPerTable { + match self { + fuel_tx::Transaction::Script(tx) => tx.count(), + fuel_tx::Transaction::Create(tx) => tx.count(), + fuel_tx::Transaction::Mint(tx) => tx.count(), + } + } + + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, + { + match self { + fuel_tx::Transaction::Script(tx) => { + Self::Compact::Script(tx.compact(ctx)) + } + fuel_tx::Transaction::Create(tx) => { + Self::Compact::Create(tx.compact(ctx)) + } + fuel_tx::Transaction::Mint(tx) => Self::Compact::Mint(tx.compact(ctx)), + } + } + + fn decompact(compact: Self::Compact, reg: &R) -> Self + where + R: db::RegistryRead, + { + match compact { + super::Transaction::Script(tx) => { + fuel_tx::Transaction::Script(fuel_tx::Script::decompact(tx, reg)) + } + super::Transaction::Create(tx) => { + fuel_tx::Transaction::Create(fuel_tx::Create::decompact(tx, reg)) + } + super::Transaction::Mint(tx) => { + fuel_tx::Transaction::Mint(fuel_tx::Mint::decompact(tx, reg)) + } + } + } + } + + impl Compactable for fuel_tx::Script { + type Compact = super::Script; + + fn count(&self) -> CountPerTable { + let mut sum = CountPerTable { + ScriptCode: 1, + Witness: self.witnesses().len(), + ..Default::default() + }; + for input in self.inputs() { + sum += ::count(input); + } + for output in self.outputs() { + sum += ::count(output); + } + sum + } + + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, + { + Self::Compact { + 
script_gas_limit: self.script_gas_limit, + script: ctx.to_key::(self.script().clone()), + script_data: self.script_data().clone(), + policies: self.policies().clone(), + inputs: self.inputs().map(|i| i.compact(ctx)).collect(), + outputs: self.outputs().map(|o| o.compact(ctx)).collect(), + witnesses: self.witnesses().map(|w| w.compact(ctx)).collect(), + receipts_root: self.receipts_root, + } + } + + fn decompact(compact: Self::Compact, reg: &R) -> Self + where + R: db::RegistryRead, + { + Self { + script_gas_limit: compact.script_gas_limit, + script: reg.read::(compact.script), + script_data: compact.script_data, + policies: compact.policies, + inputs: compact.inputs().map(|i| i.decompact(reg)).collect(), + outputs: compact.outputs().map(|o| o.decompact(reg)).collect(), + witnesses: compact.witnesses().map(|w| w.decompact(reg)).collect(), + receipts_root: compact.receipts_root, + } + } + } + + impl Compactable for fuel_tx::Create { + type Compact = super::Create; + + fn count(&self) -> CountPerTable { + let sum = CountPerTable { + Witness: self.witnesses().len(), + ..Default::default() + }; + for input in self.inputs() { + sum += input.count(); + } + for output in self.outputs() { + sum += output.count(); + } + sum + } + + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, + { + Self::Compact { + bytecode_length: self.bytecode_length, + bytecode_witness_index: self.bytecode_witness_index, + policies: self.policies().clone(), + storage_slots: self.storage_slots().clone(), + inputs: self.inputs().map(|i| i.compact(ctx)).collect(), + outputs: self.outputs().map(|o| o.compact(ctx)).collect(), + witnesses: self.witnesses().map(|w| w.compact(ctx)).collect(), + salt: *self.salt(), + } + } + + fn decompact(compact: Self::Compact, reg: &R) -> Self + where + R: db::RegistryRead, + { + Self { + bytecode_length: compact.bytecode_length, + bytecode_witness_index: compact.bytecode_witness_index, + 
policies: compact.policies, + storage_slots: compact.storage_slots, + inputs: compact + .inputs() + .map(|i| fuel_tx::Input::decompact(i, reg)) + .collect(), + outputs: compact.outputs().map(|o| o.decompact(reg)).collect(), + witnesses: compact.witnesses().map(|w| w.decompact(reg)).collect(), + salt: compact.salt, + } + } + } + + impl Compactable for fuel_tx::Mint { + type Compact = super::Mint; + + fn count(&self) -> CountPerTable { + let sum = CountPerTable { + AssetId: 1, + Witness: self.witnesses().len(), + ..Default::default() + }; + for input in self.inputs() { + sum += input.count(); + } + for output in self.outputs() { + sum += output.count(); + } + sum + } + + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, + { + Self::Compact { + tx_pointer: self.tx_pointer, + input_contract: self.input_contract.compact(ctx), + output_contract: self.output_contract.compact(ctx), + mint_amount: self.mint_amount, + mint_asset_id: ctx.to_key::(self.mint_asset_id), + } + } + + fn decompact(compact: Self::Compact, reg: &R) -> Self + where + R: db::RegistryRead, + { + Self { + tx_pointer: compact.tx_pointer, + input_contract: compact.input_contract.decompact(reg), + output_contract: compact.output_contract.decompact(reg), + mint_amount: compact.mint_amount, + mint_asset_id: reg.read::(compact.mint_asset_id), + } + } + } + + impl Compactable for fuel_tx::input::Input { + type Compact = super::Input; + + fn count(&self) -> CountPerTable { + match self { + fuel_tx::input::Input::CoinSigned(_) => CountPerTable { + AssetId: 1, + Address: 1, + Witness: 1, + ..Default::default() + }, + fuel_tx::input::Input::CoinPredicate(_) => CountPerTable { + AssetId: 1, + Address: 1, + Witness: 1, + ..Default::default() + }, + fuel_tx::input::Input::Contract(_) => CountPerTable { + AssetId: 1, + ..Default::default() + }, + fuel_tx::input::Input::MessageCoinSigned(_) => CountPerTable { + AssetId: 1, + Address: 2, 
+ Witness: 1, + ..Default::default() + }, + fuel_tx::input::Input::MessageCoinPredicate(_) => CountPerTable { + AssetId: 1, + Address: 2, + Witness: 1, + ..Default::default() + }, + fuel_tx::input::Input::MessageDataSigned(_) => CountPerTable { + AssetId: 1, + Address: 2, + Witness: 1, + ..Default::default() + }, + fuel_tx::input::Input::MessageDataPredicate(_) => CountPerTable { + AssetId: 1, + Address: 2, + Witness: 1, + ..Default::default() + }, + } + } + + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, + { + match self { + fuel_tx::input::Input::CoinSigned(input) => { + Self::Compact::CoinSigned(input.compact(ctx)) + } + fuel_tx::input::Input::CoinPredicate(input) => { + Self::Compact::CoinPredicate(input.compact(ctx)) + } + fuel_tx::input::Input::Contract(input) => { + Self::Compact::Contract(input.compact(ctx)) + } + fuel_tx::input::Input::MessageCoinSigned(input) => { + Self::Compact::MessageCoinSigned(input.compact(ctx)) + } + fuel_tx::input::Input::MessageCoinPredicate(input) => { + Self::Compact::MessageCoinPredicate(input.compact(ctx)) + } + fuel_tx::input::Input::MessageDataSigned(input) => { + Self::Compact::MessageDataSigned(input.compact(ctx)) + } + fuel_tx::input::Input::MessageDataPredicate(input) => { + Self::Compact::MessageDataPredicate(input.compact(ctx)) + } + } + } + + fn decompact(compact: Self::Compact, reg: &R) -> Self + where + R: db::RegistryRead, + { + todo!() + } + } + + impl Compactable for fuel_tx::input::Output { + type Compact = super::Output; + + fn count(&self) -> CountPerTable { + match self { + fuel_tx::input::Input::CoinSigned(_) => CountPerTable { + AssetId: 1, + Address: 1, + Witness: 1, + ..Default::default() + }, + fuel_tx::input::Input::CoinPredicate(_) => CountPerTable { + AssetId: 1, + Address: 1, + Witness: 1, + ..Default::default() + }, + fuel_tx::input::Input::Contract(_) => CountPerTable { + AssetId: 1, + ..Default::default() + 
}, + fuel_tx::input::Input::MessageCoinSigned(_) => CountPerTable { + AssetId: 1, + Address: 2, + Witness: 1, + ..Default::default() + }, + fuel_tx::input::Input::MessageCoinPredicate(_) => CountPerTable { + AssetId: 1, + Address: 2, + Witness: 1, + ..Default::default() + }, + fuel_tx::input::Input::MessageDataSigned(_) => CountPerTable { + AssetId: 1, + Address: 2, + Witness: 1, + ..Default::default() + }, + fuel_tx::input::Input::MessageDataPredicate(_) => CountPerTable { + AssetId: 1, + Address: 2, + Witness: 1, + ..Default::default() + }, + } + } + + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, + { + match self { + fuel_tx::input::Input::CoinSigned(input) => { + Self::Compact::CoinSigned(input.compact(ctx)) + } + fuel_tx::input::Input::CoinPredicate(input) => { + Self::Compact::CoinPredicate(input.compact(ctx)) + } + fuel_tx::input::Input::Contract(input) => { + Self::Compact::Contract(input.compact(ctx)) + } + fuel_tx::input::Input::MessageCoinSigned(input) => { + Self::Compact::MessageCoinSigned(input.compact(ctx)) + } + fuel_tx::input::Input::MessageCoinPredicate(input) => { + Self::Compact::MessageCoinPredicate(input.compact(ctx)) + } + fuel_tx::input::Input::MessageDataSigned(input) => { + Self::Compact::MessageDataSigned(input.compact(ctx)) + } + fuel_tx::input::Input::MessageDataPredicate(input) => { + Self::Compact::MessageDataPredicate(input.compact(ctx)) + } + } + } + + fn decompact(compact: Self::Compact, reg: &R) -> Self + where + R: db::RegistryRead, + { + todo!() + } + } +} From 018c088796774124d59069847a75770c29d8e25d Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 29 Jan 2024 16:33:11 +0200 Subject: [PATCH 008/112] WIP --- crates/compression-derive/Cargo.toml | 1 + crates/compression-derive/src/attribute.rs | 79 +++++ crates/compression-derive/src/deserialize.rs | 322 +++++++++--------- crates/compression-derive/src/lib.rs | 6 +- 
crates/compression-derive/src/serialize.rs | 330 +++++++++++-------- crates/compression/Cargo.toml | 2 +- crates/compression/src/compression.rs | 108 ++++++ crates/compression/src/lib.rs | 17 +- crates/compression/src/registry/mod.rs | 2 +- crates/compression/src/types/tx.rs | 17 +- 10 files changed, 578 insertions(+), 306 deletions(-) create mode 100644 crates/compression-derive/src/attribute.rs diff --git a/crates/compression-derive/Cargo.toml b/crates/compression-derive/Cargo.toml index 96f9f03ec47..ae3b0a19ae4 100644 --- a/crates/compression-derive/Cargo.toml +++ b/crates/compression-derive/Cargo.toml @@ -18,3 +18,4 @@ quote = "1" syn = { version = "2", features = ["full"] } proc-macro2 = "1" synstructure = "0.13" +regex = "1" diff --git a/crates/compression-derive/src/attribute.rs b/crates/compression-derive/src/attribute.rs new file mode 100644 index 00000000000..64aefb4d0db --- /dev/null +++ b/crates/compression-derive/src/attribute.rs @@ -0,0 +1,79 @@ +use regex::Regex; + +const ATTR: &str = "da_compress"; + +/// struct/enum attributes +pub enum StructureAttrs { + /// Compacted recursively. + Normal, + /// Transparent. + Transparent, +} +impl StructureAttrs { + pub fn parse(attrs: &[syn::Attribute]) -> Self { + let mut result = Self::Normal; + for attr in attrs { + if attr.style != syn::AttrStyle::Outer { + continue; + } + + if let syn::Meta::List(ml) = &attr.meta { + if ml.path.segments.len() == 1 && ml.path.segments[0].ident == ATTR { + if !matches!(result, Self::Normal) { + panic!("Duplicate attribute: {}", ml.tokens); + } + + let attr_contents = ml.tokens.to_string(); + if attr_contents == "transparent" { + result = Self::Transparent; + } else { + panic!("Invalid attribute: {}", ml.tokens); + } + } + } + } + + result + } +} + +/// Field attributes +pub enum FieldAttrs { + /// Skipped when compacting, and must be reconstructed when decompacting. + Skip, + /// Compacted recursively. + Normal, + /// This value is compacted into a registry lookup. 
+ Registry(String), +} +impl FieldAttrs { + pub fn parse(attrs: &[syn::Attribute]) -> Self { + let re_registry = Regex::new(r#"^registry\s*=\s*"([a-zA-Z_]+)"$"#).unwrap(); + + let mut result = Self::Normal; + for attr in attrs { + if attr.style != syn::AttrStyle::Outer { + continue; + } + + if let syn::Meta::List(ml) = &attr.meta { + if ml.path.segments.len() == 1 && ml.path.segments[0].ident == ATTR { + if !matches!(result, Self::Normal) { + panic!("Duplicate attribute: {}", ml.tokens); + } + + let attr_contents = ml.tokens.to_string(); + if attr_contents == "skip" { + result = Self::Skip; + } else if let Some(m) = re_registry.captures(&attr_contents) { + result = Self::Registry(m.get(1).unwrap().as_str().to_owned()); + } else { + panic!("Invalid attribute: {}", ml.tokens); + } + } + } + } + + result + } +} diff --git a/crates/compression-derive/src/deserialize.rs b/crates/compression-derive/src/deserialize.rs index 0edbfb17e57..24550b11fd5 100644 --- a/crates/compression-derive/src/deserialize.rs +++ b/crates/compression-derive/src/deserialize.rs @@ -4,164 +4,158 @@ use quote::{ quote, }; -use crate::attribute::{ - should_skip_field, - should_skip_field_binding, - StructAttrs, -}; - -fn deserialize_struct(s: &mut synstructure::Structure) -> TokenStream2 { - assert_eq!(s.variants().len(), 1, "structs must have one variant"); - - let variant: &synstructure::VariantInfo = &s.variants()[0]; - let decode_main = variant.construct(|field, _| { - let ty = &field.ty; - if should_skip_field(&field.attrs) { - quote! { - ::core::default::Default::default() - } - } else { - quote! {{ - <#ty as ::fuel_types::canonical::Deserialize>::decode_static(buffer)? - }} - } - }); - - let decode_dynamic = variant.each(|binding| { - if should_skip_field_binding(binding) { - quote! { - *#binding = ::core::default::Default::default(); - } - } else { - quote! 
{{ - ::fuel_types::canonical::Deserialize::decode_dynamic(#binding, buffer)?; - }} - } - }); - - let remove_prefix = if let Some(expected_prefix) = StructAttrs::parse(s).prefix { - quote! {{ - let prefix = <_ as ::fuel_types::canonical::Deserialize>::decode_static(buffer); - if prefix != Ok(#expected_prefix) { - return ::core::result::Result::Err(::fuel_types::canonical::Error::InvalidPrefix) - } - }} - } else { - quote! {} - }; - - s.gen_impl(quote! { - gen impl ::fuel_types::canonical::Deserialize for @Self { - fn decode_static(buffer: &mut I) -> ::core::result::Result { - #remove_prefix - ::core::result::Result::Ok(#decode_main) - } - - fn decode_dynamic(&mut self, buffer: &mut I) -> ::core::result::Result<(), ::fuel_types::canonical::Error> { - match self { - #decode_dynamic, - }; - ::core::result::Result::Ok(()) - } - } - }) -} - -fn deserialize_enum(s: &synstructure::Structure) -> TokenStream2 { - let _name = &s.ast().ident; - - assert!(!s.variants().is_empty(), "got invalid empty enum"); - - let mut next_discriminant = quote! { { 0u64 } }; - let enum_ident = &s.ast().ident; - let calculated_discriminants = - s.variants().iter().enumerate().map(|(index, variant)| { - if variant.ast().discriminant.is_some() { - let variant_ident = variant.ast().ident; - next_discriminant = quote! { { #enum_ident::#variant_ident as u64 } }; - } - - let const_ident = format_ident!("V{}", index); - let result = quote! { const #const_ident: ::core::primitive::u64 = #next_discriminant; }; - - next_discriminant = quote! { ( (#next_discriminant) + 1u64 ) }; - - result - }); - - let decode_static: TokenStream2 = s - .variants() - .iter() - .enumerate() - .map(|(index, variant)| { - let decode_main = variant.construct(|field, _| { - if should_skip_field(&field.attrs) { - quote! { - ::core::default::Default::default() - } - } else { - let ty = &field.ty; - quote! {{ - <#ty as ::fuel_types::canonical::Deserialize>::decode_static(buffer)? 
- }} - } - }); - let const_ident = format_ident!("V{}", index); - - quote! { - #const_ident => { - ::core::result::Result::Ok(#decode_main) - } - } - }) - .collect(); - - let decode_dynamic = s.variants().iter().map(|variant| { - let decode_dynamic = variant.each(|binding| { - if !should_skip_field_binding(binding) { - quote! {{ - ::fuel_types::canonical::Deserialize::decode_dynamic(#binding, buffer)?; - }} - } else { - quote! {} - } - }); - - quote! { - #decode_dynamic - } - }); - - let discriminant = { - quote! { - <::core::primitive::u64 as ::fuel_types::canonical::Deserialize>::decode(buffer)? - } - }; - - s.gen_impl(quote! { - gen impl ::fuel_types::canonical::Deserialize for @Self { - fn decode_static(buffer: &mut I) -> ::core::result::Result { - #( #calculated_discriminants )* - - match #discriminant { - #decode_static - _ => ::core::result::Result::Err(::fuel_types::canonical::Error::UnknownDiscriminant), - } - } - - fn decode_dynamic(&mut self, buffer: &mut I) -> ::core::result::Result<(), ::fuel_types::canonical::Error> { - match self { - #( - #decode_dynamic - )* - _ => return ::core::result::Result::Err(::fuel_types::canonical::Error::UnknownDiscriminant), - }; - - ::core::result::Result::Ok(()) - } - } - }) -} +// fn deserialize_struct(s: &mut synstructure::Structure) -> TokenStream2 { +// assert_eq!(s.variants().len(), 1, "structs must have one variant"); + +// let variant: &synstructure::VariantInfo = &s.variants()[0]; +// let decode_main = variant.construct(|field, _| { +// let ty = &field.ty; +// if should_skip_field(&field.attrs) { +// quote! { +// ::core::default::Default::default() +// } +// } else { +// quote! {{ +// <#ty as ::fuel_types::canonical::Deserialize>::decode_static(buffer)? +// }} +// } +// }); + +// let decode_dynamic = variant.each(|binding| { +// if should_skip_field_binding(binding) { +// quote! { +// *#binding = ::core::default::Default::default(); +// } +// } else { +// quote! 
{{ +// ::fuel_types::canonical::Deserialize::decode_dynamic(#binding, buffer)?; +// }} +// } +// }); + +// let remove_prefix = if let Some(expected_prefix) = StructAttrs::parse(s).prefix { +// quote! {{ +// let prefix = <_ as ::fuel_types::canonical::Deserialize>::decode_static(buffer); +// if prefix != Ok(#expected_prefix) { +// return ::core::result::Result::Err(::fuel_types::canonical::Error::InvalidPrefix) +// } +// }} +// } else { +// quote! {} +// }; + +// s.gen_impl(quote! { +// gen impl ::fuel_types::canonical::Deserialize for @Self { +// fn decode_static(buffer: &mut I) -> ::core::result::Result { +// #remove_prefix +// ::core::result::Result::Ok(#decode_main) +// } + +// fn decode_dynamic(&mut self, buffer: &mut I) -> ::core::result::Result<(), ::fuel_types::canonical::Error> { +// match self { +// #decode_dynamic, +// }; +// ::core::result::Result::Ok(()) +// } +// } +// }) +// } + +// fn deserialize_enum(s: &synstructure::Structure) -> TokenStream2 { +// let _name = &s.ast().ident; + +// assert!(!s.variants().is_empty(), "got invalid empty enum"); + +// let mut next_discriminant = quote! { { 0u64 } }; +// let enum_ident = &s.ast().ident; +// let calculated_discriminants = +// s.variants().iter().enumerate().map(|(index, variant)| { +// if variant.ast().discriminant.is_some() { +// let variant_ident = variant.ast().ident; +// next_discriminant = quote! { { #enum_ident::#variant_ident as u64 } }; +// } + +// let const_ident = format_ident!("V{}", index); +// let result = quote! { const #const_ident: ::core::primitive::u64 = #next_discriminant; }; + +// next_discriminant = quote! { ( (#next_discriminant) + 1u64 ) }; + +// result +// }); + +// let decode_static: TokenStream2 = s +// .variants() +// .iter() +// .enumerate() +// .map(|(index, variant)| { +// let decode_main = variant.construct(|field, _| { +// if should_skip_field(&field.attrs) { +// quote! { +// ::core::default::Default::default() +// } +// } else { +// let ty = &field.ty; +// quote! 
{{ +// <#ty as ::fuel_types::canonical::Deserialize>::decode_static(buffer)? +// }} +// } +// }); +// let const_ident = format_ident!("V{}", index); + +// quote! { +// #const_ident => { +// ::core::result::Result::Ok(#decode_main) +// } +// } +// }) +// .collect(); + +// let decode_dynamic = s.variants().iter().map(|variant| { +// let decode_dynamic = variant.each(|binding| { +// if !should_skip_field_binding(binding) { +// quote! {{ +// ::fuel_types::canonical::Deserialize::decode_dynamic(#binding, buffer)?; +// }} +// } else { +// quote! {} +// } +// }); + +// quote! { +// #decode_dynamic +// } +// }); + +// let discriminant = { +// quote! { +// <::core::primitive::u64 as ::fuel_types::canonical::Deserialize>::decode(buffer)? +// } +// }; + +// s.gen_impl(quote! { +// gen impl ::fuel_types::canonical::Deserialize for @Self { +// fn decode_static(buffer: &mut I) -> ::core::result::Result { +// #( #calculated_discriminants )* + +// match #discriminant { +// #decode_static +// _ => ::core::result::Result::Err(::fuel_types::canonical::Error::UnknownDiscriminant), +// } +// } + +// fn decode_dynamic(&mut self, buffer: &mut I) -> ::core::result::Result<(), ::fuel_types::canonical::Error> { +// match self { +// #( +// #decode_dynamic +// )* +// _ => return ::core::result::Result::Err(::fuel_types::canonical::Error::UnknownDiscriminant), +// }; + +// ::core::result::Result::Ok(()) +// } +// } +// }) +// } /// Derives `Deserialize` trait for the given `struct` or `enum`. 
pub fn deserialize_derive(mut s: synstructure::Structure) -> TokenStream2 { @@ -169,9 +163,11 @@ pub fn deserialize_derive(mut s: synstructure::Structure) -> TokenStream2 { .add_bounds(synstructure::AddBounds::Fields) .underscore_const(true); - match s.ast().data { - syn::Data::Struct(_) => deserialize_struct(&mut s), - syn::Data::Enum(_) => deserialize_enum(&s), - _ => panic!("Can't derive `Deserialize` for `union`s"), - } + // match s.ast().data { + // syn::Data::Struct(_) => deserialize_struct(&mut s), + // syn::Data::Enum(_) => deserialize_enum(&s), + // _ => panic!("Can't derive `Deserialize` for `union`s"), + // } + + quote! {} } diff --git a/crates/compression-derive/src/lib.rs b/crates/compression-derive/src/lib.rs index 6c9f3d9173a..2eae5539658 100644 --- a/crates/compression-derive/src/lib.rs +++ b/crates/compression-derive/src/lib.rs @@ -3,6 +3,7 @@ #![deny(unused_must_use, missing_docs)] extern crate proc_macro; +mod attribute; mod deserialize; mod serialize; @@ -10,13 +11,14 @@ use self::{ deserialize::deserialize_derive, serialize::serialize_derive, }; + synstructure::decl_derive!( - [Deserialize, attributes(canonical)] => + [Deserialize, attributes(da_compress)] => /// Derives `Deserialize` trait for the given `struct` or `enum`. deserialize_derive ); synstructure::decl_derive!( - [Serialize, attributes(canonical)] => + [Serialize, attributes(da_compress)] => /// Derives `Serialize` trait for the given `struct` or `enum`. 
serialize_derive ); diff --git a/crates/compression-derive/src/serialize.rs b/crates/compression-derive/src/serialize.rs index 55e0d2c1059..7e82a39d7bc 100644 --- a/crates/compression-derive/src/serialize.rs +++ b/crates/compression-derive/src/serialize.rs @@ -1,177 +1,242 @@ use proc_macro2::TokenStream as TokenStream2; -use quote::quote; +use quote::{ + format_ident, + quote, +}; use crate::attribute::{ - should_skip_field_binding, - StructAttrs, + FieldAttrs, + StructureAttrs, }; +pub struct PerField { + defs: TokenStream2, + count: TokenStream2, +} +impl PerField { + fn form(fields: &syn::Fields) -> Self { + let mut defs = TokenStream2::new(); + let mut count = TokenStream2::new(); + + for field in fields { + let attrs = FieldAttrs::parse(&field.attrs); + defs.extend(match &attrs { + FieldAttrs::Skip => quote! {}, + FieldAttrs::Normal => { + let ty = &field.ty; + let cty = quote! { + <#ty as ::fuel_core_compression::Compactable>::Compact + }; + if let Some(fname) = field.ident.as_ref() { + quote! { #fname: #cty, } + } else { + quote! { #cty, } + } + } + FieldAttrs::Registry(registry) => { + let reg_ident = format_ident!("{}", registry); + let cty = quote! { + ::fuel_core_compression::Key<::fuel_core_compression::tables::#reg_ident> + }; + if let Some(fname) = field.ident.as_ref() { + quote! { #fname: #cty, } + } else { + quote! { #cty, } + } + } + }); + count.extend(match &attrs { + FieldAttrs::Skip => quote! { CountPerTable::default() + }, + FieldAttrs::Normal => { + let ty = &field.ty; + quote! { + <#ty as ::fuel_core_compression::Compactable>::Compact::count() + + } + } + FieldAttrs::Registry(registry) => { + quote! { + CountPerTable { #registry: 1, ..CountPerTable::default() } + + } + } + }); + } + + let defs = match fields { + syn::Fields::Named(_) => quote! {{ #defs }}, + syn::Fields::Unnamed(_) => quote! {(#defs)}, + syn::Fields::Unit => quote! {}, + }; + count.extend(quote! 
{ 0 }); + + Self { defs, count } + } +} + fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { assert_eq!(s.variants().len(), 1, "structs must have one variant"); let variant: &synstructure::VariantInfo = &s.variants()[0]; - let name = s.ast().ident; + let name = &s.ast().ident; let compact_name = format_ident!("Compact{}", name); - let compact_fields = variant.each(|binding| { - - }); + let PerField { + defs, + count: count_per_field, + } = PerField::form(&variant.ast().fields); - s.gen_impl(quote! { - pub struct #compact_name { - #compact_fields - } + let g = s.ast().generics.clone(); + let w = g.where_clause.clone(); + let compact = quote! { + #[derive(Debug, Clone)] + #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] + pub struct #compact_name #g #w #defs ; + }; + + let impls = s.gen_impl(quote! { + use ::fuel_core_compression::{db, CountPerTable, CompactionContext}; gen impl ::fuel_core_compression::Compactable for @Self { - type Compact = #compact_name; + + type Compact = #compact_name #g; fn count(&self) -> CountPerTable { - #count_per_field; + // #count_per_field; + todo!() } fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact where R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex { - #compact_per_field; + // #compact_per_field; + todo!() } fn decompact(compact: Self::Compact, reg: &R) -> Self where R: db::RegistryRead { - #decompact_per_field; + // #decompact_per_field; + todo!() } } - }) + }); + + quote! { + #compact + #impls + } } fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { assert!(!s.variants().is_empty(), "got invalid empty enum"); - let mut next_discriminant = quote! { { 0u64 } }; - let encode_static = s.variants().iter().map(|v| { - let pat = v.pat(); - let encode_static_iter = v.bindings().iter().map(|binding| { - if should_skip_field_binding(binding) { - quote! {} - } else { - quote! 
{ - ::fuel_types::canonical::Serialize::encode_static(#binding, buffer)?; - } - } + + let name = &s.ast().ident; + let compact_name = format_ident!("Compact{}", name); + + let enumdef = quote! { + #[derive(Debug, Clone)] + #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] + pub enum #compact_name + }; + + let mut variantdefs = TokenStream2::new(); + let mut counts = Vec::new(); + + for variant in s.variants() { + let vname = variant.ast().ident.clone(); + + let PerField { defs, count } = PerField::form(&variant.ast().fields); + + variantdefs.extend(quote! { + #vname #defs, }); + counts.push(count); + } - if v.ast().discriminant.is_some() { - let variant_ident = v.ast().ident; - next_discriminant = quote! { { Self::#variant_ident as u64 } }; - } + let impls = s.gen_impl(quote! { + use ::fuel_core_compression::{db, CountPerTable, CompactionContext}; - let encode_discriminant = quote! { - <::core::primitive::u64 as ::fuel_types::canonical::Serialize>::encode(&#next_discriminant, buffer)?; - }; - next_discriminant = quote! { ( (#next_discriminant) + 1u64 ) }; - - quote! { - #pat => { - #encode_discriminant - #( - { #encode_static_iter } - )* + gen impl ::fuel_core_compression::Compactable for @Self { + + type Compact = #compact_name; + + fn count(&self) -> CountPerTable { + // #count_per_field; + todo!() } - } - }); - let encode_dynamic = s.variants().iter().map(|v| { - let encode_dynamic_iter = v.each(|binding| { - if should_skip_field_binding(binding) { - quote! {} - } else { - quote! { - ::fuel_types::canonical::Serialize::encode_dynamic(#binding, buffer)?; - } + + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex { + // #compact_per_field; + todo!() + } + + fn decompact(compact: Self::Compact, reg: &R) -> Self + where + R: db::RegistryRead { + // #decompact_per_field; + todo!() } - }); - quote! { - #encode_dynamic_iter } }); + quote! 
{ + #enumdef { #variantdefs } + #impls + } +} - let match_size_static: TokenStream2 = s - .variants() - .iter() - .map(|variant| { - variant.each(|binding| { - if should_skip_field_binding(binding) { - quote! {} - } else { - quote! { - size = ::fuel_types::canonical::add_sizes(size, #binding.size_static()); - } - } - }) - }) - .collect(); - let match_size_static = quote! {{ - // `repr(128)` is unstable, so because of that we can use 8 bytes. - let mut size = 8; - match self { #match_size_static } size } +fn serialize_transparent(s: synstructure::Structure) -> TokenStream2 { + assert_eq!( + s.variants().len(), + 1, + "transparent structures must have one variant" + ); + let variant: &synstructure::VariantInfo = &s.variants()[0]; + assert_eq!( + variant.ast().fields.len(), + 1, + "transparent structures must have exactly one field" + ); + let field_t = variant.ast().fields.iter().next().unwrap().ty.clone(); + let field_d = + quote! { <#field_t as Compactable>::decompact(c, reg) }; + let field_name: TokenStream2 = match variant.ast().fields { + syn::Fields::Named(n) => { + let n = n.named[0].ident.clone().unwrap(); + quote! { #n } + } + syn::Fields::Unnamed(_) => quote! { 0 }, + syn::Fields::Unit => unreachable!(), + }; + let field_c = match variant.ast().fields { + syn::Fields::Named(_) => quote! { Self {#field_name: #field_d} }, + syn::Fields::Unnamed(_) => quote! { Self(#field_d) }, + syn::Fields::Unit => unreachable!(), }; - let match_size_dynamic: TokenStream2 = s - .variants() - .iter() - .map(|variant| { - variant.each(|binding| { - if should_skip_field_binding(binding) { - quote! {} - } else { - quote! { - size = ::fuel_types::canonical::add_sizes(size, #binding.size_dynamic()); - } - } - }) - }) - .collect(); - let match_size_dynamic = - quote! {{ let mut size = 0; match self { #match_size_dynamic } size }}; - - let impl_code = s.gen_impl(quote! 
{ - gen impl ::fuel_types::canonical::Serialize for @Self { - #[inline(always)] - fn size_static(&self) -> usize { - #match_size_static - } - - #[inline(always)] - fn size_dynamic(&self) -> usize { - #match_size_dynamic - } + s.gen_impl(quote! { + use ::fuel_core_compression::{db, Compactable, CountPerTable, CompactionContext}; - #[inline(always)] - fn encode_static(&self, buffer: &mut O) -> ::core::result::Result<(), ::fuel_types::canonical::Error> { - match self { - #( - #encode_static - )*, - _ => return ::core::result::Result::Err(::fuel_types::canonical::Error::UnknownDiscriminant), - }; + gen impl Compactable for @Self { + type Compact = #field_t; - ::core::result::Result::Ok(()) + fn count(&self) -> CountPerTable { + self.#field_name.count() } - fn encode_dynamic(&self, buffer: &mut O) -> ::core::result::Result<(), ::fuel_types::canonical::Error> { - match self { - #( - #encode_dynamic - )*, - _ => return ::core::result::Result::Err(::fuel_types::canonical::Error::UnknownDiscriminant), - }; + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex { + self.#field_name.compact(ctx) + } - ::core::result::Result::Ok(()) + fn decompact(c: Self::Compact, reg: &R) -> Self + where + R: db::RegistryRead { + #field_c } } - }); - - quote! { - #impl_code - } + }) } /// Derives `Serialize` trait for the given `struct` or `enum`. 
@@ -179,9 +244,14 @@ pub fn serialize_derive(mut s: synstructure::Structure) -> TokenStream2 { s.add_bounds(synstructure::AddBounds::Fields) .underscore_const(true); - match s.ast().data { - syn::Data::Struct(_) => serialize_struct(&s), - syn::Data::Enum(_) => serialize_enum(&s), - _ => panic!("Can't derive `Serialize` for `union`s"), - } + let ts = match StructureAttrs::parse(&s.ast().attrs) { + StructureAttrs::Normal => match s.ast().data { + syn::Data::Struct(_) => serialize_struct(&s), + syn::Data::Enum(_) => serialize_enum(&s), + _ => panic!("Can't derive `Serialize` for `union`s"), + }, + StructureAttrs::Transparent => serialize_transparent(s), + }; + println!("{}", ts); + ts } diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 054ada7701d..833fa91b8db 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -19,4 +19,4 @@ bincode = "1.3" paste = "1.0" -# fuel-core-compression-derive = { path = "../derive" } \ No newline at end of file +fuel-core-compression-derive = { path = "../compression-derive" } \ No newline at end of file diff --git a/crates/compression/src/compression.rs b/crates/compression/src/compression.rs index c11aac62a97..d1bfeead8cd 100644 --- a/crates/compression/src/compression.rs +++ b/crates/compression/src/compression.rs @@ -1,3 +1,5 @@ +use std::marker::PhantomData; + use crate::{ registry::{ access::{ @@ -104,6 +106,112 @@ pub trait Compactable { R: db::RegistryRead; } +macro_rules! 
identity_compaction { + ($t:ty) => { + impl Compactable for $t { + type Compact = Self; + + fn count(&self) -> CountPerTable { + CountPerTable::default() + } + + fn compact(&self, _ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, + { + *self + } + + fn decompact(compact: Self::Compact, _reg: &R) -> Self + where + R: db::RegistryRead, + { + compact + } + } + }; +} + +identity_compaction!(u8); +identity_compaction!(u16); +identity_compaction!(u32); +identity_compaction!(u64); +identity_compaction!(u128); + +impl Compactable for [T; S] { + type Compact = Self; + + fn count(&self) -> CountPerTable { + let mut count = CountPerTable::default(); + for item in self.iter() { + count += item.count(); + } + count + } + + fn compact(&self, _ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, + { + self.clone() + } + + fn decompact(compact: Self::Compact, _reg: &R) -> Self + where + R: db::RegistryRead, + { + compact + } +} + +impl Compactable for Vec { + type Compact = Self; + + fn count(&self) -> CountPerTable { + let mut count = CountPerTable::default(); + for item in self.iter() { + count += item.count(); + } + count + } + + fn compact(&self, _ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, + { + self.clone() + } + + fn decompact(compact: Self::Compact, _reg: &R) -> Self + where + R: db::RegistryRead, + { + compact + } +} + +impl Compactable for PhantomData { + type Compact = (); + + fn count(&self) -> CountPerTable { + CountPerTable::default() + } + + fn compact(&self, _ctx: &mut CompactionContext) -> Self::Compact + where + R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, + { + () + } + + fn decompact(_compact: Self::Compact, _reg: &R) -> Self + where + R: db::RegistryRead, + { + Self + } +} + #[cfg(test)] mod tests { use fuel_core_types::fuel_types::Address; 
diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 5892148ca5d..190c46869d0 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -3,5 +3,18 @@ mod compression; mod registry; mod types; -pub use compression::Compactable; -pub use registry::Key; +pub use compression::{ + Compactable, + CompactionContext, +}; +pub use registry::{ + db, + tables, + CountPerTable, + Key, +}; + +pub use fuel_core_compression_derive::{ + Deserialize, + Serialize, +}; diff --git a/crates/compression/src/registry/mod.rs b/crates/compression/src/registry/mod.rs index 51ab9e03ffa..398fc1e2b64 100644 --- a/crates/compression/src/registry/mod.rs +++ b/crates/compression/src/registry/mod.rs @@ -4,7 +4,7 @@ use serde::{ }; pub(crate) mod block_section; -pub(crate) mod db; +pub mod db; pub(crate) mod in_memory; mod key; diff --git a/crates/compression/src/types/tx.rs b/crates/compression/src/types/tx.rs index 3109106f98d..d0a0bbe2639 100644 --- a/crates/compression/src/types/tx.rs +++ b/crates/compression/src/types/tx.rs @@ -155,11 +155,14 @@ pub struct OutputContract { input_index: u8, } +#[cfg(feature = "manual")] mod compaction { // This could be done using a derive macro as well. Not sure if that's worth it. 
use fuel_core_types::fuel_tx::field::{ Inputs, + Outputs, + Witnesses, *, }; @@ -241,14 +244,14 @@ mod compaction { R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, { Self::Compact { - script_gas_limit: self.script_gas_limit, + script_gas_limit: *self.script_gas_limit(), script: ctx.to_key::(self.script().clone()), script_data: self.script_data().clone(), policies: self.policies().clone(), - inputs: self.inputs().map(|i| i.compact(ctx)).collect(), - outputs: self.outputs().map(|o| o.compact(ctx)).collect(), - witnesses: self.witnesses().map(|w| w.compact(ctx)).collect(), - receipts_root: self.receipts_root, + inputs: self.inputs().iter().map(|i| i.compact(ctx)).collect(), + outputs: self.outputs().iter().map(|o| o.compact(ctx)).collect(), + witnesses: self.witnesses().iter().map(|w| ctx.to_key(*w)).collect(), + receipts_root: *self.receipts_root(), } } @@ -345,7 +348,7 @@ mod compaction { R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, { Self::Compact { - tx_pointer: self.tx_pointer, + tx_pointer: self.tx_pointer(), input_contract: self.input_contract.compact(ctx), output_contract: self.output_contract.compact(ctx), mint_amount: self.mint_amount, @@ -452,7 +455,7 @@ mod compaction { } } - impl Compactable for fuel_tx::input::Output { + impl Compactable for fuel_tx::output::Output { type Compact = super::Output; fn count(&self) -> CountPerTable { From 75ab4a02e29e9cc096f393407f14c371b856b9d7 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 29 Jan 2024 16:33:23 +0200 Subject: [PATCH 009/112] WIP --- Cargo.lock | 13 +++++++++++++ Cargo.toml | 1 + 2 files changed, 14 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 78270c980f3..8bbf85b5120 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2781,11 +2781,24 @@ name = "fuel-core-compression" version = "0.22.0" dependencies = [ "bincode", + "fuel-core-compression-derive", "fuel-core-types", + "paste", "postcard", "serde", ] +[[package]] +name = "fuel-core-compression-derive" +version = 
"0.22.0" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "syn 2.0.48", + "synstructure 0.13.0", +] + [[package]] name = "fuel-core-consensus-module" version = "0.22.0" diff --git a/Cargo.toml b/Cargo.toml index 81a70648e62..2278896d026 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ members = [ "crates/chain-config", "crates/client", "crates/compression", + "crates/compression-derive", "crates/database", "crates/fuel-core", "crates/keygen", From 7fb55be95278bff9b9b389e456ecc6f13b4913f7 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 29 Jan 2024 18:30:28 +0200 Subject: [PATCH 010/112] Derive Compact: initial work --- .../src/{serialize.rs => compact.rs} | 86 +++++---- crates/compression-derive/src/deserialize.rs | 173 ------------------ crates/compression-derive/src/lib.rs | 19 +- crates/compression/src/lib.rs | 5 +- 4 files changed, 53 insertions(+), 230 deletions(-) rename crates/compression-derive/src/{serialize.rs => compact.rs} (75%) delete mode 100644 crates/compression-derive/src/deserialize.rs diff --git a/crates/compression-derive/src/serialize.rs b/crates/compression-derive/src/compact.rs similarity index 75% rename from crates/compression-derive/src/serialize.rs rename to crates/compression-derive/src/compact.rs index 7e82a39d7bc..7490b051601 100644 --- a/crates/compression-derive/src/serialize.rs +++ b/crates/compression-derive/src/compact.rs @@ -18,44 +18,47 @@ impl PerField { let mut defs = TokenStream2::new(); let mut count = TokenStream2::new(); - for field in fields { + for (i, field) in fields.iter().enumerate() { + let fi = syn::Index::from(i); let attrs = FieldAttrs::parse(&field.attrs); defs.extend(match &attrs { - FieldAttrs::Skip => quote! {}, - FieldAttrs::Normal => { - let ty = &field.ty; - let cty = quote! { - <#ty as ::fuel_core_compression::Compactable>::Compact - }; - if let Some(fname) = field.ident.as_ref() { - quote! { #fname: #cty, } - } else { - quote! { #cty, } + FieldAttrs::Skip => quote! 
{}, + FieldAttrs::Normal => { + let ty = &field.ty; + let cty = quote! { + <#ty as ::fuel_core_compression::Compactable>::Compact + }; + if let Some(fname) = field.ident.as_ref() { + quote! { #fname: #cty, } + } else { + quote! { #cty, } + } } - } - FieldAttrs::Registry(registry) => { - let reg_ident = format_ident!("{}", registry); - let cty = quote! { - ::fuel_core_compression::Key<::fuel_core_compression::tables::#reg_ident> - }; - if let Some(fname) = field.ident.as_ref() { - quote! { #fname: #cty, } - } else { - quote! { #cty, } + FieldAttrs::Registry(registry) => { + let reg_ident = format_ident!("{}", registry); + let cty = quote! { + ::fuel_core_compression::Key<::fuel_core_compression::tables::#reg_ident> + }; + if let Some(fname) = field.ident.as_ref() { + quote! { #fname: #cty, } + } else { + quote! { #cty, } + } } - } - }); + }); count.extend(match &attrs { FieldAttrs::Skip => quote! { CountPerTable::default() + }, FieldAttrs::Normal => { - let ty = &field.ty; - quote! { - <#ty as ::fuel_core_compression::Compactable>::Compact::count() + + if let Some(fname) = &field.ident { + quote! { self.#fname.count() + } + } else { + quote! { self.#fi.count() + } } } FieldAttrs::Registry(registry) => { + let reg_ident = format_ident!("{}", registry); quote! { - CountPerTable { #registry: 1, ..CountPerTable::default() } + + CountPerTable { #reg_ident: 1, ..CountPerTable::default() } + } } }); @@ -66,7 +69,7 @@ impl PerField { syn::Fields::Unnamed(_) => quote! {(#defs)}, syn::Fields::Unit => quote! {}, }; - count.extend(quote! { 0 }); + count.extend(quote! { CountPerTable::default() }); Self { defs, count } } @@ -84,12 +87,17 @@ fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { count: count_per_field, } = PerField::form(&variant.ast().fields); + let semi = match variant.ast().fields { + syn::Fields::Named(_) => quote! {}, + syn::Fields::Unnamed(_) => quote! {;}, + syn::Fields::Unit => quote! 
{;}, + }; + let g = s.ast().generics.clone(); let w = g.where_clause.clone(); let compact = quote! { - #[derive(Debug, Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] - pub struct #compact_name #g #w #defs ; + pub struct #compact_name #g #w #defs #semi }; let impls = s.gen_impl(quote! { @@ -100,8 +108,7 @@ fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { type Compact = #compact_name #g; fn count(&self) -> CountPerTable { - // #count_per_field; - todo!() + #count_per_field } fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact @@ -133,7 +140,6 @@ fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { let compact_name = format_ident!("Compact{}", name); let enumdef = quote! { - #[derive(Debug, Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum #compact_name }; @@ -198,8 +204,7 @@ fn serialize_transparent(s: synstructure::Structure) -> TokenStream2 { "transparent structures must have exactly one field" ); let field_t = variant.ast().fields.iter().next().unwrap().ty.clone(); - let field_d = - quote! { <#field_t as Compactable>::decompact(c, reg) }; + let field_d = quote! { <#field_t as Compactable>::decompact(c, reg) }; let field_name: TokenStream2 = match variant.ast().fields { syn::Fields::Named(n) => { let n = n.named[0].ident.clone().unwrap(); @@ -239,11 +244,13 @@ fn serialize_transparent(s: synstructure::Structure) -> TokenStream2 { }) } -/// Derives `Serialize` trait for the given `struct` or `enum`. -pub fn serialize_derive(mut s: synstructure::Structure) -> TokenStream2 { - s.add_bounds(synstructure::AddBounds::Fields) +/// Derives `Compact` trait for the given `struct` or `enum`. 
+pub fn compact_derive(mut s: synstructure::Structure) -> TokenStream2 { + s.add_bounds(synstructure::AddBounds::Both) .underscore_const(true); + let name = s.ast().ident.to_string(); + let ts = match StructureAttrs::parse(&s.ast().attrs) { StructureAttrs::Normal => match s.ast().data { syn::Data::Struct(_) => serialize_struct(&s), @@ -252,6 +259,7 @@ pub fn serialize_derive(mut s: synstructure::Structure) -> TokenStream2 { }, StructureAttrs::Transparent => serialize_transparent(s), }; - println!("{}", ts); + // println!("{}", ts); + let _ = std::fs::write(format!("/tmp/derive/{name}.rs"), ts.to_string()); ts } diff --git a/crates/compression-derive/src/deserialize.rs b/crates/compression-derive/src/deserialize.rs deleted file mode 100644 index 24550b11fd5..00000000000 --- a/crates/compression-derive/src/deserialize.rs +++ /dev/null @@ -1,173 +0,0 @@ -use proc_macro2::TokenStream as TokenStream2; -use quote::{ - format_ident, - quote, -}; - -// fn deserialize_struct(s: &mut synstructure::Structure) -> TokenStream2 { -// assert_eq!(s.variants().len(), 1, "structs must have one variant"); - -// let variant: &synstructure::VariantInfo = &s.variants()[0]; -// let decode_main = variant.construct(|field, _| { -// let ty = &field.ty; -// if should_skip_field(&field.attrs) { -// quote! { -// ::core::default::Default::default() -// } -// } else { -// quote! {{ -// <#ty as ::fuel_types::canonical::Deserialize>::decode_static(buffer)? -// }} -// } -// }); - -// let decode_dynamic = variant.each(|binding| { -// if should_skip_field_binding(binding) { -// quote! { -// *#binding = ::core::default::Default::default(); -// } -// } else { -// quote! {{ -// ::fuel_types::canonical::Deserialize::decode_dynamic(#binding, buffer)?; -// }} -// } -// }); - -// let remove_prefix = if let Some(expected_prefix) = StructAttrs::parse(s).prefix { -// quote! 
{{ -// let prefix = <_ as ::fuel_types::canonical::Deserialize>::decode_static(buffer); -// if prefix != Ok(#expected_prefix) { -// return ::core::result::Result::Err(::fuel_types::canonical::Error::InvalidPrefix) -// } -// }} -// } else { -// quote! {} -// }; - -// s.gen_impl(quote! { -// gen impl ::fuel_types::canonical::Deserialize for @Self { -// fn decode_static(buffer: &mut I) -> ::core::result::Result { -// #remove_prefix -// ::core::result::Result::Ok(#decode_main) -// } - -// fn decode_dynamic(&mut self, buffer: &mut I) -> ::core::result::Result<(), ::fuel_types::canonical::Error> { -// match self { -// #decode_dynamic, -// }; -// ::core::result::Result::Ok(()) -// } -// } -// }) -// } - -// fn deserialize_enum(s: &synstructure::Structure) -> TokenStream2 { -// let _name = &s.ast().ident; - -// assert!(!s.variants().is_empty(), "got invalid empty enum"); - -// let mut next_discriminant = quote! { { 0u64 } }; -// let enum_ident = &s.ast().ident; -// let calculated_discriminants = -// s.variants().iter().enumerate().map(|(index, variant)| { -// if variant.ast().discriminant.is_some() { -// let variant_ident = variant.ast().ident; -// next_discriminant = quote! { { #enum_ident::#variant_ident as u64 } }; -// } - -// let const_ident = format_ident!("V{}", index); -// let result = quote! { const #const_ident: ::core::primitive::u64 = #next_discriminant; }; - -// next_discriminant = quote! { ( (#next_discriminant) + 1u64 ) }; - -// result -// }); - -// let decode_static: TokenStream2 = s -// .variants() -// .iter() -// .enumerate() -// .map(|(index, variant)| { -// let decode_main = variant.construct(|field, _| { -// if should_skip_field(&field.attrs) { -// quote! { -// ::core::default::Default::default() -// } -// } else { -// let ty = &field.ty; -// quote! {{ -// <#ty as ::fuel_types::canonical::Deserialize>::decode_static(buffer)? -// }} -// } -// }); -// let const_ident = format_ident!("V{}", index); - -// quote! 
{ -// #const_ident => { -// ::core::result::Result::Ok(#decode_main) -// } -// } -// }) -// .collect(); - -// let decode_dynamic = s.variants().iter().map(|variant| { -// let decode_dynamic = variant.each(|binding| { -// if !should_skip_field_binding(binding) { -// quote! {{ -// ::fuel_types::canonical::Deserialize::decode_dynamic(#binding, buffer)?; -// }} -// } else { -// quote! {} -// } -// }); - -// quote! { -// #decode_dynamic -// } -// }); - -// let discriminant = { -// quote! { -// <::core::primitive::u64 as ::fuel_types::canonical::Deserialize>::decode(buffer)? -// } -// }; - -// s.gen_impl(quote! { -// gen impl ::fuel_types::canonical::Deserialize for @Self { -// fn decode_static(buffer: &mut I) -> ::core::result::Result { -// #( #calculated_discriminants )* - -// match #discriminant { -// #decode_static -// _ => ::core::result::Result::Err(::fuel_types::canonical::Error::UnknownDiscriminant), -// } -// } - -// fn decode_dynamic(&mut self, buffer: &mut I) -> ::core::result::Result<(), ::fuel_types::canonical::Error> { -// match self { -// #( -// #decode_dynamic -// )* -// _ => return ::core::result::Result::Err(::fuel_types::canonical::Error::UnknownDiscriminant), -// }; - -// ::core::result::Result::Ok(()) -// } -// } -// }) -// } - -/// Derives `Deserialize` trait for the given `struct` or `enum`. -pub fn deserialize_derive(mut s: synstructure::Structure) -> TokenStream2 { - s.bind_with(|_| synstructure::BindStyle::RefMut) - .add_bounds(synstructure::AddBounds::Fields) - .underscore_const(true); - - // match s.ast().data { - // syn::Data::Struct(_) => deserialize_struct(&mut s), - // syn::Data::Enum(_) => deserialize_enum(&s), - // _ => panic!("Can't derive `Deserialize` for `union`s"), - // } - - quote! 
{} -} diff --git a/crates/compression-derive/src/lib.rs b/crates/compression-derive/src/lib.rs index 2eae5539658..6772ea00478 100644 --- a/crates/compression-derive/src/lib.rs +++ b/crates/compression-derive/src/lib.rs @@ -4,21 +4,12 @@ extern crate proc_macro; mod attribute; -mod deserialize; -mod serialize; +mod compact; -use self::{ - deserialize::deserialize_derive, - serialize::serialize_derive, -}; +use self::compact::compact_derive; synstructure::decl_derive!( - [Deserialize, attributes(da_compress)] => - /// Derives `Deserialize` trait for the given `struct` or `enum`. - deserialize_derive -); -synstructure::decl_derive!( - [Serialize, attributes(da_compress)] => - /// Derives `Serialize` trait for the given `struct` or `enum`. - serialize_derive + [Compact, attributes(da_compress)] => + /// Derives `Compact` trait for the given `struct` or `enum`. + compact_derive ); diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 190c46869d0..0e4471fdd4b 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -14,7 +14,4 @@ pub use registry::{ Key, }; -pub use fuel_core_compression_derive::{ - Deserialize, - Serialize, -}; +pub use fuel_core_compression_derive::Compact; From 3e4f0e0bd8fd3363a0b52481b8401f9c8191258c Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 29 Jan 2024 19:58:22 +0200 Subject: [PATCH 011/112] WIP: compaction --- crates/compression-derive/src/compact.rs | 209 +++++++++++------- crates/compression/src/compression.rs | 6 +- .../compression/src/registry/block_section.rs | 4 +- crates/compression/src/registry/mod.rs | 32 +-- 4 files changed, 153 insertions(+), 98 deletions(-) diff --git a/crates/compression-derive/src/compact.rs b/crates/compression-derive/src/compact.rs index 7490b051601..39df7c7dec1 100644 --- a/crates/compression-derive/src/compact.rs +++ b/crates/compression-derive/src/compact.rs @@ -9,70 +9,129 @@ use crate::attribute::{ StructureAttrs, }; -pub struct PerField { - 
defs: TokenStream2, - count: TokenStream2, +/// Map field definitions to compacted field definitions. +fn field_defs(fields: &syn::Fields) -> TokenStream2 { + let mut defs = TokenStream2::new(); + + for field in fields { + let attrs = FieldAttrs::parse(&field.attrs); + defs.extend(match &attrs { + FieldAttrs::Skip => quote! {}, + FieldAttrs::Normal => { + let ty = &field.ty; + let cty = quote! { + <#ty as ::fuel_core_compression::Compactable>::Compact + }; + if let Some(fname) = field.ident.as_ref() { + quote! { #fname: #cty, } + } else { + quote! { #cty, } + } + } + FieldAttrs::Registry(registry) => { + let reg_ident = format_ident!("{}", registry); + let cty = quote! { + ::fuel_core_compression::Key<::fuel_core_compression::tables::#reg_ident> + }; + if let Some(fname) = field.ident.as_ref() { + quote! { #fname: #cty, } + } else { + quote! { #cty, } + } + } + }); + } + + match fields { + syn::Fields::Named(_) => quote! {{ #defs }}, + syn::Fields::Unnamed(_) => quote! {(#defs)}, + syn::Fields::Unit => quote! {}, + } } -impl PerField { - fn form(fields: &syn::Fields) -> Self { - let mut defs = TokenStream2::new(); - let mut count = TokenStream2::new(); - - for (i, field) in fields.iter().enumerate() { - let fi = syn::Index::from(i); - let attrs = FieldAttrs::parse(&field.attrs); - defs.extend(match &attrs { + +/// Construct +fn construct( + compact: &syn::Ident, + variant: &synstructure::VariantInfo<'_>, +) -> TokenStream2 { + let bound_fields: TokenStream2 = variant + .bindings() + .iter() + .map(|binding| { + let attrs = FieldAttrs::parse(&binding.ast().attrs); + let ty = &binding.ast().ty; + let cname = format_ident!("{}_c", binding.binding); + + match attrs { FieldAttrs::Skip => quote! {}, FieldAttrs::Normal => { - let ty = &field.ty; - let cty = quote! { - <#ty as ::fuel_core_compression::Compactable>::Compact - }; - if let Some(fname) = field.ident.as_ref() { - quote! { #fname: #cty, } - } else { - quote! { #cty, } + quote! 
{ + let #cname = <#ty as Compactable>::compact(&#binding, ctx); } } FieldAttrs::Registry(registry) => { let reg_ident = format_ident!("{}", registry); let cty = quote! { - ::fuel_core_compression::Key<::fuel_core_compression::tables::#reg_ident> + ::fuel_core_compression::Key< + ::fuel_core_compression::tables::#reg_ident + > }; - if let Some(fname) = field.ident.as_ref() { - quote! { #fname: #cty, } - } else { - quote! { #cty, } + quote! { + let #cname: #cty = ctx.to_key(*#binding); } } - }); - count.extend(match &attrs { - FieldAttrs::Skip => quote! { CountPerTable::default() + }, + } + }) + .collect(); + + let construct_fields: TokenStream2 = variant + .bindings() + .iter() + .map(|binding| { + let attrs = FieldAttrs::parse(&binding.ast().attrs); + if matches!(attrs, FieldAttrs::Skip) { + return quote! {}; + } + let cname = format_ident!("{}_c", binding.binding); + if let Some(fname) = &binding.ast().ident { + quote! { #fname: #cname, } + } else { + quote! { #cname, } + } + }) + .collect(); + + quote! { + #bound_fields + #compact { #construct_fields } + } +} +// Sum of Compactable::count() of all fields. +fn sum_counts(variant: &synstructure::VariantInfo<'_>) -> TokenStream2 { + variant + .bindings() + .iter() + .map(|binding| { + let attrs = FieldAttrs::parse(&binding.ast().attrs); + let ty = &binding.ast().ty; + + match attrs { + FieldAttrs::Skip => quote! { CountPerTable::default() }, FieldAttrs::Normal => { - if let Some(fname) = &field.ident { - quote! { self.#fname.count() + } - } else { - quote! { self.#fi.count() + } - } + quote! { <#ty as Compactable>::count(&#binding) } } FieldAttrs::Registry(registry) => { let reg_ident = format_ident!("{}", registry); quote! { - CountPerTable { #reg_ident: 1, ..CountPerTable::default() } + + CountPerTable { #reg_ident: 1, ..CountPerTable::default() } } } - }); - } - - let defs = match fields { - syn::Fields::Named(_) => quote! {{ #defs }}, - syn::Fields::Unnamed(_) => quote! {(#defs)}, - syn::Fields::Unit => quote! 
{}, - }; - count.extend(quote! { CountPerTable::default() }); - - Self { defs, count } - } + } + }) + .fold( + quote! { CountPerTable::default() }, + |acc, x| quote! { #acc + #x }, + ) } fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { @@ -82,10 +141,10 @@ fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { let name = &s.ast().ident; let compact_name = format_ident!("Compact{}", name); - let PerField { - defs, - count: count_per_field, - } = PerField::form(&variant.ast().fields); + let defs = field_defs(&variant.ast().fields); + let count_per_variant = s.each_variant(|variant| sum_counts(variant)); + let construct_per_variant = + s.each_variant(|variant| construct(&compact_name, variant)); let semi = match variant.ast().fields { syn::Fields::Named(_) => quote! {}, @@ -101,21 +160,20 @@ fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { }; let impls = s.gen_impl(quote! { - use ::fuel_core_compression::{db, CountPerTable, CompactionContext}; + use ::fuel_core_compression::{db, Compactable, CountPerTable, CompactionContext}; - gen impl ::fuel_core_compression::Compactable for @Self { + gen impl Compactable for @Self { type Compact = #compact_name #g; fn count(&self) -> CountPerTable { - #count_per_field + match self { #count_per_variant } } fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact where R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex { - // #compact_per_field; - todo!() + match self { #construct_per_variant } } fn decompact(compact: Self::Compact, reg: &R) -> Self @@ -134,40 +192,37 @@ fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { } fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { - assert!(!s.variants().is_empty(), "got invalid empty enum"); + assert!(!s.variants().is_empty(), "empty enums are not supported"); let name = &s.ast().ident; let compact_name = format_ident!("Compact{}", name); + let variant_defs: TokenStream2 = s + .variants() + .iter() 
+ .map(|variant| { + let vname = variant.ast().ident.clone(); + let defs = field_defs(&variant.ast().fields); + quote! { + #vname #defs, + } + }) + .collect(); let enumdef = quote! { #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] - pub enum #compact_name + pub enum #compact_name { #variant_defs } }; - let mut variantdefs = TokenStream2::new(); - let mut counts = Vec::new(); - - for variant in s.variants() { - let vname = variant.ast().ident.clone(); - - let PerField { defs, count } = PerField::form(&variant.ast().fields); - - variantdefs.extend(quote! { - #vname #defs, - }); - counts.push(count); - } + let count_per_variant = s.each_variant(|variant| sum_counts(variant)); let impls = s.gen_impl(quote! { - use ::fuel_core_compression::{db, CountPerTable, CompactionContext}; - - gen impl ::fuel_core_compression::Compactable for @Self { + use ::fuel_core_compression::{db, Compactable, CountPerTable, CompactionContext}; + gen impl Compactable for @Self { type Compact = #compact_name; fn count(&self) -> CountPerTable { - // #count_per_field; - todo!() + match self { #count_per_variant } } fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact @@ -186,7 +241,7 @@ fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { } }); quote! 
{ - #enumdef { #variantdefs } + #enumdef #impls } } @@ -259,7 +314,7 @@ pub fn compact_derive(mut s: synstructure::Structure) -> TokenStream2 { }, StructureAttrs::Transparent => serialize_transparent(s), }; - // println!("{}", ts); + println!("{}", ts); let _ = std::fs::write(format!("/tmp/derive/{name}.rs"), ts.to_string()); ts } diff --git a/crates/compression/src/compression.rs b/crates/compression/src/compression.rs index d1bfeead8cd..804d30088a6 100644 --- a/crates/compression/src/compression.rs +++ b/crates/compression/src/compression.rs @@ -245,7 +245,7 @@ mod tests { where R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, { - ctx.to_key::(**self) + ctx.to_key::(*self) } fn decompact(compact: Self::Compact, reg: &R) -> Self @@ -284,8 +284,8 @@ mod tests { where R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, { - let a = ctx.to_key::(*self.a); - let b = ctx.to_key::(*self.b); + let a = ctx.to_key::(self.a); + let b = ctx.to_key::(self.b); ManualExampleCompact { a, b, c: self.c } } diff --git a/crates/compression/src/registry/block_section.rs b/crates/compression/src/registry/block_section.rs index b8b0d3f2281..23c61d13774 100644 --- a/crates/compression/src/registry/block_section.rs +++ b/crates/compression/src/registry/block_section.rs @@ -120,11 +120,11 @@ mod tests { changes: ChangesPerTable { AssetId: WriteTo { start_key: Key::try_from(100).unwrap(), - values: vec![*AssetId::from([0xa0; 32]), *AssetId::from([0xa1; 32])], + values: vec![AssetId::from([0xa0; 32]), AssetId::from([0xa1; 32])], }, Address: WriteTo { start_key: Key::default(), - values: vec![*Address::from([0xc0; 32])], + values: vec![Address::from([0xc0; 32])], }, ScriptCode: WriteTo { start_key: Key::default(), diff --git a/crates/compression/src/registry/mod.rs b/crates/compression/src/registry/mod.rs index 398fc1e2b64..8f4787a6255 100644 --- a/crates/compression/src/registry/mod.rs +++ b/crates/compression/src/registry/mod.rs @@ -163,8 +163,8 @@ macro_rules! 
tables { } tables!( - AssetId: [u8; 32], - Address: [u8; 32], + AssetId: fuel_core_types::fuel_tx::AssetId, + Address: fuel_core_types::fuel_tx::Address, ScriptCode: Vec, Witness: Vec, ); @@ -189,68 +189,68 @@ mod tests { // Empty assert_eq!( reg.read(Key::::try_from(100).unwrap()), - *AssetId::default() + AssetId::default() ); assert_eq!( - reg.index_lookup(&*AssetId::from([1; 32])), + reg.index_lookup(&AssetId::from([1; 32])), None::> ); // Write reg.batch_write( Key::::from_raw(RawKey::try_from(100u32).unwrap()), - vec![*AssetId::from([1; 32]), *AssetId::from([2; 32])], + vec![AssetId::from([1; 32]), AssetId::from([2; 32])], ); assert_eq!( reg.read(Key::::try_from(100).unwrap()), - *AssetId::from([1; 32]) + AssetId::from([1; 32]) ); assert_eq!( reg.read(Key::::try_from(101).unwrap()), - *AssetId::from([2; 32]) + AssetId::from([2; 32]) ); assert_eq!( reg.read(Key::::try_from(102).unwrap()), - *AssetId::default() + AssetId::default() ); // Overwrite reg.batch_write( Key::::from_raw(RawKey::try_from(99u32).unwrap()), - vec![*AssetId::from([10; 32]), *AssetId::from([11; 32])], + vec![AssetId::from([10; 32]), AssetId::from([11; 32])], ); assert_eq!( reg.read(Key::::try_from(99).unwrap()), - *AssetId::from([10; 32]) + AssetId::from([10; 32]) ); assert_eq!( reg.read(Key::::try_from(100).unwrap()), - *AssetId::from([11; 32]) + AssetId::from([11; 32]) ); // Wrapping reg.batch_write( Key::::from_raw(RawKey::MAX), - vec![*AssetId::from([3; 32]), *AssetId::from([4; 32])], + vec![AssetId::from([3; 32]), AssetId::from([4; 32])], ); assert_eq!( reg.read(Key::::from_raw(RawKey::MAX)), - *AssetId::from([3; 32]) + AssetId::from([3; 32]) ); assert_eq!( reg.read(Key::::from_raw(RawKey::MIN)), - *AssetId::from([4; 32]) + AssetId::from([4; 32]) ); assert_eq!( - reg.index_lookup(&*AssetId::from([3; 32])), + reg.index_lookup(&AssetId::from([3; 32])), Some(Key::::from_raw(RawKey::MAX)) ); assert_eq!( - reg.index_lookup(&*AssetId::from([4; 32])), + reg.index_lookup(&AssetId::from([4; 
32])), Some(Key::::from_raw(RawKey::MIN)) ); } From a910bcefd2c2e6b46822f5353294161f484886d1 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 31 Jan 2024 09:03:30 +0200 Subject: [PATCH 012/112] Impl compaction --- Cargo.lock | 40 +- Cargo.toml | 6 +- crates/compression-alt/Cargo.toml | 26 + crates/compression-alt/src/block.rs | 84 +++ .../types => compression-alt/src}/header.rs | 4 +- crates/compression-alt/src/lib.rs | 2 + crates/compression-derive/src/attribute.rs | 16 +- crates/compression-derive/src/compact.rs | 107 ++-- crates/compression/Cargo.toml | 10 +- crates/compression/src/block.rs | 26 - crates/compression/src/compression.rs | 68 +-- crates/compression/src/lib.rs | 7 +- .../compression/src/registry/block_section.rs | 14 +- crates/compression/src/registry/mod.rs | 34 +- crates/compression/src/types/mod.rs | 21 - crates/compression/src/types/tx.rs | 542 ------------------ crates/types/Cargo.toml | 1 + 17 files changed, 247 insertions(+), 761 deletions(-) create mode 100644 crates/compression-alt/Cargo.toml create mode 100644 crates/compression-alt/src/block.rs rename crates/{compression/src/types => compression-alt/src}/header.rs (79%) create mode 100644 crates/compression-alt/src/lib.rs delete mode 100644 crates/compression/src/block.rs delete mode 100644 crates/compression/src/types/mod.rs delete mode 100644 crates/compression/src/types/tx.rs diff --git a/Cargo.lock b/Cargo.lock index e353e75a5ba..23222063be4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2601,8 +2601,6 @@ dependencies = [ [[package]] name = "fuel-asm" version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe999b5964065e569092405bb58ec6a5b82c0368a0a9627ad48403583013506f" dependencies = [ "bitflags 2.4.2", "fuel-types", @@ -2784,6 +2782,19 @@ dependencies = [ "paste", "postcard", "serde", + "serde-big-array", +] + +[[package]] +name = "fuel-core-compression-alt" +version = "0.22.0" +dependencies = [ + "bincode", + 
"fuel-core-compression", + "fuel-core-types", + "paste", + "postcard", + "serde", ] [[package]] @@ -3158,8 +3169,6 @@ dependencies = [ [[package]] name = "fuel-crypto" version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "607c74d6c2df713b3945ca9fc4ac5a50bec55b405d9375b7cc684b9e3960c74a" dependencies = [ "coins-bip32", "coins-bip39", @@ -3179,8 +3188,6 @@ dependencies = [ [[package]] name = "fuel-derive" version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92f1e5e1602c4b554b98e84a924d97621641d27ccec643c9468844329cee05e7" dependencies = [ "proc-macro2", "quote", @@ -3191,8 +3198,6 @@ dependencies = [ [[package]] name = "fuel-merkle" version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1077a43ef91efcd2839ec649e595b5d89f2b130e927c3abd71f78189b376c30" dependencies = [ "derive_more", "digest 0.10.7", @@ -3206,19 +3211,16 @@ dependencies = [ [[package]] name = "fuel-storage" version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee976cc2f29f4ba6d6758d6892c421a7079a654b29777d808641c64288a98b9" [[package]] name = "fuel-tx" version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3ada9cb4520034ccce5f89c3b6b791fc830cc5b8c2b37ecfb2f50059e962672" dependencies = [ "bitflags 2.4.2", "derivative", "derive_more", "fuel-asm", + "fuel-core-compression", "fuel-crypto", "fuel-merkle", "fuel-types", @@ -3234,9 +3236,8 @@ dependencies = [ [[package]] name = "fuel-types" version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8d2dd56d12e5022ac047de40e3e461d192d28e3931ed00338150fd62993ff49" dependencies = [ + "fuel-core-compression", "fuel-derive", "hex", "rand", @@ -3246,8 +3247,6 @@ dependencies = [ [[package]] name = "fuel-vm" version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c0615e83572095957e7e235356fa7f3e5706d17a3aff62d1d206ec480013ea99" dependencies = [ "anyhow", "async-trait", @@ -6979,6 +6978,15 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-big-array" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" +dependencies = [ + "serde", +] + [[package]] name = "serde_derive" version = "1.0.196" diff --git a/Cargo.toml b/Cargo.toml index 51fb1cc5670..d108118a8e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ members = [ "crates/chain-config", "crates/client", "crates/compression", + "crates/compression-alt", "crates/compression-derive", "crates/database", "crates/fuel-core", @@ -62,6 +63,9 @@ fuel-core-client = { version = "0.22.0", path = "./crates/client" } fuel-core-database = { version = "0.22.0", path = "./crates/database" } fuel-core-metrics = { version = "0.22.0", path = "./crates/metrics" } fuel-core-services = { version = "0.22.0", path = "./crates/services" } +fuel-core-compression = { version = "0.22.0", path = "./crates/compression" } +fuel-core-compression-alt = { version = "0.22.0", path = "./crates/compression-alt" } +fuel-core-compression-derice = { version = "0.22.0", path = "./crates/compression-derive" } fuel-core-consensus-module = { version = "0.22.0", path = "./crates/services/consensus_module" } fuel-core-bft = { version = "0.22.0", path = "./crates/services/consensus_module/bft" } fuel-core-poa = { version = "0.22.0", path = "./crates/services/consensus_module/poa" } @@ -79,7 +83,7 @@ fuel-core-tests = { version = "0.0.0", path = "./tests" } fuel-core-xtask = { version = "0.0.0", path = "./xtask" } # Fuel dependencies -fuel-vm-private = { version = "0.44.0", package = "fuel-vm", default-features = false } +fuel-vm-private = { path = "../fuel-vm/fuel-vm", version = "0.44.0", package = "fuel-vm", default-features = false } # Common dependencies anyhow = "1.0" diff --git 
a/crates/compression-alt/Cargo.toml b/crates/compression-alt/Cargo.toml new file mode 100644 index 00000000000..6d12d0c2593 --- /dev/null +++ b/crates/compression-alt/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "fuel-core-compression-alt" +version = { workspace = true } +authors = { workspace = true } +categories = ["cryptography::cryptocurrencies"] +edition = { workspace = true } +homepage = { workspace = true } +keywords = ["blockchain", "cryptocurrencies", "fuel-core", "fuel-client", "fuel-compression"] +license = { workspace = true } +repository = { workspace = true } +description = "Compression and decompression of Fuel blocks for DA storage." + +[dependencies] +fuel-core-types = { workspace = true, features = ["serde"] } +fuel-core-compression = { workspace = true } + +serde = { version = "1.0", features = ["derive"] } + +postcard = { version = "1.0", features = ["use-std"] } +bincode = "1.3" + +paste = "1.0" + +[dev-dependencies] +fuel-core-types = { workspace = true, features = ["test-helpers"] } +fuel-core-compression = { workspace = true, features = ["test-helpers"] } diff --git a/crates/compression-alt/src/block.rs b/crates/compression-alt/src/block.rs new file mode 100644 index 00000000000..dcef40bbb46 --- /dev/null +++ b/crates/compression-alt/src/block.rs @@ -0,0 +1,84 @@ +use serde::{ + Deserialize, + Serialize, +}; + +use fuel_core_compression::ChangesPerTable; +use fuel_core_types::fuel_tx::CompactTransaction; + +use crate::header::Header; + +/// Compressed block. +/// The versioning here working depends on the serialization format, +/// but as long as we we have less than 128 variants, postcard will +/// make that a single byte. 
+#[derive(Clone, Serialize, Deserialize)] +#[non_exhaustive] +pub enum CompressedBlock { + V0 { + /// Registration section of the compressed block + registrations: ChangesPerTable, + /// Compressed block header + header: Header, + /// Compressed transactions + transactions: Vec, + }, +} + +#[cfg(test)] +mod tests { + use fuel_core_compression::{ + Compactable, + CompactionContext, + InMemoryRegistry, + }; + use fuel_core_types::{ + blockchain::primitives::DaBlockHeight, + fuel_tx::Transaction, + tai64::Tai64, + }; + + use super::*; + + #[test] + fn postcard_roundtrip() { + let original = CompressedBlock::V0 { + registrations: ChangesPerTable::default(), + header: Header { + da_height: DaBlockHeight::default(), + prev_root: Default::default(), + height: 3u32.into(), + time: Tai64::now(), + }, + transactions: vec![], + }; + + let compressed = postcard::to_allocvec(&original).unwrap(); + let decompressed: CompressedBlock = postcard::from_bytes(&compressed).unwrap(); + + let CompressedBlock::V0 { + registrations, + header, + transactions, + } = decompressed; + + assert_eq!(registrations, ChangesPerTable::default()); + assert_eq!(header.height, 3u32.into()); + assert!(transactions.is_empty()); + } + + #[test] + fn compact_transactions() { + let tx = Transaction::default_test_tx(); + + let mut registry = InMemoryRegistry::default(); + let compacted = CompactionContext::run(&mut registry, tx.clone()); + + let compressed = postcard::to_allocvec(&compacted).unwrap(); + dbg!(compressed.len()); + + let decompacted = Transaction::decompact(compacted, ®istry); + + assert_eq!(tx, decompacted); + } +} diff --git a/crates/compression/src/types/header.rs b/crates/compression-alt/src/header.rs similarity index 79% rename from crates/compression/src/types/header.rs rename to crates/compression-alt/src/header.rs index 4db4b311c9f..7d2ba84002c 100644 --- a/crates/compression/src/types/header.rs +++ b/crates/compression-alt/src/header.rs @@ -12,8 +12,8 @@ use serde::{ Serialize, }; 
-#[derive(Debug, Clone, Serialize, Deserialize)] -pub(crate) struct Header { +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Header { pub da_height: DaBlockHeight, pub prev_root: Bytes32, pub height: BlockHeight, diff --git a/crates/compression-alt/src/lib.rs b/crates/compression-alt/src/lib.rs new file mode 100644 index 00000000000..3f24bea4e69 --- /dev/null +++ b/crates/compression-alt/src/lib.rs @@ -0,0 +1,2 @@ +pub mod block; +pub mod header; diff --git a/crates/compression-derive/src/attribute.rs b/crates/compression-derive/src/attribute.rs index 64aefb4d0db..a3e8cd9bde1 100644 --- a/crates/compression-derive/src/attribute.rs +++ b/crates/compression-derive/src/attribute.rs @@ -6,12 +6,9 @@ const ATTR: &str = "da_compress"; pub enum StructureAttrs { /// Compacted recursively. Normal, - /// Transparent. - Transparent, } impl StructureAttrs { pub fn parse(attrs: &[syn::Attribute]) -> Self { - let mut result = Self::Normal; for attr in attrs { if attr.style != syn::AttrStyle::Outer { continue; @@ -19,21 +16,12 @@ impl StructureAttrs { if let syn::Meta::List(ml) = &attr.meta { if ml.path.segments.len() == 1 && ml.path.segments[0].ident == ATTR { - if !matches!(result, Self::Normal) { - panic!("Duplicate attribute: {}", ml.tokens); - } - - let attr_contents = ml.tokens.to_string(); - if attr_contents == "transparent" { - result = Self::Transparent; - } else { - panic!("Invalid attribute: {}", ml.tokens); - } + panic!("Invalid attribute: {}", ml.tokens); } } } - result + Self::Normal } } diff --git a/crates/compression-derive/src/compact.rs b/crates/compression-derive/src/compact.rs index 39df7c7dec1..e3fe4b6a615 100644 --- a/crates/compression-derive/src/compact.rs +++ b/crates/compression-derive/src/compact.rs @@ -49,9 +49,10 @@ fn field_defs(fields: &syn::Fields) -> TokenStream2 { } } -/// Construct -fn construct( - compact: &syn::Ident, +/// Construct compact version of the struct from the original one +fn construct_compact( + // The 
structure to construct, i.e. struct name or enum variant path + compact: &TokenStream2, variant: &synstructure::VariantInfo<'_>, ) -> TokenStream2 { let bound_fields: TokenStream2 = variant @@ -72,12 +73,14 @@ fn construct( FieldAttrs::Registry(registry) => { let reg_ident = format_ident!("{}", registry); let cty = quote! { - ::fuel_core_compression::Key< - ::fuel_core_compression::tables::#reg_ident + Key< + tables::#reg_ident > }; quote! { - let #cname: #cty = ctx.to_key(*#binding); + let #cname: #cty = ctx.to_key( + ::Type::from(#binding.clone()) + ); } } } @@ -101,9 +104,15 @@ fn construct( }) .collect(); + let construct_fields = match variant.ast().fields { + syn::Fields::Named(_) => quote! {{ #construct_fields }}, + syn::Fields::Unnamed(_) => quote! {(#construct_fields)}, + syn::Fields::Unit => quote! {}, + }; + quote! { #bound_fields - #compact { #construct_fields } + #compact #construct_fields } } // Sum of Compactable::count() of all fields. @@ -144,7 +153,7 @@ fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { let defs = field_defs(&variant.ast().fields); let count_per_variant = s.each_variant(|variant| sum_counts(variant)); let construct_per_variant = - s.each_variant(|variant| construct(&compact_name, variant)); + s.each_variant(|variant| construct_compact("e! {#compact_name}, variant)); let semi = match variant.ast().fields { syn::Fields::Named(_) => quote! {}, @@ -155,12 +164,14 @@ fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { let g = s.ast().generics.clone(); let w = g.where_clause.clone(); let compact = quote! { + #[derive(Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] + #[doc = concat!("Compacted version of `", stringify!(#name), "`.")] pub struct #compact_name #g #w #defs #semi }; let impls = s.gen_impl(quote! 
{ - use ::fuel_core_compression::{db, Compactable, CountPerTable, CompactionContext}; + use ::fuel_core_compression::{db, tables, Table, Key, Compactable, CountPerTable, CompactionContext}; gen impl Compactable for @Self { @@ -180,7 +191,7 @@ fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { where R: db::RegistryRead { // #decompact_per_field; - todo!() + todo!("decompact struct") } } }); @@ -209,14 +220,25 @@ fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { }) .collect(); let enumdef = quote! { + #[derive(Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] + #[doc = concat!("Compacted version of `", stringify!(#name), "`.")] pub enum #compact_name { #variant_defs } }; let count_per_variant = s.each_variant(|variant| sum_counts(variant)); + let construct_per_variant = s.each_variant(|variant| { + let vname = variant.ast().ident.clone(); + construct_compact( + "e! { + #compact_name :: #vname + }, + variant, + ) + }); let impls = s.gen_impl(quote! 
{ - use ::fuel_core_compression::{db, Compactable, CountPerTable, CompactionContext}; + use ::fuel_core_compression::{db, tables, Table, Key, Compactable, CountPerTable, CompactionContext}; gen impl Compactable for @Self { type Compact = #compact_name; @@ -228,15 +250,14 @@ fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact where R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex { - // #compact_per_field; - todo!() + match self { #construct_per_variant } } fn decompact(compact: Self::Compact, reg: &R) -> Self where R: db::RegistryRead { // #decompact_per_field; - todo!() + todo!("decompact enum") } } }); @@ -246,62 +267,9 @@ fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { } } -fn serialize_transparent(s: synstructure::Structure) -> TokenStream2 { - assert_eq!( - s.variants().len(), - 1, - "transparent structures must have one variant" - ); - let variant: &synstructure::VariantInfo = &s.variants()[0]; - assert_eq!( - variant.ast().fields.len(), - 1, - "transparent structures must have exactly one field" - ); - let field_t = variant.ast().fields.iter().next().unwrap().ty.clone(); - let field_d = quote! { <#field_t as Compactable>::decompact(c, reg) }; - let field_name: TokenStream2 = match variant.ast().fields { - syn::Fields::Named(n) => { - let n = n.named[0].ident.clone().unwrap(); - quote! { #n } - } - syn::Fields::Unnamed(_) => quote! { 0 }, - syn::Fields::Unit => unreachable!(), - }; - let field_c = match variant.ast().fields { - syn::Fields::Named(_) => quote! { Self {#field_name: #field_d} }, - syn::Fields::Unnamed(_) => quote! { Self(#field_d) }, - syn::Fields::Unit => unreachable!(), - }; - - s.gen_impl(quote! 
{ - use ::fuel_core_compression::{db, Compactable, CountPerTable, CompactionContext}; - - gen impl Compactable for @Self { - type Compact = #field_t; - - fn count(&self) -> CountPerTable { - self.#field_name.count() - } - - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex { - self.#field_name.compact(ctx) - } - - fn decompact(c: Self::Compact, reg: &R) -> Self - where - R: db::RegistryRead { - #field_c - } - } - }) -} - /// Derives `Compact` trait for the given `struct` or `enum`. pub fn compact_derive(mut s: synstructure::Structure) -> TokenStream2 { - s.add_bounds(synstructure::AddBounds::Both) + s.add_bounds(synstructure::AddBounds::None) .underscore_const(true); let name = s.ast().ident.to_string(); @@ -310,9 +278,8 @@ pub fn compact_derive(mut s: synstructure::Structure) -> TokenStream2 { StructureAttrs::Normal => match s.ast().data { syn::Data::Struct(_) => serialize_struct(&s), syn::Data::Enum(_) => serialize_enum(&s), - _ => panic!("Can't derive `Serialize` for `union`s"), + _ => panic!("Can't derive `Compact` for `union`s"), }, - StructureAttrs::Transparent => serialize_transparent(s), }; println!("{}", ts); let _ = std::fs::write(format!("/tmp/derive/{name}.rs"), ts.to_string()); diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 833fa91b8db..3c1670cfa82 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -11,12 +11,18 @@ repository = { workspace = true } description = "Compression and decompression of Fuel blocks for DA storage." 
[dependencies] -fuel-core-types = { workspace = true, features = ["serde"] } serde = { version = "1.0", features = ["derive"] } +serde-big-array = "0.5" postcard = { version = "1.0", features = ["use-std"] } bincode = "1.3" paste = "1.0" -fuel-core-compression-derive = { path = "../compression-derive" } \ No newline at end of file +fuel-core-compression-derive = { path = "../compression-derive" } + +[dev-dependencies] +fuel-core-types = { workspace = true, features = ["serde"] } + +[features] +test-helpers = [] diff --git a/crates/compression/src/block.rs b/crates/compression/src/block.rs deleted file mode 100644 index 5b8d418d084..00000000000 --- a/crates/compression/src/block.rs +++ /dev/null @@ -1,26 +0,0 @@ -use serde::{ - Deserialize, - Serialize, -}; - -use crate::{ - registry::Registrations, - types, -}; - -/// Compressed block. -/// The versioning here working depends on the serialization format, -/// but as long as we we have less than 128 variants, postcard will -/// make that a single byte. 
-#[derive(Debug, Clone, Serialize, Deserialize)] -#[non_exhaustive] -pub enum CompressedBlock { - V0 { - /// Registration section of the compressed block - registrations: Registrations, - /// Compressed block header - header: types::header::Header, - /// Compressed transactions - transactions: Vec, - }, -} diff --git a/crates/compression/src/compression.rs b/crates/compression/src/compression.rs index 804d30088a6..b413b8ff531 100644 --- a/crates/compression/src/compression.rs +++ b/crates/compression/src/compression.rs @@ -1,5 +1,10 @@ use std::marker::PhantomData; +use serde::{ + Deserialize, + Serialize, +}; + use crate::{ registry::{ access::{ @@ -92,7 +97,7 @@ where /// Convert data to reference-based format pub trait Compactable { - type Compact; + type Compact: Clone + Serialize + for<'a> Deserialize<'a>; /// Count max number of each key type, for upper limit of overwritten keys fn count(&self) -> CountPerTable; @@ -138,8 +143,16 @@ identity_compaction!(u32); identity_compaction!(u64); identity_compaction!(u128); -impl Compactable for [T; S] { - type Compact = Self; +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ArrayWrapper Deserialize<'a>>( + #[serde(with = "serde_big_array::BigArray")] pub [T; S], +); + +impl Compactable for [T; S] +where + T: Compactable + Clone + Serialize + for<'a> Deserialize<'a>, +{ + type Compact = ArrayWrapper; fn count(&self) -> CountPerTable { let mut count = CountPerTable::default(); @@ -153,18 +166,21 @@ impl Compactable for [T; S] { where R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, { - self.clone() + ArrayWrapper(self.clone()) } fn decompact(compact: Self::Compact, _reg: &R) -> Self where R: db::RegistryRead, { - compact + compact.0 } } -impl Compactable for Vec { +impl Compactable for Vec +where + T: Compactable + Clone + Serialize + for<'a> Deserialize<'a>, +{ type Compact = Self; fn count(&self) -> CountPerTable { @@ -214,8 +230,6 @@ impl Compactable for PhantomData { 
#[cfg(test)] mod tests { - use fuel_core_types::fuel_types::Address; - use crate::{ registry::{ db, @@ -225,37 +239,17 @@ mod tests { }, Key, }; + use fuel_core_types::fuel_types::Address; + use serde::{ + Deserialize, + Serialize, + }; use super::{ Compactable, CompactionContext, }; - impl Compactable for Address { - type Compact = Key; - - fn count(&self) -> crate::registry::CountPerTable { - CountPerTable { - Address: 1, - ..Default::default() - } - } - - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, - { - ctx.to_key::(*self) - } - - fn decompact(compact: Self::Compact, reg: &R) -> Self - where - R: db::RegistryRead, - { - Address::from(reg.read::(compact)) - } - } - #[derive(Debug, Clone, PartialEq)] struct ManualExample { a: Address, @@ -263,7 +257,7 @@ mod tests { c: u64, } - #[derive(Debug, PartialEq)] + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] struct ManualExampleCompact { a: Key, b: Key, @@ -284,8 +278,8 @@ mod tests { where R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, { - let a = ctx.to_key::(self.a); - let b = ctx.to_key::(self.b); + let a = ctx.to_key::(*self.a); + let b = ctx.to_key::(*self.b); ManualExampleCompact { a, b, c: self.c } } @@ -312,8 +306,6 @@ mod tests { #[test] fn test_compaction_roundtrip() { - check(Address::default()); - check(Address::from([1u8; 32])); check(ManualExample { a: Address::from([1u8; 32]), b: Address::from([2u8; 32]), diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 0e4471fdd4b..b9b0c16d970 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -1,7 +1,5 @@ -mod block; mod compression; mod registry; -mod types; pub use compression::{ Compactable, @@ -10,8 +8,13 @@ pub use compression::{ pub use registry::{ db, tables, + ChangesPerTable, CountPerTable, Key, + Table, }; +#[cfg(feature = "test-helpers")] +pub use 
registry::in_memory::InMemoryRegistry; + pub use fuel_core_compression_derive::Compact; diff --git a/crates/compression/src/registry/block_section.rs b/crates/compression/src/registry/block_section.rs index 23c61d13774..efb5e562855 100644 --- a/crates/compression/src/registry/block_section.rs +++ b/crates/compression/src/registry/block_section.rs @@ -1,6 +1,5 @@ use core::fmt; -use fuel_core_types::fuel_types::Bytes32; use serde::{ ser::SerializeTuple, Deserialize, @@ -95,7 +94,7 @@ impl<'de, T: Table + Deserialize<'de>> serde::de::Visitor<'de> for WriteTo { #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct Registrations { /// Merkle root of the registeration table merkle roots - pub tables_root: Bytes32, + pub tables_root: [u8; 32], /// Changes per table pub changes: ChangesPerTable, } @@ -107,24 +106,21 @@ mod tests { use fuel_core_types::{ fuel_asm::op, fuel_tx::AssetId, - fuel_types::{ - Address, - Bytes32, - }, + fuel_types::Address, }; #[test] fn test_tables() { let original = Registrations { - tables_root: Bytes32::default(), + tables_root: Default::default(), changes: ChangesPerTable { AssetId: WriteTo { start_key: Key::try_from(100).unwrap(), - values: vec![AssetId::from([0xa0; 32]), AssetId::from([0xa1; 32])], + values: vec![*AssetId::from([0xa0; 32]), *AssetId::from([0xa1; 32])], }, Address: WriteTo { start_key: Key::default(), - values: vec![Address::from([0xc0; 32])], + values: vec![*Address::from([0xc0; 32])], }, ScriptCode: WriteTo { start_key: Key::default(), diff --git a/crates/compression/src/registry/mod.rs b/crates/compression/src/registry/mod.rs index 8f4787a6255..5e8cf38aa20 100644 --- a/crates/compression/src/registry/mod.rs +++ b/crates/compression/src/registry/mod.rs @@ -11,8 +11,6 @@ mod key; use self::block_section::WriteTo; pub use self::key::Key; -pub(crate) use self::block_section::Registrations; - mod _private { pub trait Seal {} } @@ -163,8 +161,8 @@ macro_rules! 
tables { } tables!( - AssetId: fuel_core_types::fuel_tx::AssetId, - Address: fuel_core_types::fuel_tx::Address, + AssetId: [u8; 32], + Address: [u8; 32], ScriptCode: Vec, Witness: Vec, ); @@ -189,68 +187,68 @@ mod tests { // Empty assert_eq!( reg.read(Key::::try_from(100).unwrap()), - AssetId::default() + [0; 32] ); assert_eq!( - reg.index_lookup(&AssetId::from([1; 32])), + reg.index_lookup(&*AssetId::from([1; 32])), None::> ); // Write reg.batch_write( Key::::from_raw(RawKey::try_from(100u32).unwrap()), - vec![AssetId::from([1; 32]), AssetId::from([2; 32])], + vec![[1; 32], [2; 32]], ); assert_eq!( reg.read(Key::::try_from(100).unwrap()), - AssetId::from([1; 32]) + [1; 32] ); assert_eq!( reg.read(Key::::try_from(101).unwrap()), - AssetId::from([2; 32]) + [2; 32] ); assert_eq!( reg.read(Key::::try_from(102).unwrap()), - AssetId::default() + [0; 32] ); // Overwrite reg.batch_write( Key::::from_raw(RawKey::try_from(99u32).unwrap()), - vec![AssetId::from([10; 32]), AssetId::from([11; 32])], + vec![[10; 32], [11; 32]], ); assert_eq!( reg.read(Key::::try_from(99).unwrap()), - AssetId::from([10; 32]) + [10; 32] ); assert_eq!( reg.read(Key::::try_from(100).unwrap()), - AssetId::from([11; 32]) + [11; 32] ); // Wrapping reg.batch_write( Key::::from_raw(RawKey::MAX), - vec![AssetId::from([3; 32]), AssetId::from([4; 32])], + vec![[3; 32], [4; 32]], ); assert_eq!( reg.read(Key::::from_raw(RawKey::MAX)), - AssetId::from([3; 32]) + [3; 32] ); assert_eq!( reg.read(Key::::from_raw(RawKey::MIN)), - AssetId::from([4; 32]) + [4; 32] ); assert_eq!( - reg.index_lookup(&AssetId::from([3; 32])), + reg.index_lookup(&*AssetId::from([3; 32])), Some(Key::::from_raw(RawKey::MAX)) ); assert_eq!( - reg.index_lookup(&AssetId::from([4; 32])), + reg.index_lookup(&*AssetId::from([4; 32])), Some(Key::::from_raw(RawKey::MIN)) ); } diff --git a/crates/compression/src/types/mod.rs b/crates/compression/src/types/mod.rs deleted file mode 100644 index ab76f82fc72..00000000000 --- 
a/crates/compression/src/types/mod.rs +++ /dev/null @@ -1,21 +0,0 @@ -use serde::{ - Deserialize, - Serialize, -}; - -use crate::registry::{ - Key, - Table, -}; - -pub(crate) mod header; -pub(crate) mod tx; - -/// For types that need an explicit flag marking them -/// references to the registry instead of raw values, -/// this enum can be used. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub enum MaybeCompressed { - Compressed(Key), - Uncompressed(T), -} diff --git a/crates/compression/src/types/tx.rs b/crates/compression/src/types/tx.rs deleted file mode 100644 index d0a0bbe2639..00000000000 --- a/crates/compression/src/types/tx.rs +++ /dev/null @@ -1,542 +0,0 @@ -//! Compressed versions of fuel-tx types needed for DA storage. - -// TODO: remove malleabile fields - -use fuel_core_types::{ - fuel_tx::{ - self, - TxPointer, - }, - fuel_types::{ - self, - BlockHeight, - Bytes32, - Word, - }, -}; -use serde::{ - Deserialize, - Serialize, -}; - -use crate::registry::{ - tables, - Key, -}; - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[allow(clippy::large_enum_variant)] -pub(crate) enum Transaction { - Script(Script), - Create(Create), - Mint(Mint), -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub(crate) struct Script { - script_gas_limit: Word, - script: Key, - script_data: Vec, - policies: fuel_tx::policies::Policies, - inputs: Vec, - outputs: Vec, - witnesses: Vec>, - receipts_root: Bytes32, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub(crate) enum Input { - CoinSigned { - utxo_id: TxPointer, - owner: Key, - amount: Word, - asset_id: Key, - tx_pointer: TxPointer, - witness_index: u8, - maturity: BlockHeight, - }, - CoinPredicate { - utxo_id: TxPointer, - owner: Key, - amount: Word, - asset_id: Key, - tx_pointer: TxPointer, - maturity: BlockHeight, - predicate: Vec, - predicate_data: Vec, - }, - Contract { - utxo_id: TxPointer, - balance_root: Bytes32, - state_root: Bytes32, - tx_pointer: TxPointer, - asset_id: Key, - 
}, - MessageCoinSigned { - sender: Key, - recipient: Key, - amount: Word, - nonce: fuel_types::Nonce, - witness_index: u8, - data: Vec, - }, - MessageCoinPredicate { - sender: Key, - recipient: Key, - amount: Word, - nonce: fuel_types::Nonce, - predicate: Vec, - predicate_data: Vec, - }, - MessageDataSigned { - sender: Key, - recipient: Key, - amount: Word, - nonce: fuel_types::Nonce, - witness_index: u8, - data: Vec, - }, - MessageDataPredicate { - sender: Key, - recipient: Key, - amount: Word, - nonce: fuel_types::Nonce, - data: Vec, - predicate: Vec, - predicate_data: Vec, - }, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub(crate) enum Output { - Coin { - to: Key, - amount: Word, - asset_id: Key, - }, - - Contract { - input_index: u8, - }, - - Change, - - Variable, - - ContractCreated { - contract_id: TxPointer, - state_root: Bytes32, - }, -} - -#[derive(Default, Debug, Clone, Serialize, Deserialize)] -pub struct Create { - bytecode_length: Word, - bytecode_witness_index: u8, - policies: fuel_tx::policies::Policies, - storage_slots: Vec, - inputs: Vec, - outputs: Vec, - witnesses: Vec, - salt: fuel_types::Salt, -} - -#[derive(Default, Debug, Clone, Serialize, Deserialize)] -pub struct Mint { - tx_pointer: TxPointer, - input_contract: TxPointer, - output_contract: OutputContract, - mint_amount: Word, - mint_asset_id: Key, -} - -#[derive(Default, Debug, Clone, Serialize, Deserialize)] -pub struct OutputContract { - input_index: u8, -} - -#[cfg(feature = "manual")] -mod compaction { - // This could be done using a derive macro as well. Not sure if that's worth it. 
- - use fuel_core_types::fuel_tx::field::{ - Inputs, - Outputs, - Witnesses, - *, - }; - - use crate::{ - compression::CompactionContext, - registry::{ - db, - CountPerTable, - }, - Compactable, - }; - - use super::*; - - impl Compactable for fuel_tx::Transaction { - type Compact = super::Transaction; - - fn count(&self) -> CountPerTable { - match self { - fuel_tx::Transaction::Script(tx) => tx.count(), - fuel_tx::Transaction::Create(tx) => tx.count(), - fuel_tx::Transaction::Mint(tx) => tx.count(), - } - } - - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, - { - match self { - fuel_tx::Transaction::Script(tx) => { - Self::Compact::Script(tx.compact(ctx)) - } - fuel_tx::Transaction::Create(tx) => { - Self::Compact::Create(tx.compact(ctx)) - } - fuel_tx::Transaction::Mint(tx) => Self::Compact::Mint(tx.compact(ctx)), - } - } - - fn decompact(compact: Self::Compact, reg: &R) -> Self - where - R: db::RegistryRead, - { - match compact { - super::Transaction::Script(tx) => { - fuel_tx::Transaction::Script(fuel_tx::Script::decompact(tx, reg)) - } - super::Transaction::Create(tx) => { - fuel_tx::Transaction::Create(fuel_tx::Create::decompact(tx, reg)) - } - super::Transaction::Mint(tx) => { - fuel_tx::Transaction::Mint(fuel_tx::Mint::decompact(tx, reg)) - } - } - } - } - - impl Compactable for fuel_tx::Script { - type Compact = super::Script; - - fn count(&self) -> CountPerTable { - let mut sum = CountPerTable { - ScriptCode: 1, - Witness: self.witnesses().len(), - ..Default::default() - }; - for input in self.inputs() { - sum += ::count(input); - } - for output in self.outputs() { - sum += ::count(output); - } - sum - } - - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, - { - Self::Compact { - script_gas_limit: *self.script_gas_limit(), - script: ctx.to_key::(self.script().clone()), - script_data: 
self.script_data().clone(), - policies: self.policies().clone(), - inputs: self.inputs().iter().map(|i| i.compact(ctx)).collect(), - outputs: self.outputs().iter().map(|o| o.compact(ctx)).collect(), - witnesses: self.witnesses().iter().map(|w| ctx.to_key(*w)).collect(), - receipts_root: *self.receipts_root(), - } - } - - fn decompact(compact: Self::Compact, reg: &R) -> Self - where - R: db::RegistryRead, - { - Self { - script_gas_limit: compact.script_gas_limit, - script: reg.read::(compact.script), - script_data: compact.script_data, - policies: compact.policies, - inputs: compact.inputs().map(|i| i.decompact(reg)).collect(), - outputs: compact.outputs().map(|o| o.decompact(reg)).collect(), - witnesses: compact.witnesses().map(|w| w.decompact(reg)).collect(), - receipts_root: compact.receipts_root, - } - } - } - - impl Compactable for fuel_tx::Create { - type Compact = super::Create; - - fn count(&self) -> CountPerTable { - let sum = CountPerTable { - Witness: self.witnesses().len(), - ..Default::default() - }; - for input in self.inputs() { - sum += input.count(); - } - for output in self.outputs() { - sum += output.count(); - } - sum - } - - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, - { - Self::Compact { - bytecode_length: self.bytecode_length, - bytecode_witness_index: self.bytecode_witness_index, - policies: self.policies().clone(), - storage_slots: self.storage_slots().clone(), - inputs: self.inputs().map(|i| i.compact(ctx)).collect(), - outputs: self.outputs().map(|o| o.compact(ctx)).collect(), - witnesses: self.witnesses().map(|w| w.compact(ctx)).collect(), - salt: *self.salt(), - } - } - - fn decompact(compact: Self::Compact, reg: &R) -> Self - where - R: db::RegistryRead, - { - Self { - bytecode_length: compact.bytecode_length, - bytecode_witness_index: compact.bytecode_witness_index, - policies: compact.policies, - storage_slots: compact.storage_slots, - inputs: 
compact - .inputs() - .map(|i| fuel_tx::Input::decompact(i, reg)) - .collect(), - outputs: compact.outputs().map(|o| o.decompact(reg)).collect(), - witnesses: compact.witnesses().map(|w| w.decompact(reg)).collect(), - salt: compact.salt, - } - } - } - - impl Compactable for fuel_tx::Mint { - type Compact = super::Mint; - - fn count(&self) -> CountPerTable { - let sum = CountPerTable { - AssetId: 1, - Witness: self.witnesses().len(), - ..Default::default() - }; - for input in self.inputs() { - sum += input.count(); - } - for output in self.outputs() { - sum += output.count(); - } - sum - } - - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, - { - Self::Compact { - tx_pointer: self.tx_pointer(), - input_contract: self.input_contract.compact(ctx), - output_contract: self.output_contract.compact(ctx), - mint_amount: self.mint_amount, - mint_asset_id: ctx.to_key::(self.mint_asset_id), - } - } - - fn decompact(compact: Self::Compact, reg: &R) -> Self - where - R: db::RegistryRead, - { - Self { - tx_pointer: compact.tx_pointer, - input_contract: compact.input_contract.decompact(reg), - output_contract: compact.output_contract.decompact(reg), - mint_amount: compact.mint_amount, - mint_asset_id: reg.read::(compact.mint_asset_id), - } - } - } - - impl Compactable for fuel_tx::input::Input { - type Compact = super::Input; - - fn count(&self) -> CountPerTable { - match self { - fuel_tx::input::Input::CoinSigned(_) => CountPerTable { - AssetId: 1, - Address: 1, - Witness: 1, - ..Default::default() - }, - fuel_tx::input::Input::CoinPredicate(_) => CountPerTable { - AssetId: 1, - Address: 1, - Witness: 1, - ..Default::default() - }, - fuel_tx::input::Input::Contract(_) => CountPerTable { - AssetId: 1, - ..Default::default() - }, - fuel_tx::input::Input::MessageCoinSigned(_) => CountPerTable { - AssetId: 1, - Address: 2, - Witness: 1, - ..Default::default() - }, - 
fuel_tx::input::Input::MessageCoinPredicate(_) => CountPerTable { - AssetId: 1, - Address: 2, - Witness: 1, - ..Default::default() - }, - fuel_tx::input::Input::MessageDataSigned(_) => CountPerTable { - AssetId: 1, - Address: 2, - Witness: 1, - ..Default::default() - }, - fuel_tx::input::Input::MessageDataPredicate(_) => CountPerTable { - AssetId: 1, - Address: 2, - Witness: 1, - ..Default::default() - }, - } - } - - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, - { - match self { - fuel_tx::input::Input::CoinSigned(input) => { - Self::Compact::CoinSigned(input.compact(ctx)) - } - fuel_tx::input::Input::CoinPredicate(input) => { - Self::Compact::CoinPredicate(input.compact(ctx)) - } - fuel_tx::input::Input::Contract(input) => { - Self::Compact::Contract(input.compact(ctx)) - } - fuel_tx::input::Input::MessageCoinSigned(input) => { - Self::Compact::MessageCoinSigned(input.compact(ctx)) - } - fuel_tx::input::Input::MessageCoinPredicate(input) => { - Self::Compact::MessageCoinPredicate(input.compact(ctx)) - } - fuel_tx::input::Input::MessageDataSigned(input) => { - Self::Compact::MessageDataSigned(input.compact(ctx)) - } - fuel_tx::input::Input::MessageDataPredicate(input) => { - Self::Compact::MessageDataPredicate(input.compact(ctx)) - } - } - } - - fn decompact(compact: Self::Compact, reg: &R) -> Self - where - R: db::RegistryRead, - { - todo!() - } - } - - impl Compactable for fuel_tx::output::Output { - type Compact = super::Output; - - fn count(&self) -> CountPerTable { - match self { - fuel_tx::input::Input::CoinSigned(_) => CountPerTable { - AssetId: 1, - Address: 1, - Witness: 1, - ..Default::default() - }, - fuel_tx::input::Input::CoinPredicate(_) => CountPerTable { - AssetId: 1, - Address: 1, - Witness: 1, - ..Default::default() - }, - fuel_tx::input::Input::Contract(_) => CountPerTable { - AssetId: 1, - ..Default::default() - }, - 
fuel_tx::input::Input::MessageCoinSigned(_) => CountPerTable { - AssetId: 1, - Address: 2, - Witness: 1, - ..Default::default() - }, - fuel_tx::input::Input::MessageCoinPredicate(_) => CountPerTable { - AssetId: 1, - Address: 2, - Witness: 1, - ..Default::default() - }, - fuel_tx::input::Input::MessageDataSigned(_) => CountPerTable { - AssetId: 1, - Address: 2, - Witness: 1, - ..Default::default() - }, - fuel_tx::input::Input::MessageDataPredicate(_) => CountPerTable { - AssetId: 1, - Address: 2, - Witness: 1, - ..Default::default() - }, - } - } - - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, - { - match self { - fuel_tx::input::Input::CoinSigned(input) => { - Self::Compact::CoinSigned(input.compact(ctx)) - } - fuel_tx::input::Input::CoinPredicate(input) => { - Self::Compact::CoinPredicate(input.compact(ctx)) - } - fuel_tx::input::Input::Contract(input) => { - Self::Compact::Contract(input.compact(ctx)) - } - fuel_tx::input::Input::MessageCoinSigned(input) => { - Self::Compact::MessageCoinSigned(input.compact(ctx)) - } - fuel_tx::input::Input::MessageCoinPredicate(input) => { - Self::Compact::MessageCoinPredicate(input.compact(ctx)) - } - fuel_tx::input::Input::MessageDataSigned(input) => { - Self::Compact::MessageDataSigned(input.compact(ctx)) - } - fuel_tx::input::Input::MessageDataPredicate(input) => { - Self::Compact::MessageDataPredicate(input.compact(ctx)) - } - } - } - - fn decompact(compact: Self::Compact, reg: &R) -> Self - where - R: db::RegistryRead, - { - todo!() - } - } -} diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index 586408ab50e..76ffff8d64a 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -32,6 +32,7 @@ zeroize = "1.5" [features] default = ["std"] serde = ["dep:serde", "fuel-vm-private/serde"] +da-compression = ["fuel-vm-private/da-compression"] std = ["fuel-vm-private/std"] random = ["dep:rand", 
"fuel-vm-private/random"] test-helpers = ["random", "fuel-vm-private/test-helpers"] From e6b17266ac0b05d3a3593db473e0b184b8a50a23 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 31 Jan 2024 11:15:40 +0200 Subject: [PATCH 013/112] Correctly compact sequence types --- crates/compression/src/compression.rs | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/crates/compression/src/compression.rs b/crates/compression/src/compression.rs index b413b8ff531..e4b8b136403 100644 --- a/crates/compression/src/compression.rs +++ b/crates/compression/src/compression.rs @@ -152,7 +152,7 @@ impl Compactable for [T; S] where T: Compactable + Clone + Serialize + for<'a> Deserialize<'a>, { - type Compact = ArrayWrapper; + type Compact = ArrayWrapper; fn count(&self) -> CountPerTable { let mut count = CountPerTable::default(); @@ -162,18 +162,18 @@ where count } - fn compact(&self, _ctx: &mut CompactionContext) -> Self::Compact + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact where R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, { - ArrayWrapper(self.clone()) + ArrayWrapper(self.clone().map(|item| item.compact(ctx))) } - fn decompact(compact: Self::Compact, _reg: &R) -> Self + fn decompact(compact: Self::Compact, reg: &R) -> Self where R: db::RegistryRead, { - compact.0 + compact.0.map(|item| T::decompact(item, reg)) } } @@ -181,7 +181,7 @@ impl Compactable for Vec where T: Compactable + Clone + Serialize + for<'a> Deserialize<'a>, { - type Compact = Self; + type Compact = Vec; fn count(&self) -> CountPerTable { let mut count = CountPerTable::default(); @@ -191,18 +191,21 @@ where count } - fn compact(&self, _ctx: &mut CompactionContext) -> Self::Compact + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact where R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, { - self.clone() + self.iter().map(|item| item.compact(ctx)).collect() } - fn decompact(compact: Self::Compact, _reg: &R) -> 
Self + fn decompact(compact: Self::Compact, reg: &R) -> Self where R: db::RegistryRead, { compact + .into_iter() + .map(|item| T::decompact(item, reg)) + .collect() } } From 7fbb4c8f4bc68825c1a5ba2058611cdd8a4344f2 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 31 Jan 2024 11:17:41 +0200 Subject: [PATCH 014/112] WIP: roundtrip and size tests --- crates/compression-alt/src/block.rs | 59 +++++++++- crates/compression-derive/src/compact.rs | 117 ++++++++++++++++++- crates/compression/src/registry/in_memory.rs | 42 +++++++ 3 files changed, 206 insertions(+), 12 deletions(-) diff --git a/crates/compression-alt/src/block.rs b/crates/compression-alt/src/block.rs index dcef40bbb46..f815814ed6a 100644 --- a/crates/compression-alt/src/block.rs +++ b/crates/compression-alt/src/block.rs @@ -27,6 +27,8 @@ pub enum CompressedBlock { #[cfg(test)] mod tests { + use std::array; + use fuel_core_compression::{ Compactable, CompactionContext, @@ -68,17 +70,62 @@ mod tests { } #[test] - fn compact_transactions() { + fn compact_transaction() { let tx = Transaction::default_test_tx(); - let mut registry = InMemoryRegistry::default(); let compacted = CompactionContext::run(&mut registry, tx.clone()); + let decompacted = Transaction::decompact(compacted.clone(), ®istry); + assert_eq!(tx, decompacted); + + // Check size reduction + let compressed_original = postcard::to_allocvec(&tx).unwrap(); + let compressed_compact = postcard::to_allocvec(&compacted).unwrap(); + assert!(compressed_compact.len() < compressed_original.len() / 2); // Arbitrary threshold + } + + #[test] + fn compact_transaction_twice_gives_equal_result() { + let tx = Transaction::default_test_tx(); + let mut registry = InMemoryRegistry::default(); + let compacted1 = CompactionContext::run(&mut registry, tx.clone()); + let compacted2 = CompactionContext::run(&mut registry, tx.clone()); + let compressed1 = postcard::to_allocvec(&compacted1).unwrap(); + let compressed2 = postcard::to_allocvec(&compacted2).unwrap(); + 
assert_eq!(compressed1, compressed2); + } + + #[test] + fn sizes_of_repeated_tx_make_sense() { + let tx = Transaction::default_test_tx(); - let compressed = postcard::to_allocvec(&compacted).unwrap(); - dbg!(compressed.len()); + let sizes: [usize; 3] = array::from_fn(|i| { + let mut registry = InMemoryRegistry::default(); - let decompacted = Transaction::decompact(compacted, ®istry); + let original = CompressedBlock::V0 { + registrations: ChangesPerTable::default(), + header: Header { + da_height: DaBlockHeight::default(), + prev_root: Default::default(), + height: 3u32.into(), + time: Tai64::now(), + }, + transactions: vec![ + CompactionContext::run(&mut registry, tx.clone()); + i + 1 + ], + }; - assert_eq!(tx, decompacted); + let compressed = postcard::to_allocvec(&original).unwrap(); + compressed.len() + }); + + dbg!(sizes); + panic!(); + + assert!(sizes[0] < sizes[1]); + assert!(sizes[1] < sizes[2]); + let d1 = sizes[1] - sizes[0]; + let d2 = sizes[2] - sizes[1]; + assert!(d2 < d1); } } diff --git a/crates/compression-derive/src/compact.rs b/crates/compression-derive/src/compact.rs index e3fe4b6a615..180d7d92239 100644 --- a/crates/compression-derive/src/compact.rs +++ b/crates/compression-derive/src/compact.rs @@ -1,3 +1,5 @@ +use core::panic; + use proc_macro2::TokenStream as TokenStream2; use quote::{ format_ident, @@ -67,6 +69,7 @@ fn construct_compact( FieldAttrs::Skip => quote! {}, FieldAttrs::Normal => { quote! { + println!("recurse compact {}", stringify!(#ty)); let #cname = <#ty as Compactable>::compact(&#binding, ctx); } } @@ -78,6 +81,7 @@ fn construct_compact( > }; quote! { + println!("to_key {}: {}", stringify!(#cname), stringify!(#cty)); let #cname: #cty = ctx.to_key( ::Type::from(#binding.clone()) ); @@ -115,6 +119,67 @@ fn construct_compact( #compact #construct_fields } } +/// Construct original version of the struct from the compacted one +fn construct_decompact( + // The original structure to construct, i.e. 
struct name or enum variant path + original: &TokenStream2, + variant: &synstructure::VariantInfo<'_>, +) -> TokenStream2 { + let bound_fields: TokenStream2 = variant + .bindings() + .iter() + .map(|binding| { + let attrs = FieldAttrs::parse(&binding.ast().attrs); + let ty = &binding.ast().ty; + let cname = format_ident!("{}_c", binding.binding); + + match attrs { + FieldAttrs::Skip => quote! { + let #cname = Default::default(); + }, + FieldAttrs::Normal => { + quote! { + let #cname = <#ty as Compactable>::decompact(#binding, reg); + } + } + FieldAttrs::Registry(registry) => { + let reg_ident = format_ident!("{}", registry); + quote! { + let raw: ::Type = reg.read( + #binding + ); + let #cname = raw.into(); + } + } + } + }) + .collect(); + + let construct_fields: TokenStream2 = variant + .bindings() + .iter() + .map(|binding| { + let cname = format_ident!("{}_c", binding.binding); + if let Some(fname) = &binding.ast().ident { + quote! { #fname: #cname, } + } else { + quote! { #cname, } + } + }) + .collect(); + + let construct_fields = match variant.ast().fields { + syn::Fields::Named(_) => quote! {{ #construct_fields }}, + syn::Fields::Unnamed(_) => quote! {(#construct_fields)}, + syn::Fields::Unit => quote! {}, + }; + + quote! { + #bound_fields + #original #construct_fields + } +} + // Sum of Compactable::count() of all fields. fn sum_counts(variant: &synstructure::VariantInfo<'_>) -> TokenStream2 { variant @@ -143,6 +208,37 @@ fn sum_counts(variant: &synstructure::VariantInfo<'_>) -> TokenStream2 { ) } +/// Generate a match arm for each variant of the compacted structure +/// using the given function to generate the pattern body. 
+fn each_variant_compact) -> TokenStream2>( + s: &synstructure::Structure, + compact_name: &TokenStream2, + mut f: F, +) -> TokenStream2 { + s.variants() + .iter() + .map(|variant| { + // Modify the binding pattern to match the compact variant + let mut v2 = variant.clone(); + v2.filter(|field| { + let attrs = FieldAttrs::parse(&field.ast().attrs); + !matches!(attrs, FieldAttrs::Skip) + }); + v2.bindings_mut().iter_mut().for_each(|binding| { + binding.style = synstructure::BindStyle::Move; + }); + let mut p = v2.pat().into_iter(); + let _ = p.next().expect("pattern always begins with an identifier"); + let p = quote! { #compact_name #(#p)* }; + + let decompacted = f(variant); + quote! { + #p => { #decompacted } + } + }) + .collect() +} + fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { assert_eq!(s.variants().len(), 1, "structs must have one variant"); let variant: &synstructure::VariantInfo = &s.variants()[0]; @@ -152,9 +248,14 @@ fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { let defs = field_defs(&variant.ast().fields); let count_per_variant = s.each_variant(|variant| sum_counts(variant)); - let construct_per_variant = + let compact_per_variant = s.each_variant(|variant| construct_compact("e! {#compact_name}, variant)); + let decompact_per_variant = + each_variant_compact(s, "e! {#compact_name}, |variant| { + construct_decompact("e! {#name}, variant) + }); + let semi = match variant.ast().fields { syn::Fields::Named(_) => quote! {}, syn::Fields::Unnamed(_) => quote! 
{;}, @@ -184,14 +285,13 @@ fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact where R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex { - match self { #construct_per_variant } + match self { #compact_per_variant } } fn decompact(compact: Self::Compact, reg: &R) -> Self where R: db::RegistryRead { - // #decompact_per_field; - todo!("decompact struct") + match compact { #decompact_per_variant } } } }); @@ -237,6 +337,12 @@ fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { ) }); + let decompact_per_variant = + each_variant_compact(s, "e! {#compact_name}, |variant| { + let vname = variant.ast().ident.clone(); + construct_decompact("e! { #name :: #vname }, variant) + }); + let impls = s.gen_impl(quote! { use ::fuel_core_compression::{db, tables, Table, Key, Compactable, CountPerTable, CompactionContext}; @@ -256,8 +362,7 @@ fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { fn decompact(compact: Self::Compact, reg: &R) -> Self where R: db::RegistryRead { - // #decompact_per_field; - todo!("decompact enum") + match compact { #decompact_per_variant } } } }); diff --git a/crates/compression/src/registry/in_memory.rs b/crates/compression/src/registry/in_memory.rs index 2c025e7b319..5d642e9be76 100644 --- a/crates/compression/src/registry/in_memory.rs +++ b/crates/compression/src/registry/in_memory.rs @@ -56,3 +56,45 @@ impl RegistryIndex for InMemoryRegistry { None } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + tables, + Key, + }; + + #[test] + fn in_memory_registry_works() { + let mut reg = InMemoryRegistry::default(); + + let k1: Key = reg.next_key(); + let k2: Key = reg.next_key(); + assert_eq!(k1.next(), k2); + + // Empty + assert_eq!( + reg.read(Key::::try_from(100).unwrap()), + [0; 32] + ); + + // Write + reg.batch_write( + Key::::from_raw(RawKey::try_from(100u32).unwrap()), + vec![[1; 32], [2; 32]], + ); + + // Read + assert_eq!( + 
reg.read(Key::::try_from(100).unwrap()), + [1; 32] + ); + + // Index + assert_eq!( + reg.index_lookup(&[1; 32]), + Some(Key::::try_from(100).unwrap()) + ); + } +} From 8ae740700073934eacfa14b974e30474527c3868 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 31 Jan 2024 18:17:49 +0200 Subject: [PATCH 015/112] Default key, performance fixes, misc stuff --- Cargo.lock | 3 +- crates/compression-alt/Cargo.toml | 11 +- crates/compression-alt/src/block.rs | 104 ++++++++++++++---- crates/compression-derive/src/compact.rs | 12 +- crates/compression/Cargo.toml | 1 + crates/compression/src/compression.rs | 81 +++++++++++--- .../compression/src/registry/block_section.rs | 54 +++++++-- crates/compression/src/registry/db.rs | 12 +- crates/compression/src/registry/in_memory.rs | 44 +++++--- crates/compression/src/registry/key.rs | 74 ++++++++++--- crates/compression/src/registry/mod.rs | 49 +++++++-- 11 files changed, 344 insertions(+), 101 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 23222063be4..d37d96c22f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2777,6 +2777,7 @@ name = "fuel-core-compression" version = "0.22.0" dependencies = [ "bincode", + "fuel-core-compression", "fuel-core-compression-derive", "fuel-core-types", "paste", @@ -2789,10 +2790,8 @@ dependencies = [ name = "fuel-core-compression-alt" version = "0.22.0" dependencies = [ - "bincode", "fuel-core-compression", "fuel-core-types", - "paste", "postcard", "serde", ] diff --git a/crates/compression-alt/Cargo.toml b/crates/compression-alt/Cargo.toml index 6d12d0c2593..ef479b62cfd 100644 --- a/crates/compression-alt/Cargo.toml +++ b/crates/compression-alt/Cargo.toml @@ -11,16 +11,11 @@ repository = { workspace = true } description = "Compression and decompression of Fuel blocks for DA storage." 
[dependencies] -fuel-core-types = { workspace = true, features = ["serde"] } fuel-core-compression = { workspace = true } - -serde = { version = "1.0", features = ["derive"] } - +fuel-core-types = { workspace = true, features = ["serde", "da-compression"] } postcard = { version = "1.0", features = ["use-std"] } -bincode = "1.3" - -paste = "1.0" +serde = { version = "1.0", features = ["derive"] } [dev-dependencies] -fuel-core-types = { workspace = true, features = ["test-helpers"] } fuel-core-compression = { workspace = true, features = ["test-helpers"] } +fuel-core-types = { workspace = true, features = ["test-helpers"] } diff --git a/crates/compression-alt/src/block.rs b/crates/compression-alt/src/block.rs index f815814ed6a..adc51ec61fa 100644 --- a/crates/compression-alt/src/block.rs +++ b/crates/compression-alt/src/block.rs @@ -30,6 +30,7 @@ mod tests { use std::array; use fuel_core_compression::{ + Compact, Compactable, CompactionContext, InMemoryRegistry, @@ -45,12 +46,12 @@ mod tests { #[test] fn postcard_roundtrip() { let original = CompressedBlock::V0 { - registrations: ChangesPerTable::default(), + registrations: ChangesPerTable::from_start_keys(Default::default()), header: Header { da_height: DaBlockHeight::default(), prev_root: Default::default(), height: 3u32.into(), - time: Tai64::now(), + time: Tai64::UNIX_EPOCH, }, transactions: vec![], }; @@ -64,7 +65,7 @@ mod tests { transactions, } = decompressed; - assert_eq!(registrations, ChangesPerTable::default()); + assert!(registrations.is_empty()); assert_eq!(header.height, 3u32.into()); assert!(transactions.is_empty()); } @@ -73,7 +74,7 @@ mod tests { fn compact_transaction() { let tx = Transaction::default_test_tx(); let mut registry = InMemoryRegistry::default(); - let compacted = CompactionContext::run(&mut registry, tx.clone()); + let (compacted, _) = CompactionContext::run(&mut registry, tx.clone()); let decompacted = Transaction::decompact(compacted.clone(), ®istry); assert_eq!(tx, decompacted); @@ 
-87,8 +88,10 @@ mod tests { fn compact_transaction_twice_gives_equal_result() { let tx = Transaction::default_test_tx(); let mut registry = InMemoryRegistry::default(); - let compacted1 = CompactionContext::run(&mut registry, tx.clone()); - let compacted2 = CompactionContext::run(&mut registry, tx.clone()); + let (compacted1, changes1) = CompactionContext::run(&mut registry, tx.clone()); + let (compacted2, changes2) = CompactionContext::run(&mut registry, tx.clone()); + assert!(!changes1.is_empty()); + assert!(changes2.is_empty()); let compressed1 = postcard::to_allocvec(&compacted1).unwrap(); let compressed2 = postcard::to_allocvec(&compacted2).unwrap(); assert_eq!(compressed1, compressed2); @@ -98,34 +101,95 @@ mod tests { fn sizes_of_repeated_tx_make_sense() { let tx = Transaction::default_test_tx(); - let sizes: [usize; 3] = array::from_fn(|i| { + let sizes: [usize; 4] = array::from_fn(|i| { + // Registry recreated for each block in this test let mut registry = InMemoryRegistry::default(); + let (transactions, registrations) = + CompactionContext::run(&mut registry, vec![tx.clone(); i]); + let original = CompressedBlock::V0 { - registrations: ChangesPerTable::default(), + registrations, header: Header { da_height: DaBlockHeight::default(), prev_root: Default::default(), height: 3u32.into(), - time: Tai64::now(), + time: Tai64::UNIX_EPOCH, }, - transactions: vec![ - CompactionContext::run(&mut registry, tx.clone()); - i + 1 - ], + transactions, }; let compressed = postcard::to_allocvec(&original).unwrap(); compressed.len() }); - dbg!(sizes); - panic!(); + assert!( + sizes.windows(2).all(|w| w[0] < w[1]), + "Sizes should be in strictly ascending order" + ); + let deltas: Vec<_> = sizes.windows(2).map(|w| w[1] - w[0]).collect(); + assert!(deltas[0] > deltas[1], "Initial delta should be larger"); + assert!(deltas[1] == deltas[2], "Later delta should be constant"); + } + + #[test] + fn same_compact_tx_is_smaller_in_next_block() { + let tx = 
Transaction::default_test_tx(); + + let mut registry = InMemoryRegistry::default(); + + let sizes: [usize; 3] = array::from_fn(|_| { + let (transactions, registrations) = + CompactionContext::run(&mut registry, vec![tx.clone()]); - assert!(sizes[0] < sizes[1]); - assert!(sizes[1] < sizes[2]); - let d1 = sizes[1] - sizes[0]; - let d2 = sizes[2] - sizes[1]; - assert!(d2 < d1); + let original = CompressedBlock::V0 { + registrations, + header: Header { + da_height: DaBlockHeight::default(), + prev_root: Default::default(), + height: 3u32.into(), + time: Tai64::UNIX_EPOCH, + }, + transactions, + }; + + let compressed = postcard::to_allocvec(&original).unwrap(); + compressed.len() + }); + + assert!(sizes[0] > sizes[1], "Size must decrease after first block"); + assert!( + sizes[1] == sizes[2], + "Size must be constant after first block" + ); + } + + #[test] + #[ignore = "This test is slow"] + fn compact_registry_key_wraparound() { + use fuel_core_types::fuel_types::AssetId; + + #[derive(Debug, Clone, Copy, PartialEq, Compact)] + struct Example { + #[da_compress(registry = "AssetId")] + a: AssetId, + } + + let mut registry = InMemoryRegistry::default(); + for i in 0u32..((1 << 24) + 100) { + if i % 10000 == 0 { + println!("i = {} ({})", i, (i as f32) / (1 << 24) as f32); + } + let mut bytes = [0x00; 32]; + bytes[..4].copy_from_slice(&i.to_be_bytes()); + let target = Example { + a: AssetId::from(bytes), + }; + let (compact, _) = CompactionContext::run(&mut registry, target); + if i % 99 == 0 { + let decompact = Example::decompact(compact, ®istry); + assert_eq!(decompact, target); + } + } } } diff --git a/crates/compression-derive/src/compact.rs b/crates/compression-derive/src/compact.rs index 180d7d92239..2ecb2dd4465 100644 --- a/crates/compression-derive/src/compact.rs +++ b/crates/compression-derive/src/compact.rs @@ -69,7 +69,6 @@ fn construct_compact( FieldAttrs::Skip => quote! {}, FieldAttrs::Normal => { quote! 
{ - println!("recurse compact {}", stringify!(#ty)); let #cname = <#ty as Compactable>::compact(&#binding, ctx); } } @@ -81,7 +80,6 @@ fn construct_compact( > }; quote! { - println!("to_key {}: {}", stringify!(#cname), stringify!(#cty)); let #cname: #cty = ctx.to_key( ::Type::from(#binding.clone()) ); @@ -197,7 +195,7 @@ fn sum_counts(variant: &synstructure::VariantInfo<'_>) -> TokenStream2 { FieldAttrs::Registry(registry) => { let reg_ident = format_ident!("{}", registry); quote! { - CountPerTable { #reg_ident: 1, ..CountPerTable::default() } + CountPerTable::#reg_ident(1) } } } @@ -265,8 +263,7 @@ fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { let g = s.ast().generics.clone(); let w = g.where_clause.clone(); let compact = quote! { - #[derive(Clone)] - #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] + #[derive(Clone, serde::Serialize, serde::Deserialize)] #[doc = concat!("Compacted version of `", stringify!(#name), "`.")] pub struct #compact_name #g #w #defs #semi }; @@ -320,8 +317,7 @@ fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { }) .collect(); let enumdef = quote! 
{ - #[derive(Clone)] - #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] + #[derive(Clone, serde::Serialize, serde::Deserialize)] #[doc = concat!("Compacted version of `", stringify!(#name), "`.")] pub enum #compact_name { #variant_defs } }; @@ -386,7 +382,7 @@ pub fn compact_derive(mut s: synstructure::Structure) -> TokenStream2 { _ => panic!("Can't derive `Compact` for `union`s"), }, }; - println!("{}", ts); + // println!("{}", ts); let _ = std::fs::write(format!("/tmp/derive/{name}.rs"), ts.to_string()); ts } diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 3c1670cfa82..b23eab48496 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -22,6 +22,7 @@ paste = "1.0" fuel-core-compression-derive = { path = "../compression-derive" } [dev-dependencies] +fuel-core-compression = { path = "." } # Self-dependency needed by test for macros fuel-core-types = { workspace = true, features = ["serde"] } [features] diff --git a/crates/compression/src/compression.rs b/crates/compression/src/compression.rs index e4b8b136403..483b16794c3 100644 --- a/crates/compression/src/compression.rs +++ b/crates/compression/src/compression.rs @@ -44,7 +44,12 @@ where + db::RegistryIndex + db::RegistryWrite, { - pub fn run(reg: &'a mut R, target: C) -> C::Compact { + /// Run the compaction for the given target, returning the compacted data. + /// Changes are applied to the registry, and then returned as well. 
+ pub fn run( + reg: &'a mut R, + target: C, + ) -> (C::Compact, ChangesPerTable) { let start_keys = next_keys(reg); let next_keys = start_keys; let key_limits = target.count(); @@ -55,12 +60,12 @@ where start_keys, next_keys, safe_keys_start, - changes: Default::default(), + changes: ChangesPerTable::from_start_keys(start_keys), }; let compacted = target.compact(&mut ctx); ctx.changes.apply_to_registry(ctx.reg); - compacted + (compacted, ctx.changes) } } @@ -74,8 +79,17 @@ where where KeyPerTable: access::AccessCopy>, KeyPerTable: access::AccessMut>, - ChangesPerTable: access::AccessMut>, + ChangesPerTable: + access::AccessRef> + access::AccessMut>, { + // Check if the value is within the current changeset + if let Some(key) = + >>::get(&self.changes) + .lookup_value(&value) + { + return key; + } + // Check if the registry contains this value already if let Some(key) = self.reg.index_lookup::(&value) { let start: Key = self.start_keys.value(); @@ -242,7 +256,12 @@ mod tests { }, Key, }; - use fuel_core_types::fuel_types::Address; + use fuel_core_compression::Compactable as _; // Hack for derive + use fuel_core_compression_derive::Compact; + use fuel_core_types::fuel_types::{ + Address, + AssetId, + }; use serde::{ Deserialize, Serialize, @@ -296,23 +315,55 @@ mod tests { } } - fn check(target: C) - where - C::Compact: std::fmt::Debug, - { - let mut registry = InMemoryRegistry::default(); + #[derive(Debug, Clone, PartialEq, Compact)] + struct AutomaticExample { + #[da_compress(registry = "AssetId")] + a: AssetId, + #[da_compress(registry = "AssetId")] + b: AssetId, + c: u32, + } - let compacted = CompactionContext::run(&mut registry, target.clone()); - let decompacted = C::decompact(compacted, ®istry); - assert_eq!(target, decompacted); + #[test] + fn test_compaction_properties() { + let a = ManualExample { + a: Address::from([1u8; 32]), + b: Address::from([2u8; 32]), + c: 3, + }; + assert_eq!(a.count().Address, 2); + assert_eq!(a.count().AssetId, 0); + + let b = 
AutomaticExample { + a: AssetId::from([1u8; 32]), + b: AssetId::from([2u8; 32]), + c: 3, + }; + assert_eq!(b.count().Address, 0); + assert_eq!(b.count().AssetId, 2); } #[test] fn test_compaction_roundtrip() { - check(ManualExample { + let target = ManualExample { a: Address::from([1u8; 32]), b: Address::from([2u8; 32]), c: 3, - }); + }; + let mut registry = InMemoryRegistry::default(); + let (compacted, _) = CompactionContext::run(&mut registry, target.clone()); + let decompacted = ManualExample::decompact(compacted, ®istry); + assert_eq!(target, decompacted); + + let target = AutomaticExample { + a: AssetId::from([1u8; 32]), + b: AssetId::from([2u8; 32]), + c: 3, + }; + let mut registry = fuel_core_compression::InMemoryRegistry::default(); + let (compacted, _) = + fuel_core_compression::CompactionContext::run(&mut registry, target.clone()); + let decompacted = AutomaticExample::decompact(compacted, ®istry); + assert_eq!(target, decompacted); } } diff --git a/crates/compression/src/registry/block_section.rs b/crates/compression/src/registry/block_section.rs index efb5e562855..d389c564ba7 100644 --- a/crates/compression/src/registry/block_section.rs +++ b/crates/compression/src/registry/block_section.rs @@ -13,8 +13,7 @@ use super::{ }; /// New registrations written to a specific table. -/// Default value is an empty write. -#[derive(Debug, Clone, PartialEq, Eq, Default)] +#[derive(Clone, PartialEq, Eq)] pub struct WriteTo { /// The values are inserted starting from this key pub start_key: Key, @@ -22,6 +21,44 @@ pub struct WriteTo { pub values: Vec, } +impl fmt::Debug for WriteTo +where + T::Type: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.values.is_empty() { + return f.write_str("WriteTo::EMPTY"); + } + + f.debug_struct("WriteTo") + .field("start_key", &self.start_key) + .field("values", &self.values) + .finish() + } +} + +impl WriteTo +where + T::Type: PartialEq, +{ + /// Reverse lookup. 
+ /// TODO: possibly add a lookup table for this, if deemed necessary + pub fn lookup_value(&self, needle: &T::Type) -> Option> { + if *needle == T::Type::default() { + return Some(Key::DEFAULT_VALUE); + } + + let mut key = self.start_key; + for v in &self.values { + if v == needle { + return Some(key); + } + key = key.next(); + } + None + } +} + /// Custom serialization is used to omit the start_key when the sequence is empty impl Serialize for WriteTo where @@ -50,7 +87,7 @@ where deserializer.deserialize_tuple( 2, Self { - start_key: Key::default(), + start_key: Key::ZERO, values: Vec::new(), }, ) @@ -78,7 +115,7 @@ impl<'de, T: Table + Deserialize<'de>> serde::de::Visitor<'de> for WriteTo { &"WriteTo<_> with 2 elements", ))?; Ok(WriteTo { - start_key: Key::default(), + start_key: Key::ZERO, values, }) } else { @@ -119,11 +156,11 @@ mod tests { values: vec![*AssetId::from([0xa0; 32]), *AssetId::from([0xa1; 32])], }, Address: WriteTo { - start_key: Key::default(), + start_key: Key::ZERO, values: vec![*Address::from([0xc0; 32])], }, ScriptCode: WriteTo { - start_key: Key::default(), + start_key: Key::ZERO, values: vec![ vec![op::addi(0x20, 0x20, 1), op::ret(0)] .into_iter() @@ -133,7 +170,10 @@ mod tests { .collect(), ], }, - Witness: WriteTo::default(), + Witness: WriteTo { + start_key: Key::ZERO, + values: vec![], + }, }, }; diff --git a/crates/compression/src/registry/db.rs b/crates/compression/src/registry/db.rs index dd543833595..43ada92bd72 100644 --- a/crates/compression/src/registry/db.rs +++ b/crates/compression/src/registry/db.rs @@ -3,18 +3,28 @@ use super::{ Table, }; +/// Get next key for the given table. This is where the next write should start at. +/// The result of this function is just a suggestion, and the caller may choose to +/// ignore it, although it's rare that they would know better. 
pub trait RegistrySelectNextKey { - fn next_key(&mut self) -> Key; + /// Get next set of start keys + fn next_key(&self) -> Key; } +/// Read a value from the registry by key pub trait RegistryRead { + /// Read a value from the registry by key fn read(&self, key: Key) -> T::Type; } +/// Write values to the registry pub trait RegistryWrite { + /// Write a continuous sequence of values to the registry fn batch_write(&mut self, start_key: Key, values: Vec); } +/// Lookup a key by value pub trait RegistryIndex { + /// Lookup a key by value fn index_lookup(&self, value: &T::Type) -> Option>; } diff --git a/crates/compression/src/registry/in_memory.rs b/crates/compression/src/registry/in_memory.rs index 5d642e9be76..62470090f2e 100644 --- a/crates/compression/src/registry/in_memory.rs +++ b/crates/compression/src/registry/in_memory.rs @@ -12,19 +12,21 @@ use super::{ pub struct InMemoryRegistry { next_keys: HashMap<&'static str, RawKey>, storage: HashMap<&'static str, HashMap>>, + index: HashMap<&'static str, HashMap, RawKey>>, } impl RegistrySelectNextKey for InMemoryRegistry { - fn next_key(&mut self) -> Key { - let next_key = self.next_keys.entry(T::NAME).or_default(); - let key = Key::::from_raw(*next_key); - *next_key = next_key.next(); - key + fn next_key(&self) -> Key { + Key::from_raw(self.next_keys.get(T::NAME).copied().unwrap_or(RawKey::ZERO)) } } impl RegistryRead for InMemoryRegistry { fn read(&self, key: Key) -> T::Type { + if key == Key::DEFAULT_VALUE { + return T::Type::default(); + } + self.storage .get(T::NAME) .and_then(|table| table.get(&key.raw())) @@ -35,24 +37,42 @@ impl RegistryRead for InMemoryRegistry { impl RegistryWrite for InMemoryRegistry { fn batch_write(&mut self, start_key: Key, values: Vec) { + let empty = values.is_empty(); + if !empty && start_key == Key::DEFAULT_VALUE { + panic!("Cannot write to the default value key"); + } let table = self.storage.entry(T::NAME).or_default(); let mut key = start_key.raw(); for value in 
values.into_iter() { - table.insert(key, postcard::to_stdvec(&value).unwrap()); + let value = postcard::to_stdvec(&value).unwrap(); + let mut prefix = value.clone(); + prefix.truncate(32); + self.index.entry(T::NAME).or_default().insert(prefix, key); + table.insert(key, value); key = key.next(); } + if !empty { + self.next_keys.insert(T::NAME, key); + } } } impl RegistryIndex for InMemoryRegistry { fn index_lookup(&self, value: &T::Type) -> Option> { + if *value == T::Type::default() { + return Some(Key::DEFAULT_VALUE); + } + let needle = postcard::to_stdvec(value).unwrap(); - let table = self.storage.get(T::NAME)?; - for (key, value) in table.iter() { - if value == &needle { - return Some(Key::from_raw(*key)); + let mut prefix = needle.clone(); + prefix.truncate(32); + if let Some(cand) = self.index.get(T::NAME)?.get(&prefix).copied() { + let cand_val = self.storage.get(T::NAME)?.get(&cand)?; + if *cand_val == needle { + return Some(Key::from_raw(cand)); } } + None } } @@ -69,10 +89,6 @@ mod tests { fn in_memory_registry_works() { let mut reg = InMemoryRegistry::default(); - let k1: Key = reg.next_key(); - let k2: Key = reg.next_key(); - assert_eq!(k1.next(), k2); - // Empty assert_eq!( reg.read(Key::::try_from(100).unwrap()), diff --git a/crates/compression/src/registry/key.rs b/crates/compression/src/registry/key.rs index 811ec5d5828..9e5545c8eff 100644 --- a/crates/compression/src/registry/key.rs +++ b/crates/compression/src/registry/key.rs @@ -1,3 +1,4 @@ +use core::fmt; use std::marker::PhantomData; use serde::{ @@ -8,34 +9,55 @@ use serde::{ use super::Table; /// Untyped key pointing to a registry table entry. -#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, Serialize, Deserialize)] +/// The last key (all bits set) is reserved for the default value and cannot be written to. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct RawKey([u8; Self::SIZE]); impl RawKey { pub const SIZE: usize = 3; - pub const MIN: Self = Self([0; Self::SIZE]); - pub const MAX: Self = Self([u8::MAX; Self::SIZE]); + pub const ZERO: Self = Self([0; Self::SIZE]); + pub const MAX_WRITABLE: Self = Self([u8::MAX; Self::SIZE]); + pub const DEFAULT_VALUE: Self = Self([u8::MAX; Self::SIZE]); pub fn as_u32(self) -> u32 { u32::from_be_bytes([0, self.0[0], self.0[1], self.0[2]]) } - /// Wraps around at limit + /// Wraps around just below max/default value. pub fn add_u32(self, rhs: u32) -> Self { let lhs = self.as_u32(); let v = lhs.wrapping_add(rhs); - let mut bytes = [0u8; 3]; - bytes.copy_from_slice(&v.to_be_bytes()[1..]); - RawKey(bytes) + let b = v.to_be_bytes(); + let raw = Self([b[1], b[2], b[3]]); + if raw == Self::DEFAULT_VALUE { + Self::ZERO + } else { + raw + } } - /// Wraps around at limit + /// Wraps around just below max/default value. pub fn next(self) -> Self { self.add_u32(1) } /// Is `self` between `start` and `end`? i.e. in the half-open logical range `start`..`end`, /// so that wrap-around cases are handled correctly. + /// + /// Panics if max/default value is used. pub fn is_between(self, start: Self, end: Self) -> bool { + assert!( + self != Self::DEFAULT_VALUE, + "Cannot use max/default value in is_between" + ); + assert!( + start != Self::DEFAULT_VALUE, + "Cannot use max/default value in is_between" + ); + assert!( + end != Self::DEFAULT_VALUE, + "Cannot use max/default value in is_between" + ); + let low = start.as_u32(); let high = end.as_u32(); let v = self.as_u32(); @@ -62,7 +84,8 @@ impl TryFrom for RawKey { } /// Typed key to a registry table entry. -#[derive(Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +/// The last key (all bits set) is reserved for the default value and cannot be written to. 
+#[derive(Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct Key(RawKey, PhantomData); impl Clone for Key { @@ -72,7 +95,19 @@ impl Clone for Key { } impl Copy for Key {} +impl PartialEq> for Key { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} + impl Key { + /// This is the first writable key. + pub const ZERO: Self = Self(RawKey::ZERO, PhantomData); + + /// This key is reserved for the default value and cannot be written to. + pub const DEFAULT_VALUE: Self = Self(RawKey::DEFAULT_VALUE, PhantomData); + pub fn raw(&self) -> RawKey { self.0 } @@ -81,12 +116,12 @@ impl Key { Self(raw, PhantomData) } - /// Wraps around at limit + /// Wraps around at limit, i.e. one below the max/default value pub fn add_u32(self, rhs: u32) -> Self { Self(self.0.add_u32(rhs), PhantomData) } - /// Wraps around at limit + /// Wraps around at limit, i.e. one below the max/default value pub fn next(self) -> Self { Self(self.0.next(), PhantomData) } @@ -97,6 +132,7 @@ impl Key { self.0.is_between(start.0, end.0) } /// Increments the key by one, and returns the previous value. + /// Skips the max/default value. 
pub fn take_next(&mut self) -> Self { let result = *self; self.0 = self.0.next(); @@ -111,9 +147,13 @@ impl TryFrom for Key { } } -impl Default for Key { - fn default() -> Self { - Self(RawKey::default(), PhantomData) +impl fmt::Debug for Key { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if *self == Self::DEFAULT_VALUE { + write!(f, "Key<{}>::DEFAULT_VALUE", T::NAME) + } else { + write!(f, "Key<{}>({})", T::NAME, self.0.as_u32()) + } } } @@ -123,11 +163,11 @@ mod tests { #[test] fn key_next() { - assert_eq!(RawKey::default().next(), RawKey([0, 0, 1])); - assert_eq!(RawKey::MIN.next().next(), RawKey([0, 0, 2])); + assert_eq!(RawKey::ZERO.next(), RawKey([0, 0, 1])); + assert_eq!(RawKey::ZERO.next().next(), RawKey([0, 0, 2])); assert_eq!(RawKey([0, 0, 255]).next(), RawKey([0, 1, 0])); assert_eq!(RawKey([0, 1, 255]).next(), RawKey([0, 2, 0])); assert_eq!(RawKey([0, 255, 255]).next(), RawKey([1, 0, 0])); - assert_eq!(RawKey::MAX.next(), RawKey::MIN); + assert_eq!(RawKey::MAX_WRITABLE.next(), RawKey::ZERO); } } diff --git a/crates/compression/src/registry/mod.rs b/crates/compression/src/registry/mod.rs index 5e8cf38aa20..4a87d462bf8 100644 --- a/crates/compression/src/registry/mod.rs +++ b/crates/compression/src/registry/mod.rs @@ -17,7 +17,7 @@ mod _private { pub trait Table: _private::Seal { const NAME: &'static str; - type Type: Default + Serialize + for<'de> Deserialize<'de>; + type Type: PartialEq + Default + Serialize + for<'de> Deserialize<'de>; } pub mod access { @@ -55,10 +55,22 @@ macro_rules! 
tables { /// One counter per table #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] #[allow(non_snake_case)] // The field names match table type names eactly + #[non_exhaustive] pub struct CountPerTable { $(pub $name: usize),* } + impl CountPerTable {$( + /// Custom constructor per table + #[allow(non_snake_case)] // The field names match table type names eactly + pub fn $name(value: usize) -> Self { + Self { + $name: value, + ..Self::default() + } + } + )*} + $( impl access::AccessCopy for CountPerTable { fn value(&self) -> usize { @@ -84,12 +96,21 @@ macro_rules! tables { } /// One key value per table - #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] + #[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)] #[allow(non_snake_case)] // The field names match table type names eactly + #[non_exhaustive] pub struct KeyPerTable { $(pub $name: Key),* } + impl Default for KeyPerTable { + fn default() -> Self { + Self { + $($name: Key::ZERO,)* + } + } + } + $( impl access::AccessCopy> for KeyPerTable { fn value(&self) -> Key { @@ -126,8 +147,9 @@ macro_rules! tables { } /// Registeration changes per table - #[derive(Debug, Clone, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] + #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] #[allow(non_snake_case)] // The field names match table type names eactly + #[non_exhaustive] pub struct ChangesPerTable { $(pub $name: WriteTo),* } @@ -137,8 +159,17 @@ macro_rules! 
tables { true $(&& self.$name.values.is_empty())* } + pub fn from_start_keys(start_keys: KeyPerTable) -> Self { + Self { + $($name: WriteTo { + start_key: start_keys.$name, + values: Vec::new(), + }),* + } + } + /// Apply changes to the registry db - pub fn apply_to_registry(self, reg: &mut R) { + pub fn apply_to_registry(&self, reg: &mut R) { $( reg.batch_write(self.$name.start_key, self.$name.values.clone()); )* @@ -228,28 +259,28 @@ mod tests { // Wrapping reg.batch_write( - Key::::from_raw(RawKey::MAX), + Key::::from_raw(RawKey::MAX_WRITABLE), vec![[3; 32], [4; 32]], ); assert_eq!( - reg.read(Key::::from_raw(RawKey::MAX)), + reg.read(Key::::from_raw(RawKey::MAX_WRITABLE)), [3; 32] ); assert_eq!( - reg.read(Key::::from_raw(RawKey::MIN)), + reg.read(Key::::from_raw(RawKey::ZERO)), [4; 32] ); assert_eq!( reg.index_lookup(&*AssetId::from([3; 32])), - Some(Key::::from_raw(RawKey::MAX)) + Some(Key::::from_raw(RawKey::MAX_WRITABLE)) ); assert_eq!( reg.index_lookup(&*AssetId::from([4; 32])), - Some(Key::::from_raw(RawKey::MIN)) + Some(Key::::from_raw(RawKey::ZERO)) ); } } From 1919aa804c7ad7a3920ddfc8c710729dd2641084 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 31 Jan 2024 18:49:59 +0200 Subject: [PATCH 016/112] Combine db traits together --- crates/compression-alt/src/block.rs | 195 ----------------- crates/compression-alt/src/header.rs | 21 -- crates/compression-alt/src/lib.rs | 212 ++++++++++++++++++- crates/compression-derive/src/attribute.rs | 23 -- crates/compression-derive/src/compact.rs | 42 ++-- crates/compression/src/compression.rs | 81 ++----- crates/compression/src/lib.rs | 2 +- crates/compression/src/registry/db.rs | 18 +- crates/compression/src/registry/in_memory.rs | 8 +- crates/compression/src/registry/mod.rs | 15 +- 10 files changed, 256 insertions(+), 361 deletions(-) delete mode 100644 crates/compression-alt/src/block.rs delete mode 100644 crates/compression-alt/src/header.rs diff --git a/crates/compression-alt/src/block.rs 
b/crates/compression-alt/src/block.rs deleted file mode 100644 index adc51ec61fa..00000000000 --- a/crates/compression-alt/src/block.rs +++ /dev/null @@ -1,195 +0,0 @@ -use serde::{ - Deserialize, - Serialize, -}; - -use fuel_core_compression::ChangesPerTable; -use fuel_core_types::fuel_tx::CompactTransaction; - -use crate::header::Header; - -/// Compressed block. -/// The versioning here working depends on the serialization format, -/// but as long as we we have less than 128 variants, postcard will -/// make that a single byte. -#[derive(Clone, Serialize, Deserialize)] -#[non_exhaustive] -pub enum CompressedBlock { - V0 { - /// Registration section of the compressed block - registrations: ChangesPerTable, - /// Compressed block header - header: Header, - /// Compressed transactions - transactions: Vec, - }, -} - -#[cfg(test)] -mod tests { - use std::array; - - use fuel_core_compression::{ - Compact, - Compactable, - CompactionContext, - InMemoryRegistry, - }; - use fuel_core_types::{ - blockchain::primitives::DaBlockHeight, - fuel_tx::Transaction, - tai64::Tai64, - }; - - use super::*; - - #[test] - fn postcard_roundtrip() { - let original = CompressedBlock::V0 { - registrations: ChangesPerTable::from_start_keys(Default::default()), - header: Header { - da_height: DaBlockHeight::default(), - prev_root: Default::default(), - height: 3u32.into(), - time: Tai64::UNIX_EPOCH, - }, - transactions: vec![], - }; - - let compressed = postcard::to_allocvec(&original).unwrap(); - let decompressed: CompressedBlock = postcard::from_bytes(&compressed).unwrap(); - - let CompressedBlock::V0 { - registrations, - header, - transactions, - } = decompressed; - - assert!(registrations.is_empty()); - assert_eq!(header.height, 3u32.into()); - assert!(transactions.is_empty()); - } - - #[test] - fn compact_transaction() { - let tx = Transaction::default_test_tx(); - let mut registry = InMemoryRegistry::default(); - let (compacted, _) = CompactionContext::run(&mut registry, tx.clone()); - 
let decompacted = Transaction::decompact(compacted.clone(), ®istry); - assert_eq!(tx, decompacted); - - // Check size reduction - let compressed_original = postcard::to_allocvec(&tx).unwrap(); - let compressed_compact = postcard::to_allocvec(&compacted).unwrap(); - assert!(compressed_compact.len() < compressed_original.len() / 2); // Arbitrary threshold - } - - #[test] - fn compact_transaction_twice_gives_equal_result() { - let tx = Transaction::default_test_tx(); - let mut registry = InMemoryRegistry::default(); - let (compacted1, changes1) = CompactionContext::run(&mut registry, tx.clone()); - let (compacted2, changes2) = CompactionContext::run(&mut registry, tx.clone()); - assert!(!changes1.is_empty()); - assert!(changes2.is_empty()); - let compressed1 = postcard::to_allocvec(&compacted1).unwrap(); - let compressed2 = postcard::to_allocvec(&compacted2).unwrap(); - assert_eq!(compressed1, compressed2); - } - - #[test] - fn sizes_of_repeated_tx_make_sense() { - let tx = Transaction::default_test_tx(); - - let sizes: [usize; 4] = array::from_fn(|i| { - // Registry recreated for each block in this test - let mut registry = InMemoryRegistry::default(); - - let (transactions, registrations) = - CompactionContext::run(&mut registry, vec![tx.clone(); i]); - - let original = CompressedBlock::V0 { - registrations, - header: Header { - da_height: DaBlockHeight::default(), - prev_root: Default::default(), - height: 3u32.into(), - time: Tai64::UNIX_EPOCH, - }, - transactions, - }; - - let compressed = postcard::to_allocvec(&original).unwrap(); - compressed.len() - }); - - assert!( - sizes.windows(2).all(|w| w[0] < w[1]), - "Sizes should be in strictly ascending order" - ); - let deltas: Vec<_> = sizes.windows(2).map(|w| w[1] - w[0]).collect(); - assert!(deltas[0] > deltas[1], "Initial delta should be larger"); - assert!(deltas[1] == deltas[2], "Later delta should be constant"); - } - - #[test] - fn same_compact_tx_is_smaller_in_next_block() { - let tx = 
Transaction::default_test_tx(); - - let mut registry = InMemoryRegistry::default(); - - let sizes: [usize; 3] = array::from_fn(|_| { - let (transactions, registrations) = - CompactionContext::run(&mut registry, vec![tx.clone()]); - - let original = CompressedBlock::V0 { - registrations, - header: Header { - da_height: DaBlockHeight::default(), - prev_root: Default::default(), - height: 3u32.into(), - time: Tai64::UNIX_EPOCH, - }, - transactions, - }; - - let compressed = postcard::to_allocvec(&original).unwrap(); - compressed.len() - }); - - assert!(sizes[0] > sizes[1], "Size must decrease after first block"); - assert!( - sizes[1] == sizes[2], - "Size must be constant after first block" - ); - } - - #[test] - #[ignore = "This test is slow"] - fn compact_registry_key_wraparound() { - use fuel_core_types::fuel_types::AssetId; - - #[derive(Debug, Clone, Copy, PartialEq, Compact)] - struct Example { - #[da_compress(registry = "AssetId")] - a: AssetId, - } - - let mut registry = InMemoryRegistry::default(); - for i in 0u32..((1 << 24) + 100) { - if i % 10000 == 0 { - println!("i = {} ({})", i, (i as f32) / (1 << 24) as f32); - } - let mut bytes = [0x00; 32]; - bytes[..4].copy_from_slice(&i.to_be_bytes()); - let target = Example { - a: AssetId::from(bytes), - }; - let (compact, _) = CompactionContext::run(&mut registry, target); - if i % 99 == 0 { - let decompact = Example::decompact(compact, ®istry); - assert_eq!(decompact, target); - } - } - } -} diff --git a/crates/compression-alt/src/header.rs b/crates/compression-alt/src/header.rs deleted file mode 100644 index 7d2ba84002c..00000000000 --- a/crates/compression-alt/src/header.rs +++ /dev/null @@ -1,21 +0,0 @@ -use fuel_core_types::{ - blockchain::primitives::DaBlockHeight, - fuel_types::{ - BlockHeight, - Bytes32, - }, - tai64::Tai64, -}; - -use serde::{ - Deserialize, - Serialize, -}; - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct Header { - pub da_height: DaBlockHeight, - pub prev_root: 
Bytes32, - pub height: BlockHeight, - pub time: Tai64, -} diff --git a/crates/compression-alt/src/lib.rs b/crates/compression-alt/src/lib.rs index 3f24bea4e69..3e0682bded1 100644 --- a/crates/compression-alt/src/lib.rs +++ b/crates/compression-alt/src/lib.rs @@ -1,2 +1,210 @@ -pub mod block; -pub mod header; +use serde::{ + Deserialize, + Serialize, +}; + +use fuel_core_compression::ChangesPerTable; +use fuel_core_types::fuel_tx::CompactTransaction; + +use fuel_core_types::{ + blockchain::primitives::DaBlockHeight, + fuel_types::{ + BlockHeight, + Bytes32, + }, + tai64::Tai64, +}; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Header { + pub da_height: DaBlockHeight, + pub prev_root: Bytes32, + pub height: BlockHeight, + pub time: Tai64, +} + +/// Compressed block. +/// The versioning here working depends on the serialization format, +/// but as long as we we have less than 128 variants, postcard will +/// make that a single byte. +#[derive(Clone, Serialize, Deserialize)] +#[non_exhaustive] +pub enum CompressedBlock { + V0 { + /// Registration section of the compressed block + registrations: ChangesPerTable, + /// Compressed block header + header: Header, + /// Compressed transactions + transactions: Vec, + }, +} + +#[cfg(test)] +mod tests { + use std::array; + + use fuel_core_compression::{ + Compact, + Compactable, + CompactionContext, + InMemoryRegistry, + }; + use fuel_core_types::{ + blockchain::primitives::DaBlockHeight, + fuel_tx::Transaction, + tai64::Tai64, + }; + + use super::*; + + #[test] + fn postcard_roundtrip() { + let original = CompressedBlock::V0 { + registrations: ChangesPerTable::from_start_keys(Default::default()), + header: Header { + da_height: DaBlockHeight::default(), + prev_root: Default::default(), + height: 3u32.into(), + time: Tai64::UNIX_EPOCH, + }, + transactions: vec![], + }; + + let compressed = postcard::to_allocvec(&original).unwrap(); + let decompressed: CompressedBlock = 
postcard::from_bytes(&compressed).unwrap(); + + let CompressedBlock::V0 { + registrations, + header, + transactions, + } = decompressed; + + assert!(registrations.is_empty()); + assert_eq!(header.height, 3u32.into()); + assert!(transactions.is_empty()); + } + + #[test] + fn compact_transaction() { + let tx = Transaction::default_test_tx(); + let mut registry = InMemoryRegistry::default(); + let (compacted, _) = CompactionContext::run(&mut registry, tx.clone()); + let decompacted = Transaction::decompact(compacted.clone(), ®istry); + assert_eq!(tx, decompacted); + + // Check size reduction + let compressed_original = postcard::to_allocvec(&tx).unwrap(); + let compressed_compact = postcard::to_allocvec(&compacted).unwrap(); + assert!(compressed_compact.len() < compressed_original.len() / 2); // Arbitrary threshold + } + + #[test] + fn compact_transaction_twice_gives_equal_result() { + let tx = Transaction::default_test_tx(); + let mut registry = InMemoryRegistry::default(); + let (compacted1, changes1) = CompactionContext::run(&mut registry, tx.clone()); + let (compacted2, changes2) = CompactionContext::run(&mut registry, tx.clone()); + assert!(!changes1.is_empty()); + assert!(changes2.is_empty()); + let compressed1 = postcard::to_allocvec(&compacted1).unwrap(); + let compressed2 = postcard::to_allocvec(&compacted2).unwrap(); + assert_eq!(compressed1, compressed2); + } + + #[test] + fn sizes_of_repeated_tx_make_sense() { + let tx = Transaction::default_test_tx(); + + let sizes: [usize; 4] = array::from_fn(|i| { + // Registry recreated for each block in this test + let mut registry = InMemoryRegistry::default(); + + let (transactions, registrations) = + CompactionContext::run(&mut registry, vec![tx.clone(); i]); + + let original = CompressedBlock::V0 { + registrations, + header: Header { + da_height: DaBlockHeight::default(), + prev_root: Default::default(), + height: 3u32.into(), + time: Tai64::UNIX_EPOCH, + }, + transactions, + }; + + let compressed = 
postcard::to_allocvec(&original).unwrap(); + compressed.len() + }); + + assert!( + sizes.windows(2).all(|w| w[0] < w[1]), + "Sizes should be in strictly ascending order" + ); + let deltas: Vec<_> = sizes.windows(2).map(|w| w[1] - w[0]).collect(); + assert!(deltas[0] > deltas[1], "Initial delta should be larger"); + assert!(deltas[1] == deltas[2], "Later delta should be constant"); + } + + #[test] + fn same_compact_tx_is_smaller_in_next_block() { + let tx = Transaction::default_test_tx(); + + let mut registry = InMemoryRegistry::default(); + + let sizes: [usize; 3] = array::from_fn(|_| { + let (transactions, registrations) = + CompactionContext::run(&mut registry, vec![tx.clone()]); + + let original = CompressedBlock::V0 { + registrations, + header: Header { + da_height: DaBlockHeight::default(), + prev_root: Default::default(), + height: 3u32.into(), + time: Tai64::UNIX_EPOCH, + }, + transactions, + }; + + let compressed = postcard::to_allocvec(&original).unwrap(); + compressed.len() + }); + + assert!(sizes[0] > sizes[1], "Size must decrease after first block"); + assert!( + sizes[1] == sizes[2], + "Size must be constant after first block" + ); + } + + #[test] + #[ignore = "This test is slow"] + fn compact_registry_key_wraparound() { + use fuel_core_types::fuel_types::AssetId; + + #[derive(Debug, Clone, Copy, PartialEq, Compact)] + struct Example { + #[da_compress(registry = "AssetId")] + a: AssetId, + } + + let mut registry = InMemoryRegistry::default(); + for i in 0u32..((1 << 24) + 100) { + if i % 10000 == 0 { + println!("i = {} ({})", i, (i as f32) / (1 << 24) as f32); + } + let mut bytes = [0x00; 32]; + bytes[..4].copy_from_slice(&i.to_be_bytes()); + let target = Example { + a: AssetId::from(bytes), + }; + let (compact, _) = CompactionContext::run(&mut registry, target); + if i % 99 == 0 { + let decompact = Example::decompact(compact, ®istry); + assert_eq!(decompact, target); + } + } + } +} diff --git a/crates/compression-derive/src/attribute.rs 
b/crates/compression-derive/src/attribute.rs index a3e8cd9bde1..4c0e7122e45 100644 --- a/crates/compression-derive/src/attribute.rs +++ b/crates/compression-derive/src/attribute.rs @@ -2,29 +2,6 @@ use regex::Regex; const ATTR: &str = "da_compress"; -/// struct/enum attributes -pub enum StructureAttrs { - /// Compacted recursively. - Normal, -} -impl StructureAttrs { - pub fn parse(attrs: &[syn::Attribute]) -> Self { - for attr in attrs { - if attr.style != syn::AttrStyle::Outer { - continue; - } - - if let syn::Meta::List(ml) = &attr.meta { - if ml.path.segments.len() == 1 && ml.path.segments[0].ident == ATTR { - panic!("Invalid attribute: {}", ml.tokens); - } - } - } - - Self::Normal - } -} - /// Field attributes pub enum FieldAttrs { /// Skipped when compacting, and must be reconstructed when decompacting. diff --git a/crates/compression-derive/src/compact.rs b/crates/compression-derive/src/compact.rs index 2ecb2dd4465..0ae346d55ad 100644 --- a/crates/compression-derive/src/compact.rs +++ b/crates/compression-derive/src/compact.rs @@ -1,15 +1,10 @@ -use core::panic; - use proc_macro2::TokenStream as TokenStream2; use quote::{ format_ident, quote, }; -use crate::attribute::{ - FieldAttrs, - StructureAttrs, -}; +use crate::attribute::FieldAttrs; /// Map field definitions to compacted field definitions. fn field_defs(fields: &syn::Fields) -> TokenStream2 { @@ -269,7 +264,7 @@ fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { }; let impls = s.gen_impl(quote! 
{ - use ::fuel_core_compression::{db, tables, Table, Key, Compactable, CountPerTable, CompactionContext}; + use ::fuel_core_compression::{RegistryDb, tables, Table, Key, Compactable, CountPerTable, CompactionContext}; gen impl Compactable for @Self { @@ -279,15 +274,11 @@ fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { match self { #count_per_variant } } - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex { + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact { match self { #compact_per_variant } } - fn decompact(compact: Self::Compact, reg: &R) -> Self - where - R: db::RegistryRead { + fn decompact(compact: Self::Compact, reg: &R) -> Self { match compact { #decompact_per_variant } } } @@ -340,7 +331,7 @@ fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { }); let impls = s.gen_impl(quote! { - use ::fuel_core_compression::{db, tables, Table, Key, Compactable, CountPerTable, CompactionContext}; + use ::fuel_core_compression::{RegistryDb, tables, Table, Key, Compactable, CountPerTable, CompactionContext}; gen impl Compactable for @Self { type Compact = #compact_name; @@ -349,15 +340,11 @@ fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { match self { #count_per_variant } } - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex { + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact { match self { #construct_per_variant } } - fn decompact(compact: Self::Compact, reg: &R) -> Self - where - R: db::RegistryRead { + fn decompact(compact: Self::Compact, reg: &R) -> Self { match compact { #decompact_per_variant } } } @@ -375,14 +362,9 @@ pub fn compact_derive(mut s: synstructure::Structure) -> TokenStream2 { let name = s.ast().ident.to_string(); - let ts = match StructureAttrs::parse(&s.ast().attrs) { - StructureAttrs::Normal => match 
s.ast().data { - syn::Data::Struct(_) => serialize_struct(&s), - syn::Data::Enum(_) => serialize_enum(&s), - _ => panic!("Can't derive `Compact` for `union`s"), - }, - }; - // println!("{}", ts); - let _ = std::fs::write(format!("/tmp/derive/{name}.rs"), ts.to_string()); - ts + match s.ast().data { + syn::Data::Struct(_) => serialize_struct(&s), + syn::Data::Enum(_) => serialize_enum(&s), + _ => panic!("Can't derive `Compact` for `union`s"), + } } diff --git a/crates/compression/src/compression.rs b/crates/compression/src/compression.rs index 483b16794c3..aa40e89c2ee 100644 --- a/crates/compression/src/compression.rs +++ b/crates/compression/src/compression.rs @@ -13,11 +13,11 @@ use crate::{ }, add_keys, block_section::WriteTo, - db, next_keys, ChangesPerTable, CountPerTable, KeyPerTable, + RegistryDb, Table, }, Key, @@ -37,13 +37,7 @@ pub struct CompactionContext<'a, R> { safe_keys_start: KeyPerTable, changes: ChangesPerTable, } -impl<'a, R> CompactionContext<'a, R> -where - R: db::RegistrySelectNextKey - + db::RegistryRead - + db::RegistryIndex - + db::RegistryWrite, -{ +impl<'a, R: RegistryDb> CompactionContext<'a, R> { /// Run the compaction for the given target, returning the compacted data. /// Changes are applied to the registry, and then returned as well. pub fn run( @@ -69,10 +63,7 @@ where } } -impl<'a, R> CompactionContext<'a, R> -where - R: db::RegistryRead + db::RegistryIndex, -{ +impl<'a, R: RegistryDb> CompactionContext<'a, R> { /// Convert a value to a key /// If necessary, store the value in the changeset and allocate a new key. 
pub fn to_key(&mut self, value: T::Type) -> Key @@ -116,13 +107,9 @@ pub trait Compactable { /// Count max number of each key type, for upper limit of overwritten keys fn count(&self) -> CountPerTable; - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex; + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact; - fn decompact(compact: Self::Compact, reg: &R) -> Self - where - R: db::RegistryRead; + fn decompact(compact: Self::Compact, reg: &R) -> Self; } macro_rules! identity_compaction { @@ -134,17 +121,14 @@ macro_rules! identity_compaction { CountPerTable::default() } - fn compact(&self, _ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, - { + fn compact( + &self, + _ctx: &mut CompactionContext, + ) -> Self::Compact { *self } - fn decompact(compact: Self::Compact, _reg: &R) -> Self - where - R: db::RegistryRead, - { + fn decompact(compact: Self::Compact, _reg: &R) -> Self { compact } } @@ -176,17 +160,11 @@ where count } - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, - { + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact { ArrayWrapper(self.clone().map(|item| item.compact(ctx))) } - fn decompact(compact: Self::Compact, reg: &R) -> Self - where - R: db::RegistryRead, - { + fn decompact(compact: Self::Compact, reg: &R) -> Self { compact.0.map(|item| T::decompact(item, reg)) } } @@ -205,17 +183,11 @@ where count } - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, - { + fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact { self.iter().map(|item| item.compact(ctx)).collect() } - fn decompact(compact: Self::Compact, reg: &R) -> Self - where - R: db::RegistryRead, - { + fn decompact(compact: Self::Compact, reg: 
&R) -> Self { compact .into_iter() .map(|item| T::decompact(item, reg)) @@ -230,17 +202,11 @@ impl Compactable for PhantomData { CountPerTable::default() } - fn compact(&self, _ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, - { + fn compact(&self, _ctx: &mut CompactionContext) -> Self::Compact { () } - fn decompact(_compact: Self::Compact, _reg: &R) -> Self - where - R: db::RegistryRead, - { + fn decompact(_compact: Self::Compact, _reg: &R) -> Self { Self } } @@ -249,12 +215,12 @@ impl Compactable for PhantomData { mod tests { use crate::{ registry::{ - db, in_memory::InMemoryRegistry, tables, CountPerTable, }, Key, + RegistryDb, }; use fuel_core_compression::Compactable as _; // Hack for derive use fuel_core_compression_derive::Compact; @@ -296,19 +262,16 @@ mod tests { } } - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact - where - R: db::RegistryRead + db::RegistryWrite + db::RegistryIndex, - { + fn compact( + &self, + ctx: &mut CompactionContext, + ) -> Self::Compact { let a = ctx.to_key::(*self.a); let b = ctx.to_key::(*self.b); ManualExampleCompact { a, b, c: self.c } } - fn decompact(compact: Self::Compact, reg: &R) -> Self - where - R: db::RegistryRead, - { + fn decompact(compact: Self::Compact, reg: &R) -> Self { let a = Address::from(reg.read::(compact.a)); let b = Address::from(reg.read::(compact.b)); Self { a, b, c: compact.c } diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index b9b0c16d970..56b9606112c 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -6,11 +6,11 @@ pub use compression::{ CompactionContext, }; pub use registry::{ - db, tables, ChangesPerTable, CountPerTable, Key, + RegistryDb, Table, }; diff --git a/crates/compression/src/registry/db.rs b/crates/compression/src/registry/db.rs index 43ada92bd72..40c729c31e4 100644 --- a/crates/compression/src/registry/db.rs +++ 
b/crates/compression/src/registry/db.rs @@ -3,28 +3,18 @@ use super::{ Table, }; -/// Get next key for the given table. This is where the next write should start at. -/// The result of this function is just a suggestion, and the caller may choose to -/// ignore it, although it's rare that they would know better. -pub trait RegistrySelectNextKey { - /// Get next set of start keys +pub trait RegistryDb { + /// Get next key for the given table. This is where the next write should start at. + /// The result of this function is just a suggestion, and the caller may choose to + /// ignore it, although it's rare that they would know better. fn next_key(&self) -> Key; -} -/// Read a value from the registry by key -pub trait RegistryRead { /// Read a value from the registry by key fn read(&self, key: Key) -> T::Type; -} -/// Write values to the registry -pub trait RegistryWrite { /// Write a continuous sequence of values to the registry fn batch_write(&mut self, start_key: Key, values: Vec); -} -/// Lookup a key by value -pub trait RegistryIndex { /// Lookup a key by value fn index_lookup(&self, value: &T::Type) -> Option>; } diff --git a/crates/compression/src/registry/in_memory.rs b/crates/compression/src/registry/in_memory.rs index 62470090f2e..f55b001597b 100644 --- a/crates/compression/src/registry/in_memory.rs +++ b/crates/compression/src/registry/in_memory.rs @@ -15,13 +15,11 @@ pub struct InMemoryRegistry { index: HashMap<&'static str, HashMap, RawKey>>, } -impl RegistrySelectNextKey for InMemoryRegistry { +impl RegistryDb for InMemoryRegistry { fn next_key(&self) -> Key { Key::from_raw(self.next_keys.get(T::NAME).copied().unwrap_or(RawKey::ZERO)) } -} -impl RegistryRead for InMemoryRegistry { fn read(&self, key: Key) -> T::Type { if key == Key::DEFAULT_VALUE { return T::Type::default(); @@ -33,9 +31,7 @@ impl RegistryRead for InMemoryRegistry { .map(|bytes| postcard::from_bytes(bytes).expect("Invalid value in registry")) .unwrap_or_default() } -} -impl 
RegistryWrite for InMemoryRegistry { fn batch_write(&mut self, start_key: Key, values: Vec) { let empty = values.is_empty(); if !empty && start_key == Key::DEFAULT_VALUE { @@ -55,9 +51,7 @@ impl RegistryWrite for InMemoryRegistry { self.next_keys.insert(T::NAME, key); } } -} -impl RegistryIndex for InMemoryRegistry { fn index_lookup(&self, value: &T::Type) -> Option> { if *value == T::Type::default() { return Some(Key::DEFAULT_VALUE); diff --git a/crates/compression/src/registry/mod.rs b/crates/compression/src/registry/mod.rs index 4a87d462bf8..9bbd08bef24 100644 --- a/crates/compression/src/registry/mod.rs +++ b/crates/compression/src/registry/mod.rs @@ -9,7 +9,10 @@ pub(crate) mod in_memory; mod key; use self::block_section::WriteTo; -pub use self::key::Key; +pub use self::{ + db::RegistryDb, + key::Key, +}; mod _private { pub trait Seal {} @@ -129,7 +132,7 @@ macro_rules! tables { } )* - pub fn next_keys(reg: &mut R) -> KeyPerTable { + pub fn next_keys(reg: &mut R) -> KeyPerTable { KeyPerTable { $( $name: reg.next_key(), )* } @@ -169,7 +172,7 @@ macro_rules! 
tables { } /// Apply changes to the registry db - pub fn apply_to_registry(&self, reg: &mut R) { + pub fn apply_to_registry(&self, reg: &mut R) { $( reg.batch_write(self.$name.start_key, self.$name.values.clone()); )* @@ -205,12 +208,6 @@ mod tests { use super::*; - use super::db::{ - RegistryIndex as _, - RegistryRead as _, - RegistryWrite as _, - }; - #[test] fn test_in_memory_db() { let mut reg = in_memory::InMemoryRegistry::default(); From ec4f257b0bc6c62ae1937b284b3c91270fce1b95 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 1 Feb 2024 10:56:22 +0200 Subject: [PATCH 017/112] Simplify the derive code --- crates/compression-derive/src/compact.rs | 145 +++++++++-------------- 1 file changed, 53 insertions(+), 92 deletions(-) diff --git a/crates/compression-derive/src/compact.rs b/crates/compression-derive/src/compact.rs index 0ae346d55ad..39ab4c8dce5 100644 --- a/crates/compression-derive/src/compact.rs +++ b/crates/compression-derive/src/compact.rs @@ -232,109 +232,80 @@ fn each_variant_compact) -> TokenStream2 .collect() } -fn serialize_struct(s: &synstructure::Structure) -> TokenStream2 { - assert_eq!(s.variants().len(), 1, "structs must have one variant"); - let variant: &synstructure::VariantInfo = &s.variants()[0]; +/// Derives `Compact` trait for the given `struct` or `enum`. +pub fn compact_derive(mut s: synstructure::Structure) -> TokenStream2 { + s.add_bounds(synstructure::AddBounds::None) + .underscore_const(true); let name = &s.ast().ident; let compact_name = format_ident!("Compact{}", name); - let defs = field_defs(&variant.ast().fields); - let count_per_variant = s.each_variant(|variant| sum_counts(variant)); - let compact_per_variant = - s.each_variant(|variant| construct_compact("e! {#compact_name}, variant)); - - let decompact_per_variant = - each_variant_compact(s, "e! {#compact_name}, |variant| { - construct_decompact("e! {#name}, variant) - }); - - let semi = match variant.ast().fields { - syn::Fields::Named(_) => quote! 
{}, - syn::Fields::Unnamed(_) => quote! {;}, - syn::Fields::Unit => quote! {;}, - }; - let g = s.ast().generics.clone(); let w = g.where_clause.clone(); - let compact = quote! { - #[derive(Clone, serde::Serialize, serde::Deserialize)] - #[doc = concat!("Compacted version of `", stringify!(#name), "`.")] - pub struct #compact_name #g #w #defs #semi - }; - - let impls = s.gen_impl(quote! { - use ::fuel_core_compression::{RegistryDb, tables, Table, Key, Compactable, CountPerTable, CompactionContext}; - - gen impl Compactable for @Self { - - type Compact = #compact_name #g; - - fn count(&self) -> CountPerTable { - match self { #count_per_variant } - } - - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact { - match self { #compact_per_variant } - } - - fn decompact(compact: Self::Compact, reg: &R) -> Self { - match compact { #decompact_per_variant } + let def = match &s.ast().data { + syn::Data::Struct(v) => { + let variant: &synstructure::VariantInfo = &s.variants()[0]; + let defs = field_defs(&variant.ast().fields); + let semi = match v.fields { + syn::Fields::Named(_) => quote! {}, + syn::Fields::Unnamed(_) => quote! {;}, + syn::Fields::Unit => quote! {;}, + }; + quote! { + #[derive(Clone, serde::Serialize, serde::Deserialize)] + #[doc = concat!("Compacted version of `", stringify!(#name), "`.")] + pub struct #compact_name #g #w #defs #semi } } - }); - - quote! { - #compact - #impls - } -} - -fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { - assert!(!s.variants().is_empty(), "empty enums are not supported"); - - let name = &s.ast().ident; - let compact_name = format_ident!("Compact{}", name); + syn::Data::Enum(_) => { + let variant_defs: TokenStream2 = s + .variants() + .iter() + .map(|variant| { + let vname = variant.ast().ident.clone(); + let defs = field_defs(&variant.ast().fields); + quote! 
{ + #vname #defs, + } + }) + .collect(); - let variant_defs: TokenStream2 = s - .variants() - .iter() - .map(|variant| { - let vname = variant.ast().ident.clone(); - let defs = field_defs(&variant.ast().fields); quote! { - #vname #defs, + #[derive(Clone, serde::Serialize, serde::Deserialize)] + #[doc = concat!("Compacted version of `", stringify!(#name), "`.")] + pub enum #compact_name #g #w { #variant_defs } } - }) - .collect(); - let enumdef = quote! { - #[derive(Clone, serde::Serialize, serde::Deserialize)] - #[doc = concat!("Compacted version of `", stringify!(#name), "`.")] - pub enum #compact_name { #variant_defs } + } + syn::Data::Union(_) => panic!("unions are not supported"), }; let count_per_variant = s.each_variant(|variant| sum_counts(variant)); let construct_per_variant = s.each_variant(|variant| { let vname = variant.ast().ident.clone(); - construct_compact( - "e! { - #compact_name :: #vname - }, - variant, - ) + let construct = match &s.ast().data { + syn::Data::Struct(_) => quote! { #compact_name }, + syn::Data::Enum(_) => quote! {#compact_name :: #vname }, + syn::Data::Union(_) => unreachable!(), + }; + construct_compact(&construct, variant) }); let decompact_per_variant = - each_variant_compact(s, "e! {#compact_name}, |variant| { + each_variant_compact(&s, "e! {#compact_name}, |variant| { let vname = variant.ast().ident.clone(); - construct_decompact("e! { #name :: #vname }, variant) + let construct = match &s.ast().data { + syn::Data::Struct(_) => quote! { #name }, + syn::Data::Enum(_) => quote! {#name :: #vname }, + syn::Data::Union(_) => unreachable!(), + }; + construct_decompact(&construct, variant) }); let impls = s.gen_impl(quote! 
{ use ::fuel_core_compression::{RegistryDb, tables, Table, Key, Compactable, CountPerTable, CompactionContext}; gen impl Compactable for @Self { - type Compact = #compact_name; + type Compact = #compact_name #g; fn count(&self) -> CountPerTable { match self { #count_per_variant } @@ -349,22 +320,12 @@ fn serialize_enum(s: &synstructure::Structure) -> TokenStream2 { } } }); - quote! { - #enumdef + let rs = quote! { + #def #impls - } -} - -/// Derives `Compact` trait for the given `struct` or `enum`. -pub fn compact_derive(mut s: synstructure::Structure) -> TokenStream2 { - s.add_bounds(synstructure::AddBounds::None) - .underscore_const(true); + }; - let name = s.ast().ident.to_string(); + let _ = std::fs::write(format!("/tmp/derive/{}.rs", name), &rs.to_string()); - match s.ast().data { - syn::Data::Struct(_) => serialize_struct(&s), - syn::Data::Enum(_) => serialize_enum(&s), - _ => panic!("Can't derive `Compact` for `union`s"), - } + rs } From 08616b37cdcfe38cb9ba7c348350932d73856b8a Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 1 Feb 2024 10:56:41 +0200 Subject: [PATCH 018/112] Fix key add bug --- crates/compression/src/registry/key.rs | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/crates/compression/src/registry/key.rs b/crates/compression/src/registry/key.rs index 9e5545c8eff..f08fbcebc46 100644 --- a/crates/compression/src/registry/key.rs +++ b/crates/compression/src/registry/key.rs @@ -15,7 +15,7 @@ pub struct RawKey([u8; Self::SIZE]); impl RawKey { pub const SIZE: usize = 3; pub const ZERO: Self = Self([0; Self::SIZE]); - pub const MAX_WRITABLE: Self = Self([u8::MAX; Self::SIZE]); + pub const MAX_WRITABLE: Self = Self([u8::MAX, u8::MAX, u8::MAX - 1]); pub const DEFAULT_VALUE: Self = Self([u8::MAX; Self::SIZE]); pub fn as_u32(self) -> u32 { @@ -24,15 +24,14 @@ impl RawKey { /// Wraps around just below max/default value. 
pub fn add_u32(self, rhs: u32) -> Self { - let lhs = self.as_u32(); - let v = lhs.wrapping_add(rhs); - let b = v.to_be_bytes(); - let raw = Self([b[1], b[2], b[3]]); - if raw == Self::DEFAULT_VALUE { - Self::ZERO - } else { - raw - } + let lhs = self.as_u32() as u64; + let rhs = rhs as u64; + // Safety: cannot overflow as both operands are limited to 32 bits + let result = (lhs + rhs) % (Self::DEFAULT_VALUE.as_u32() as u64); + // Safety: cannot truncate as we are already limited to 24 bits by modulo + let v = result as u32; + let v = v.to_be_bytes(); + Self([v[1], v[2], v[3]]) } /// Wraps around just below max/default value. From ecde26ce101daa8ccfa279a63686588027f0ab5e Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 1 Feb 2024 22:22:30 +0200 Subject: [PATCH 019/112] Migrate the derive macro and compaction machinery to fuel-vm repo --- Cargo.lock | 45 +-- Cargo.toml | 4 - crates/compression-alt/Cargo.toml | 21 -- crates/compression-alt/src/lib.rs | 210 ----------- crates/compression-derive/Cargo.toml | 21 -- crates/compression-derive/src/attribute.rs | 44 --- crates/compression-derive/src/compact.rs | 331 ----------------- crates/compression-derive/src/lib.rs | 15 - crates/compression/Cargo.toml | 17 +- crates/compression/README.md | 34 -- crates/compression/src/compression.rs | 332 ------------------ crates/compression/src/lib.rs | 193 +++++++++- .../compression/src/registry/block_section.rs | 199 ----------- crates/compression/src/registry/db.rs | 20 -- crates/compression/src/registry/in_memory.rs | 110 ------ crates/compression/src/registry/key.rs | 172 --------- crates/compression/src/registry/mod.rs | 283 --------------- crates/types/src/lib.rs | 3 + 18 files changed, 202 insertions(+), 1852 deletions(-) delete mode 100644 crates/compression-alt/Cargo.toml delete mode 100644 crates/compression-alt/src/lib.rs delete mode 100644 crates/compression-derive/Cargo.toml delete mode 100644 crates/compression-derive/src/attribute.rs delete mode 100644 
crates/compression-derive/src/compact.rs delete mode 100644 crates/compression-derive/src/lib.rs delete mode 100644 crates/compression/README.md delete mode 100644 crates/compression/src/compression.rs delete mode 100644 crates/compression/src/registry/block_section.rs delete mode 100644 crates/compression/src/registry/db.rs delete mode 100644 crates/compression/src/registry/in_memory.rs delete mode 100644 crates/compression/src/registry/key.rs delete mode 100644 crates/compression/src/registry/mod.rs diff --git a/Cargo.lock b/Cargo.lock index d37d96c22f9..ff92c7e0078 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2608,6 +2608,18 @@ dependencies = [ "strum 0.24.1", ] +[[package]] +name = "fuel-compression" +version = "0.44.0" +dependencies = [ + "bincode", + "fuel-derive", + "paste", + "postcard", + "serde", + "serde-big-array", +] + [[package]] name = "fuel-core" version = "0.22.0" @@ -2776,37 +2788,12 @@ dependencies = [ name = "fuel-core-compression" version = "0.22.0" dependencies = [ - "bincode", - "fuel-core-compression", - "fuel-core-compression-derive", - "fuel-core-types", - "paste", - "postcard", - "serde", - "serde-big-array", -] - -[[package]] -name = "fuel-core-compression-alt" -version = "0.22.0" -dependencies = [ - "fuel-core-compression", "fuel-core-types", + "fuel-vm", "postcard", "serde", ] -[[package]] -name = "fuel-core-compression-derive" -version = "0.22.0" -dependencies = [ - "proc-macro2", - "quote", - "regex", - "syn 2.0.48", - "synstructure 0.13.0", -] - [[package]] name = "fuel-core-consensus-module" version = "0.22.0" @@ -3190,6 +3177,7 @@ version = "0.44.0" dependencies = [ "proc-macro2", "quote", + "regex", "syn 2.0.48", "synstructure 0.13.0", ] @@ -3219,7 +3207,7 @@ dependencies = [ "derivative", "derive_more", "fuel-asm", - "fuel-core-compression", + "fuel-compression", "fuel-crypto", "fuel-merkle", "fuel-types", @@ -3236,7 +3224,7 @@ dependencies = [ name = "fuel-types" version = "0.44.0" dependencies = [ - "fuel-core-compression", + 
"fuel-compression", "fuel-derive", "hex", "rand", @@ -3255,6 +3243,7 @@ dependencies = [ "derive_more", "ethnum", "fuel-asm", + "fuel-compression", "fuel-crypto", "fuel-merkle", "fuel-storage", diff --git a/Cargo.toml b/Cargo.toml index d108118a8e2..6356ce53475 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,8 +9,6 @@ members = [ "crates/chain-config", "crates/client", "crates/compression", - "crates/compression-alt", - "crates/compression-derive", "crates/database", "crates/fuel-core", "crates/keygen", @@ -64,8 +62,6 @@ fuel-core-database = { version = "0.22.0", path = "./crates/database" } fuel-core-metrics = { version = "0.22.0", path = "./crates/metrics" } fuel-core-services = { version = "0.22.0", path = "./crates/services" } fuel-core-compression = { version = "0.22.0", path = "./crates/compression" } -fuel-core-compression-alt = { version = "0.22.0", path = "./crates/compression-alt" } -fuel-core-compression-derice = { version = "0.22.0", path = "./crates/compression-derive" } fuel-core-consensus-module = { version = "0.22.0", path = "./crates/services/consensus_module" } fuel-core-bft = { version = "0.22.0", path = "./crates/services/consensus_module/bft" } fuel-core-poa = { version = "0.22.0", path = "./crates/services/consensus_module/poa" } diff --git a/crates/compression-alt/Cargo.toml b/crates/compression-alt/Cargo.toml deleted file mode 100644 index ef479b62cfd..00000000000 --- a/crates/compression-alt/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "fuel-core-compression-alt" -version = { workspace = true } -authors = { workspace = true } -categories = ["cryptography::cryptocurrencies"] -edition = { workspace = true } -homepage = { workspace = true } -keywords = ["blockchain", "cryptocurrencies", "fuel-core", "fuel-client", "fuel-compression"] -license = { workspace = true } -repository = { workspace = true } -description = "Compression and decompression of Fuel blocks for DA storage." 
- -[dependencies] -fuel-core-compression = { workspace = true } -fuel-core-types = { workspace = true, features = ["serde", "da-compression"] } -postcard = { version = "1.0", features = ["use-std"] } -serde = { version = "1.0", features = ["derive"] } - -[dev-dependencies] -fuel-core-compression = { workspace = true, features = ["test-helpers"] } -fuel-core-types = { workspace = true, features = ["test-helpers"] } diff --git a/crates/compression-alt/src/lib.rs b/crates/compression-alt/src/lib.rs deleted file mode 100644 index 3e0682bded1..00000000000 --- a/crates/compression-alt/src/lib.rs +++ /dev/null @@ -1,210 +0,0 @@ -use serde::{ - Deserialize, - Serialize, -}; - -use fuel_core_compression::ChangesPerTable; -use fuel_core_types::fuel_tx::CompactTransaction; - -use fuel_core_types::{ - blockchain::primitives::DaBlockHeight, - fuel_types::{ - BlockHeight, - Bytes32, - }, - tai64::Tai64, -}; - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct Header { - pub da_height: DaBlockHeight, - pub prev_root: Bytes32, - pub height: BlockHeight, - pub time: Tai64, -} - -/// Compressed block. -/// The versioning here working depends on the serialization format, -/// but as long as we we have less than 128 variants, postcard will -/// make that a single byte. 
-#[derive(Clone, Serialize, Deserialize)] -#[non_exhaustive] -pub enum CompressedBlock { - V0 { - /// Registration section of the compressed block - registrations: ChangesPerTable, - /// Compressed block header - header: Header, - /// Compressed transactions - transactions: Vec, - }, -} - -#[cfg(test)] -mod tests { - use std::array; - - use fuel_core_compression::{ - Compact, - Compactable, - CompactionContext, - InMemoryRegistry, - }; - use fuel_core_types::{ - blockchain::primitives::DaBlockHeight, - fuel_tx::Transaction, - tai64::Tai64, - }; - - use super::*; - - #[test] - fn postcard_roundtrip() { - let original = CompressedBlock::V0 { - registrations: ChangesPerTable::from_start_keys(Default::default()), - header: Header { - da_height: DaBlockHeight::default(), - prev_root: Default::default(), - height: 3u32.into(), - time: Tai64::UNIX_EPOCH, - }, - transactions: vec![], - }; - - let compressed = postcard::to_allocvec(&original).unwrap(); - let decompressed: CompressedBlock = postcard::from_bytes(&compressed).unwrap(); - - let CompressedBlock::V0 { - registrations, - header, - transactions, - } = decompressed; - - assert!(registrations.is_empty()); - assert_eq!(header.height, 3u32.into()); - assert!(transactions.is_empty()); - } - - #[test] - fn compact_transaction() { - let tx = Transaction::default_test_tx(); - let mut registry = InMemoryRegistry::default(); - let (compacted, _) = CompactionContext::run(&mut registry, tx.clone()); - let decompacted = Transaction::decompact(compacted.clone(), ®istry); - assert_eq!(tx, decompacted); - - // Check size reduction - let compressed_original = postcard::to_allocvec(&tx).unwrap(); - let compressed_compact = postcard::to_allocvec(&compacted).unwrap(); - assert!(compressed_compact.len() < compressed_original.len() / 2); // Arbitrary threshold - } - - #[test] - fn compact_transaction_twice_gives_equal_result() { - let tx = Transaction::default_test_tx(); - let mut registry = InMemoryRegistry::default(); - let 
(compacted1, changes1) = CompactionContext::run(&mut registry, tx.clone()); - let (compacted2, changes2) = CompactionContext::run(&mut registry, tx.clone()); - assert!(!changes1.is_empty()); - assert!(changes2.is_empty()); - let compressed1 = postcard::to_allocvec(&compacted1).unwrap(); - let compressed2 = postcard::to_allocvec(&compacted2).unwrap(); - assert_eq!(compressed1, compressed2); - } - - #[test] - fn sizes_of_repeated_tx_make_sense() { - let tx = Transaction::default_test_tx(); - - let sizes: [usize; 4] = array::from_fn(|i| { - // Registry recreated for each block in this test - let mut registry = InMemoryRegistry::default(); - - let (transactions, registrations) = - CompactionContext::run(&mut registry, vec![tx.clone(); i]); - - let original = CompressedBlock::V0 { - registrations, - header: Header { - da_height: DaBlockHeight::default(), - prev_root: Default::default(), - height: 3u32.into(), - time: Tai64::UNIX_EPOCH, - }, - transactions, - }; - - let compressed = postcard::to_allocvec(&original).unwrap(); - compressed.len() - }); - - assert!( - sizes.windows(2).all(|w| w[0] < w[1]), - "Sizes should be in strictly ascending order" - ); - let deltas: Vec<_> = sizes.windows(2).map(|w| w[1] - w[0]).collect(); - assert!(deltas[0] > deltas[1], "Initial delta should be larger"); - assert!(deltas[1] == deltas[2], "Later delta should be constant"); - } - - #[test] - fn same_compact_tx_is_smaller_in_next_block() { - let tx = Transaction::default_test_tx(); - - let mut registry = InMemoryRegistry::default(); - - let sizes: [usize; 3] = array::from_fn(|_| { - let (transactions, registrations) = - CompactionContext::run(&mut registry, vec![tx.clone()]); - - let original = CompressedBlock::V0 { - registrations, - header: Header { - da_height: DaBlockHeight::default(), - prev_root: Default::default(), - height: 3u32.into(), - time: Tai64::UNIX_EPOCH, - }, - transactions, - }; - - let compressed = postcard::to_allocvec(&original).unwrap(); - compressed.len() - }); - 
- assert!(sizes[0] > sizes[1], "Size must decrease after first block"); - assert!( - sizes[1] == sizes[2], - "Size must be constant after first block" - ); - } - - #[test] - #[ignore = "This test is slow"] - fn compact_registry_key_wraparound() { - use fuel_core_types::fuel_types::AssetId; - - #[derive(Debug, Clone, Copy, PartialEq, Compact)] - struct Example { - #[da_compress(registry = "AssetId")] - a: AssetId, - } - - let mut registry = InMemoryRegistry::default(); - for i in 0u32..((1 << 24) + 100) { - if i % 10000 == 0 { - println!("i = {} ({})", i, (i as f32) / (1 << 24) as f32); - } - let mut bytes = [0x00; 32]; - bytes[..4].copy_from_slice(&i.to_be_bytes()); - let target = Example { - a: AssetId::from(bytes), - }; - let (compact, _) = CompactionContext::run(&mut registry, target); - if i % 99 == 0 { - let decompact = Example::decompact(compact, ®istry); - assert_eq!(decompact, target); - } - } - } -} diff --git a/crates/compression-derive/Cargo.toml b/crates/compression-derive/Cargo.toml deleted file mode 100644 index ae3b0a19ae4..00000000000 --- a/crates/compression-derive/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "fuel-core-compression-derive" -version = { workspace = true } -authors = { workspace = true } -categories = ["cryptography::cryptocurrencies"] -edition = { workspace = true } -homepage = { workspace = true } -keywords = ["blockchain", "cryptocurrencies", "fuel-core", "fuel-client", "fuel-compression"] -license = { workspace = true } -repository = { workspace = true } -description = "Compression and decompression derive macros for DA storage types." 
- -[lib] -proc-macro = true - -[dependencies] -quote = "1" -syn = { version = "2", features = ["full"] } -proc-macro2 = "1" -synstructure = "0.13" -regex = "1" diff --git a/crates/compression-derive/src/attribute.rs b/crates/compression-derive/src/attribute.rs deleted file mode 100644 index 4c0e7122e45..00000000000 --- a/crates/compression-derive/src/attribute.rs +++ /dev/null @@ -1,44 +0,0 @@ -use regex::Regex; - -const ATTR: &str = "da_compress"; - -/// Field attributes -pub enum FieldAttrs { - /// Skipped when compacting, and must be reconstructed when decompacting. - Skip, - /// Compacted recursively. - Normal, - /// This value is compacted into a registry lookup. - Registry(String), -} -impl FieldAttrs { - pub fn parse(attrs: &[syn::Attribute]) -> Self { - let re_registry = Regex::new(r#"^registry\s*=\s*"([a-zA-Z_]+)"$"#).unwrap(); - - let mut result = Self::Normal; - for attr in attrs { - if attr.style != syn::AttrStyle::Outer { - continue; - } - - if let syn::Meta::List(ml) = &attr.meta { - if ml.path.segments.len() == 1 && ml.path.segments[0].ident == ATTR { - if !matches!(result, Self::Normal) { - panic!("Duplicate attribute: {}", ml.tokens); - } - - let attr_contents = ml.tokens.to_string(); - if attr_contents == "skip" { - result = Self::Skip; - } else if let Some(m) = re_registry.captures(&attr_contents) { - result = Self::Registry(m.get(1).unwrap().as_str().to_owned()); - } else { - panic!("Invalid attribute: {}", ml.tokens); - } - } - } - } - - result - } -} diff --git a/crates/compression-derive/src/compact.rs b/crates/compression-derive/src/compact.rs deleted file mode 100644 index 39ab4c8dce5..00000000000 --- a/crates/compression-derive/src/compact.rs +++ /dev/null @@ -1,331 +0,0 @@ -use proc_macro2::TokenStream as TokenStream2; -use quote::{ - format_ident, - quote, -}; - -use crate::attribute::FieldAttrs; - -/// Map field definitions to compacted field definitions. 
-fn field_defs(fields: &syn::Fields) -> TokenStream2 { - let mut defs = TokenStream2::new(); - - for field in fields { - let attrs = FieldAttrs::parse(&field.attrs); - defs.extend(match &attrs { - FieldAttrs::Skip => quote! {}, - FieldAttrs::Normal => { - let ty = &field.ty; - let cty = quote! { - <#ty as ::fuel_core_compression::Compactable>::Compact - }; - if let Some(fname) = field.ident.as_ref() { - quote! { #fname: #cty, } - } else { - quote! { #cty, } - } - } - FieldAttrs::Registry(registry) => { - let reg_ident = format_ident!("{}", registry); - let cty = quote! { - ::fuel_core_compression::Key<::fuel_core_compression::tables::#reg_ident> - }; - if let Some(fname) = field.ident.as_ref() { - quote! { #fname: #cty, } - } else { - quote! { #cty, } - } - } - }); - } - - match fields { - syn::Fields::Named(_) => quote! {{ #defs }}, - syn::Fields::Unnamed(_) => quote! {(#defs)}, - syn::Fields::Unit => quote! {}, - } -} - -/// Construct compact version of the struct from the original one -fn construct_compact( - // The structure to construct, i.e. struct name or enum variant path - compact: &TokenStream2, - variant: &synstructure::VariantInfo<'_>, -) -> TokenStream2 { - let bound_fields: TokenStream2 = variant - .bindings() - .iter() - .map(|binding| { - let attrs = FieldAttrs::parse(&binding.ast().attrs); - let ty = &binding.ast().ty; - let cname = format_ident!("{}_c", binding.binding); - - match attrs { - FieldAttrs::Skip => quote! {}, - FieldAttrs::Normal => { - quote! { - let #cname = <#ty as Compactable>::compact(&#binding, ctx); - } - } - FieldAttrs::Registry(registry) => { - let reg_ident = format_ident!("{}", registry); - let cty = quote! { - Key< - tables::#reg_ident - > - }; - quote! 
{ - let #cname: #cty = ctx.to_key( - ::Type::from(#binding.clone()) - ); - } - } - } - }) - .collect(); - - let construct_fields: TokenStream2 = variant - .bindings() - .iter() - .map(|binding| { - let attrs = FieldAttrs::parse(&binding.ast().attrs); - if matches!(attrs, FieldAttrs::Skip) { - return quote! {}; - } - let cname = format_ident!("{}_c", binding.binding); - if let Some(fname) = &binding.ast().ident { - quote! { #fname: #cname, } - } else { - quote! { #cname, } - } - }) - .collect(); - - let construct_fields = match variant.ast().fields { - syn::Fields::Named(_) => quote! {{ #construct_fields }}, - syn::Fields::Unnamed(_) => quote! {(#construct_fields)}, - syn::Fields::Unit => quote! {}, - }; - - quote! { - #bound_fields - #compact #construct_fields - } -} -/// Construct original version of the struct from the compacted one -fn construct_decompact( - // The original structure to construct, i.e. struct name or enum variant path - original: &TokenStream2, - variant: &synstructure::VariantInfo<'_>, -) -> TokenStream2 { - let bound_fields: TokenStream2 = variant - .bindings() - .iter() - .map(|binding| { - let attrs = FieldAttrs::parse(&binding.ast().attrs); - let ty = &binding.ast().ty; - let cname = format_ident!("{}_c", binding.binding); - - match attrs { - FieldAttrs::Skip => quote! { - let #cname = Default::default(); - }, - FieldAttrs::Normal => { - quote! { - let #cname = <#ty as Compactable>::decompact(#binding, reg); - } - } - FieldAttrs::Registry(registry) => { - let reg_ident = format_ident!("{}", registry); - quote! { - let raw: ::Type = reg.read( - #binding - ); - let #cname = raw.into(); - } - } - } - }) - .collect(); - - let construct_fields: TokenStream2 = variant - .bindings() - .iter() - .map(|binding| { - let cname = format_ident!("{}_c", binding.binding); - if let Some(fname) = &binding.ast().ident { - quote! { #fname: #cname, } - } else { - quote! 
{ #cname, } - } - }) - .collect(); - - let construct_fields = match variant.ast().fields { - syn::Fields::Named(_) => quote! {{ #construct_fields }}, - syn::Fields::Unnamed(_) => quote! {(#construct_fields)}, - syn::Fields::Unit => quote! {}, - }; - - quote! { - #bound_fields - #original #construct_fields - } -} - -// Sum of Compactable::count() of all fields. -fn sum_counts(variant: &synstructure::VariantInfo<'_>) -> TokenStream2 { - variant - .bindings() - .iter() - .map(|binding| { - let attrs = FieldAttrs::parse(&binding.ast().attrs); - let ty = &binding.ast().ty; - - match attrs { - FieldAttrs::Skip => quote! { CountPerTable::default() }, - FieldAttrs::Normal => { - quote! { <#ty as Compactable>::count(&#binding) } - } - FieldAttrs::Registry(registry) => { - let reg_ident = format_ident!("{}", registry); - quote! { - CountPerTable::#reg_ident(1) - } - } - } - }) - .fold( - quote! { CountPerTable::default() }, - |acc, x| quote! { #acc + #x }, - ) -} - -/// Generate a match arm for each variant of the compacted structure -/// using the given function to generate the pattern body. -fn each_variant_compact) -> TokenStream2>( - s: &synstructure::Structure, - compact_name: &TokenStream2, - mut f: F, -) -> TokenStream2 { - s.variants() - .iter() - .map(|variant| { - // Modify the binding pattern to match the compact variant - let mut v2 = variant.clone(); - v2.filter(|field| { - let attrs = FieldAttrs::parse(&field.ast().attrs); - !matches!(attrs, FieldAttrs::Skip) - }); - v2.bindings_mut().iter_mut().for_each(|binding| { - binding.style = synstructure::BindStyle::Move; - }); - let mut p = v2.pat().into_iter(); - let _ = p.next().expect("pattern always begins with an identifier"); - let p = quote! { #compact_name #(#p)* }; - - let decompacted = f(variant); - quote! { - #p => { #decompacted } - } - }) - .collect() -} - -/// Derives `Compact` trait for the given `struct` or `enum`. 
-pub fn compact_derive(mut s: synstructure::Structure) -> TokenStream2 { - s.add_bounds(synstructure::AddBounds::None) - .underscore_const(true); - - let name = &s.ast().ident; - let compact_name = format_ident!("Compact{}", name); - - let g = s.ast().generics.clone(); - let w = g.where_clause.clone(); - let def = match &s.ast().data { - syn::Data::Struct(v) => { - let variant: &synstructure::VariantInfo = &s.variants()[0]; - let defs = field_defs(&variant.ast().fields); - let semi = match v.fields { - syn::Fields::Named(_) => quote! {}, - syn::Fields::Unnamed(_) => quote! {;}, - syn::Fields::Unit => quote! {;}, - }; - quote! { - #[derive(Clone, serde::Serialize, serde::Deserialize)] - #[doc = concat!("Compacted version of `", stringify!(#name), "`.")] - pub struct #compact_name #g #w #defs #semi - } - } - syn::Data::Enum(_) => { - let variant_defs: TokenStream2 = s - .variants() - .iter() - .map(|variant| { - let vname = variant.ast().ident.clone(); - let defs = field_defs(&variant.ast().fields); - quote! { - #vname #defs, - } - }) - .collect(); - - quote! { - #[derive(Clone, serde::Serialize, serde::Deserialize)] - #[doc = concat!("Compacted version of `", stringify!(#name), "`.")] - pub enum #compact_name #g #w { #variant_defs } - } - } - syn::Data::Union(_) => panic!("unions are not supported"), - }; - - let count_per_variant = s.each_variant(|variant| sum_counts(variant)); - let construct_per_variant = s.each_variant(|variant| { - let vname = variant.ast().ident.clone(); - let construct = match &s.ast().data { - syn::Data::Struct(_) => quote! { #compact_name }, - syn::Data::Enum(_) => quote! {#compact_name :: #vname }, - syn::Data::Union(_) => unreachable!(), - }; - construct_compact(&construct, variant) - }); - - let decompact_per_variant = - each_variant_compact(&s, "e! {#compact_name}, |variant| { - let vname = variant.ast().ident.clone(); - let construct = match &s.ast().data { - syn::Data::Struct(_) => quote! { #name }, - syn::Data::Enum(_) => quote! 
{#name :: #vname }, - syn::Data::Union(_) => unreachable!(), - }; - construct_decompact(&construct, variant) - }); - - let impls = s.gen_impl(quote! { - use ::fuel_core_compression::{RegistryDb, tables, Table, Key, Compactable, CountPerTable, CompactionContext}; - - gen impl Compactable for @Self { - type Compact = #compact_name #g; - - fn count(&self) -> CountPerTable { - match self { #count_per_variant } - } - - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact { - match self { #construct_per_variant } - } - - fn decompact(compact: Self::Compact, reg: &R) -> Self { - match compact { #decompact_per_variant } - } - } - }); - let rs = quote! { - #def - #impls - }; - - let _ = std::fs::write(format!("/tmp/derive/{}.rs", name), &rs.to_string()); - - rs -} diff --git a/crates/compression-derive/src/lib.rs b/crates/compression-derive/src/lib.rs deleted file mode 100644 index 6772ea00478..00000000000 --- a/crates/compression-derive/src/lib.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! Derive macros for canonical type serialization and deserialization. - -#![deny(unused_must_use, missing_docs)] - -extern crate proc_macro; -mod attribute; -mod compact; - -use self::compact::compact_derive; - -synstructure::decl_derive!( - [Compact, attributes(da_compress)] => - /// Derives `Compact` trait for the given `struct` or `enum`. - compact_derive -); diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index b23eab48496..43db27f0adf 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -11,19 +11,10 @@ repository = { workspace = true } description = "Compression and decompression of Fuel blocks for DA storage." 
[dependencies] -serde = { version = "1.0", features = ["derive"] } -serde-big-array = "0.5" - +fuel-core-types = { workspace = true, features = ["serde", "da-compression"] } postcard = { version = "1.0", features = ["use-std"] } -bincode = "1.3" - -paste = "1.0" - -fuel-core-compression-derive = { path = "../compression-derive" } +serde = { version = "1.0", features = ["derive"] } [dev-dependencies] -fuel-core-compression = { path = "." } # Self-dependency needed by test for macros -fuel-core-types = { workspace = true, features = ["serde"] } - -[features] -test-helpers = [] +fuel-vm-private = { workspace = true, features = ["test-helpers"] } +fuel-core-types = { workspace = true, features = ["test-helpers"] } diff --git a/crates/compression/README.md b/crates/compression/README.md deleted file mode 100644 index 23767f0ea14..00000000000 --- a/crates/compression/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# Compression and decompression of fuel-types for the DA layer - -## Compressed block header - -Each compressed block begins with a single-byte version field, so that it's possible to change the format later. - -## Temporal registry - -This crate provides offchain registries for different types such as `AssetId`, `ContractId`, scripts, and predicates. Each registry is a key-value store with three-byte key. The registires are essentially compression caches. The three byte key allows cache size of 16 million values before reregistering the older values. - -The registries allow replacing repeated objects with their respective keys, so if an object -is used multiple times in a short interval (couple of months, maybe), then the full value -exists on only a single uncompressed block, - -### Fraud proofs - -Compressed block will start with 32 bytes of merkle root over all compression smts, followed by newly registered values along with their keys. 
Using an SMT provides flexibility around the algorithm we use to define keys without knowing how exactly values were chosen to be registered. - -Each registry also uses an SMT. Since the keys are three bytes long, the depth of the SMT is capped at 24 levels. - - - - More efficient for fraud proofs instead of needing to provide entire previous blocks with proofs - -## Compression of `UtxoIds` - -Since each `UtxoId` only appears once, there's no point in registering them. Instead, they are replaced with `TxPointer`s (7 bytes worst case), which are still unique. - -### Fraud proofs - -During fraud proofs we need to use the `prev_root` to prove that the referenced block height is part of the chain. - -## Other techniques - -- These techniques should be good enough for now, but there are lots of other interesting ideas for this. diff --git a/crates/compression/src/compression.rs b/crates/compression/src/compression.rs deleted file mode 100644 index aa40e89c2ee..00000000000 --- a/crates/compression/src/compression.rs +++ /dev/null @@ -1,332 +0,0 @@ -use std::marker::PhantomData; - -use serde::{ - Deserialize, - Serialize, -}; - -use crate::{ - registry::{ - access::{ - self, - *, - }, - add_keys, - block_section::WriteTo, - next_keys, - ChangesPerTable, - CountPerTable, - KeyPerTable, - RegistryDb, - Table, - }, - Key, -}; - -#[must_use] -pub struct CompactionContext<'a, R> { - /// The registry - reg: &'a mut R, - /// These are the keys where writing started - start_keys: KeyPerTable, - /// The next keys to use for each table - next_keys: KeyPerTable, - /// Keys in range next_keys..safe_keys_start - /// could be overwritten by the compaction, - /// and cannot be used for new values. - safe_keys_start: KeyPerTable, - changes: ChangesPerTable, -} -impl<'a, R: RegistryDb> CompactionContext<'a, R> { - /// Run the compaction for the given target, returning the compacted data. - /// Changes are applied to the registry, and then returned as well. 
- pub fn run( - reg: &'a mut R, - target: C, - ) -> (C::Compact, ChangesPerTable) { - let start_keys = next_keys(reg); - let next_keys = start_keys; - let key_limits = target.count(); - let safe_keys_start = add_keys(next_keys, key_limits); - - let mut ctx = Self { - reg, - start_keys, - next_keys, - safe_keys_start, - changes: ChangesPerTable::from_start_keys(start_keys), - }; - - let compacted = target.compact(&mut ctx); - ctx.changes.apply_to_registry(ctx.reg); - (compacted, ctx.changes) - } -} - -impl<'a, R: RegistryDb> CompactionContext<'a, R> { - /// Convert a value to a key - /// If necessary, store the value in the changeset and allocate a new key. - pub fn to_key(&mut self, value: T::Type) -> Key - where - KeyPerTable: access::AccessCopy>, - KeyPerTable: access::AccessMut>, - ChangesPerTable: - access::AccessRef> + access::AccessMut>, - { - // Check if the value is within the current changeset - if let Some(key) = - >>::get(&self.changes) - .lookup_value(&value) - { - return key; - } - - // Check if the registry contains this value already - if let Some(key) = self.reg.index_lookup::(&value) { - let start: Key = self.start_keys.value(); - let end: Key = self.safe_keys_start.value(); - // Check if the value is in the possibly-overwritable range - if !key.is_between(start, end) { - return key; - } - } - // Allocate a new key for this - let key = >>::get_mut(&mut self.next_keys) - .take_next(); - >>::get_mut(&mut self.changes) - .values - .push(value); - key - } -} - -/// Convert data to reference-based format -pub trait Compactable { - type Compact: Clone + Serialize + for<'a> Deserialize<'a>; - - /// Count max number of each key type, for upper limit of overwritten keys - fn count(&self) -> CountPerTable; - - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact; - - fn decompact(compact: Self::Compact, reg: &R) -> Self; -} - -macro_rules! 
identity_compaction { - ($t:ty) => { - impl Compactable for $t { - type Compact = Self; - - fn count(&self) -> CountPerTable { - CountPerTable::default() - } - - fn compact( - &self, - _ctx: &mut CompactionContext, - ) -> Self::Compact { - *self - } - - fn decompact(compact: Self::Compact, _reg: &R) -> Self { - compact - } - } - }; -} - -identity_compaction!(u8); -identity_compaction!(u16); -identity_compaction!(u32); -identity_compaction!(u64); -identity_compaction!(u128); - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ArrayWrapper Deserialize<'a>>( - #[serde(with = "serde_big_array::BigArray")] pub [T; S], -); - -impl Compactable for [T; S] -where - T: Compactable + Clone + Serialize + for<'a> Deserialize<'a>, -{ - type Compact = ArrayWrapper; - - fn count(&self) -> CountPerTable { - let mut count = CountPerTable::default(); - for item in self.iter() { - count += item.count(); - } - count - } - - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact { - ArrayWrapper(self.clone().map(|item| item.compact(ctx))) - } - - fn decompact(compact: Self::Compact, reg: &R) -> Self { - compact.0.map(|item| T::decompact(item, reg)) - } -} - -impl Compactable for Vec -where - T: Compactable + Clone + Serialize + for<'a> Deserialize<'a>, -{ - type Compact = Vec; - - fn count(&self) -> CountPerTable { - let mut count = CountPerTable::default(); - for item in self.iter() { - count += item.count(); - } - count - } - - fn compact(&self, ctx: &mut CompactionContext) -> Self::Compact { - self.iter().map(|item| item.compact(ctx)).collect() - } - - fn decompact(compact: Self::Compact, reg: &R) -> Self { - compact - .into_iter() - .map(|item| T::decompact(item, reg)) - .collect() - } -} - -impl Compactable for PhantomData { - type Compact = (); - - fn count(&self) -> CountPerTable { - CountPerTable::default() - } - - fn compact(&self, _ctx: &mut CompactionContext) -> Self::Compact { - () - } - - fn decompact(_compact: Self::Compact, _reg: &R) -> 
Self { - Self - } -} - -#[cfg(test)] -mod tests { - use crate::{ - registry::{ - in_memory::InMemoryRegistry, - tables, - CountPerTable, - }, - Key, - RegistryDb, - }; - use fuel_core_compression::Compactable as _; // Hack for derive - use fuel_core_compression_derive::Compact; - use fuel_core_types::fuel_types::{ - Address, - AssetId, - }; - use serde::{ - Deserialize, - Serialize, - }; - - use super::{ - Compactable, - CompactionContext, - }; - - #[derive(Debug, Clone, PartialEq)] - struct ManualExample { - a: Address, - b: Address, - c: u64, - } - - #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] - struct ManualExampleCompact { - a: Key, - b: Key, - c: u64, - } - - impl Compactable for ManualExample { - type Compact = ManualExampleCompact; - - fn count(&self) -> crate::registry::CountPerTable { - CountPerTable { - Address: 2, - ..Default::default() - } - } - - fn compact( - &self, - ctx: &mut CompactionContext, - ) -> Self::Compact { - let a = ctx.to_key::(*self.a); - let b = ctx.to_key::(*self.b); - ManualExampleCompact { a, b, c: self.c } - } - - fn decompact(compact: Self::Compact, reg: &R) -> Self { - let a = Address::from(reg.read::(compact.a)); - let b = Address::from(reg.read::(compact.b)); - Self { a, b, c: compact.c } - } - } - - #[derive(Debug, Clone, PartialEq, Compact)] - struct AutomaticExample { - #[da_compress(registry = "AssetId")] - a: AssetId, - #[da_compress(registry = "AssetId")] - b: AssetId, - c: u32, - } - - #[test] - fn test_compaction_properties() { - let a = ManualExample { - a: Address::from([1u8; 32]), - b: Address::from([2u8; 32]), - c: 3, - }; - assert_eq!(a.count().Address, 2); - assert_eq!(a.count().AssetId, 0); - - let b = AutomaticExample { - a: AssetId::from([1u8; 32]), - b: AssetId::from([2u8; 32]), - c: 3, - }; - assert_eq!(b.count().Address, 0); - assert_eq!(b.count().AssetId, 2); - } - - #[test] - fn test_compaction_roundtrip() { - let target = ManualExample { - a: Address::from([1u8; 32]), - b: 
Address::from([2u8; 32]), - c: 3, - }; - let mut registry = InMemoryRegistry::default(); - let (compacted, _) = CompactionContext::run(&mut registry, target.clone()); - let decompacted = ManualExample::decompact(compacted, ®istry); - assert_eq!(target, decompacted); - - let target = AutomaticExample { - a: AssetId::from([1u8; 32]), - b: AssetId::from([2u8; 32]), - c: 3, - }; - let mut registry = fuel_core_compression::InMemoryRegistry::default(); - let (compacted, _) = - fuel_core_compression::CompactionContext::run(&mut registry, target.clone()); - let decompacted = AutomaticExample::decompact(compacted, ®istry); - assert_eq!(target, decompacted); - } -} diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 56b9606112c..71ad82da02d 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -1,20 +1,183 @@ -mod compression; -mod registry; +use serde::{ + Deserialize, + Serialize, +}; -pub use compression::{ - Compactable, - CompactionContext, +use fuel_core_types::{ + fuel_compression::ChangesPerTable, + fuel_tx::CompactTransaction, }; -pub use registry::{ - tables, - ChangesPerTable, - CountPerTable, - Key, - RegistryDb, - Table, + +use fuel_core_types::{ + blockchain::primitives::DaBlockHeight, + fuel_types::{ + BlockHeight, + Bytes32, + }, + tai64::Tai64, }; -#[cfg(feature = "test-helpers")] -pub use registry::in_memory::InMemoryRegistry; +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Header { + pub da_height: DaBlockHeight, + pub prev_root: Bytes32, + pub height: BlockHeight, + pub time: Tai64, +} + +/// Compressed block. +/// The versioning here working depends on the serialization format, +/// but as long as we we have less than 128 variants, postcard will +/// make that a single byte. 
+#[derive(Clone, Serialize, Deserialize)] +#[non_exhaustive] +pub enum CompressedBlock { + V0 { + /// Registration section of the compressed block + registrations: ChangesPerTable, + /// Compressed block header + header: Header, + /// Compressed transactions + transactions: Vec, + }, +} + +#[cfg(test)] +mod tests { + use std::array; + + use fuel_core_types::{ + blockchain::primitives::DaBlockHeight, + fuel_compression::{ + Compact, + Compactable, + CompactionContext, + InMemoryRegistry, + }, + fuel_tx::Transaction, + tai64::Tai64, + }; + + use super::*; + + #[test] + fn postcard_roundtrip() { + let original = CompressedBlock::V0 { + registrations: ChangesPerTable::from_start_keys(Default::default()), + header: Header { + da_height: DaBlockHeight::default(), + prev_root: Default::default(), + height: 3u32.into(), + time: Tai64::UNIX_EPOCH, + }, + transactions: vec![], + }; + + let compressed = postcard::to_allocvec(&original).unwrap(); + let decompressed: CompressedBlock = postcard::from_bytes(&compressed).unwrap(); + + let CompressedBlock::V0 { + registrations, + header, + transactions, + } = decompressed; + + assert!(registrations.is_empty()); + assert_eq!(header.height, 3u32.into()); + assert!(transactions.is_empty()); + } + + #[test] + fn compact_transaction() { + let tx = Transaction::default_test_tx(); + let mut registry = InMemoryRegistry::default(); + let (compacted, _) = CompactionContext::run(&mut registry, tx.clone()); + let decompacted = Transaction::decompact(compacted.clone(), ®istry); + assert_eq!(tx, decompacted); + + // Check size reduction + let compressed_original = postcard::to_allocvec(&tx).unwrap(); + let compressed_compact = postcard::to_allocvec(&compacted).unwrap(); + assert!(compressed_compact.len() < compressed_original.len() / 2); // Arbitrary threshold + } + + #[test] + fn compact_transaction_twice_gives_equal_result() { + let tx = Transaction::default_test_tx(); + let mut registry = InMemoryRegistry::default(); + let (compacted1, 
changes1) = CompactionContext::run(&mut registry, tx.clone()); + let (compacted2, changes2) = CompactionContext::run(&mut registry, tx.clone()); + assert!(!changes1.is_empty()); + assert!(changes2.is_empty()); + let compressed1 = postcard::to_allocvec(&compacted1).unwrap(); + let compressed2 = postcard::to_allocvec(&compacted2).unwrap(); + assert_eq!(compressed1, compressed2); + } + + #[test] + fn sizes_of_repeated_tx_make_sense() { + let tx = Transaction::default_test_tx(); + + let sizes: [usize; 4] = array::from_fn(|i| { + // Registry recreated for each block in this test + let mut registry = InMemoryRegistry::default(); + + let (transactions, registrations) = + CompactionContext::run(&mut registry, vec![tx.clone(); i]); + + let original = CompressedBlock::V0 { + registrations, + header: Header { + da_height: DaBlockHeight::default(), + prev_root: Default::default(), + height: 3u32.into(), + time: Tai64::UNIX_EPOCH, + }, + transactions, + }; + + let compressed = postcard::to_allocvec(&original).unwrap(); + compressed.len() + }); + + assert!( + sizes.windows(2).all(|w| w[0] < w[1]), + "Sizes should be in strictly ascending order" + ); + let deltas: Vec<_> = sizes.windows(2).map(|w| w[1] - w[0]).collect(); + assert!(deltas[0] > deltas[1], "Initial delta should be larger"); + assert!(deltas[1] == deltas[2], "Later delta should be constant"); + } + + #[test] + fn same_compact_tx_is_smaller_in_next_block() { + let tx = Transaction::default_test_tx(); + + let mut registry = InMemoryRegistry::default(); + + let sizes: [usize; 3] = array::from_fn(|_| { + let (transactions, registrations) = + CompactionContext::run(&mut registry, vec![tx.clone()]); + + let original = CompressedBlock::V0 { + registrations, + header: Header { + da_height: DaBlockHeight::default(), + prev_root: Default::default(), + height: 3u32.into(), + time: Tai64::UNIX_EPOCH, + }, + transactions, + }; + + let compressed = postcard::to_allocvec(&original).unwrap(); + compressed.len() + }); -pub use 
fuel_core_compression_derive::Compact; + assert!(sizes[0] > sizes[1], "Size must decrease after first block"); + assert!( + sizes[1] == sizes[2], + "Size must be constant after first block" + ); + } +} diff --git a/crates/compression/src/registry/block_section.rs b/crates/compression/src/registry/block_section.rs deleted file mode 100644 index d389c564ba7..00000000000 --- a/crates/compression/src/registry/block_section.rs +++ /dev/null @@ -1,199 +0,0 @@ -use core::fmt; - -use serde::{ - ser::SerializeTuple, - Deserialize, - Serialize, -}; - -use super::{ - key::Key, - ChangesPerTable, - Table, -}; - -/// New registrations written to a specific table. -#[derive(Clone, PartialEq, Eq)] -pub struct WriteTo { - /// The values are inserted starting from this key - pub start_key: Key, - /// Values. inserted using incrementing ids starting from `start_key` - pub values: Vec, -} - -impl fmt::Debug for WriteTo -where - T::Type: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.values.is_empty() { - return f.write_str("WriteTo::EMPTY"); - } - - f.debug_struct("WriteTo") - .field("start_key", &self.start_key) - .field("values", &self.values) - .finish() - } -} - -impl WriteTo -where - T::Type: PartialEq, -{ - /// Reverse lookup. 
- /// TODO: possibly add a lookup table for this, if deemed necessary - pub fn lookup_value(&self, needle: &T::Type) -> Option> { - if *needle == T::Type::default() { - return Some(Key::DEFAULT_VALUE); - } - - let mut key = self.start_key; - for v in &self.values { - if v == needle { - return Some(key); - } - key = key.next(); - } - None - } -} - -/// Custom serialization is used to omit the start_key when the sequence is empty -impl Serialize for WriteTo -where - T: Table + Serialize, -{ - fn serialize(&self, serializer: S) -> Result { - let mut tup = serializer.serialize_tuple(2)?; - tup.serialize_element(&self.values)?; - if self.values.is_empty() { - tup.serialize_element(&())?; - } else { - tup.serialize_element(&self.start_key)?; - } - tup.end() - } -} - -impl<'de, T: Table> Deserialize<'de> for WriteTo -where - T: Deserialize<'de>, -{ - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - deserializer.deserialize_tuple( - 2, - Self { - start_key: Key::ZERO, - values: Vec::new(), - }, - ) - } -} - -impl<'de, T: Table + Deserialize<'de>> serde::de::Visitor<'de> for WriteTo { - type Value = WriteTo; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str(concat!("WriteTo<", stringify!(T), "> instance")) - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'de>, - { - let values: Vec = seq.next_element()?.ok_or( - serde::de::Error::invalid_length(0, &"WriteTo<_> with 2 elements"), - )?; - - if values.is_empty() { - let _: () = seq.next_element()?.ok_or(serde::de::Error::invalid_length( - 1, - &"WriteTo<_> with 2 elements", - ))?; - Ok(WriteTo { - start_key: Key::ZERO, - values, - }) - } else { - let start_key: Key = seq.next_element()?.ok_or( - serde::de::Error::invalid_length(1, &"WriteTo<_> with 2 elements"), - )?; - Ok(WriteTo { start_key, values }) - } - } -} - -/// Registeration section of the compressed block -#[derive(Debug, Clone, PartialEq, Eq, 
Serialize, Deserialize)] -pub struct Registrations { - /// Merkle root of the registeration table merkle roots - pub tables_root: [u8; 32], - /// Changes per table - pub changes: ChangesPerTable, -} - -#[cfg(test)] -mod tests { - use super::*; - use bincode::Options; - use fuel_core_types::{ - fuel_asm::op, - fuel_tx::AssetId, - fuel_types::Address, - }; - - #[test] - fn test_tables() { - let original = Registrations { - tables_root: Default::default(), - changes: ChangesPerTable { - AssetId: WriteTo { - start_key: Key::try_from(100).unwrap(), - values: vec![*AssetId::from([0xa0; 32]), *AssetId::from([0xa1; 32])], - }, - Address: WriteTo { - start_key: Key::ZERO, - values: vec![*Address::from([0xc0; 32])], - }, - ScriptCode: WriteTo { - start_key: Key::ZERO, - values: vec![ - vec![op::addi(0x20, 0x20, 1), op::ret(0)] - .into_iter() - .collect(), - vec![op::muli(0x20, 0x20, 5), op::ret(1)] - .into_iter() - .collect(), - ], - }, - Witness: WriteTo { - start_key: Key::ZERO, - values: vec![], - }, - }, - }; - - let pc_compressed = postcard::to_stdvec(&original).unwrap(); - let pc_decompressed: Registrations = - postcard::from_bytes(&pc_compressed).unwrap(); - assert_eq!(original, pc_decompressed); - - let bc_opt = bincode::DefaultOptions::new().with_varint_encoding(); - - let bc_compressed = bc_opt.serialize(&original).unwrap(); - let bc_decompressed: Registrations = bc_opt.deserialize(&bc_compressed).unwrap(); - assert_eq!(original, bc_decompressed); - - println!("data: {original:?}"); - println!("postcard compressed size {}", pc_compressed.len()); - println!("bincode compressed size {}", bc_compressed.len()); - println!("postcard compressed: {:x?}", pc_compressed); - println!("bincode compressed: {:x?}", bc_compressed); - - // panic!("ok, just showing the results"); - } -} diff --git a/crates/compression/src/registry/db.rs b/crates/compression/src/registry/db.rs deleted file mode 100644 index 40c729c31e4..00000000000 --- a/crates/compression/src/registry/db.rs +++ 
/dev/null @@ -1,20 +0,0 @@ -use super::{ - Key, - Table, -}; - -pub trait RegistryDb { - /// Get next key for the given table. This is where the next write should start at. - /// The result of this function is just a suggestion, and the caller may choose to - /// ignore it, although it's rare that they would know better. - fn next_key(&self) -> Key; - - /// Read a value from the registry by key - fn read(&self, key: Key) -> T::Type; - - /// Write a continuous sequence of values to the registry - fn batch_write(&mut self, start_key: Key, values: Vec); - - /// Lookup a key by value - fn index_lookup(&self, value: &T::Type) -> Option>; -} diff --git a/crates/compression/src/registry/in_memory.rs b/crates/compression/src/registry/in_memory.rs deleted file mode 100644 index f55b001597b..00000000000 --- a/crates/compression/src/registry/in_memory.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::collections::HashMap; - -use super::{ - db::*, - key::RawKey, - Key, - Table, -}; - -/// Simple and inefficient in-memory registry for testing purposes. 
-#[derive(Debug, Clone, PartialEq, Eq, Default)] -pub struct InMemoryRegistry { - next_keys: HashMap<&'static str, RawKey>, - storage: HashMap<&'static str, HashMap>>, - index: HashMap<&'static str, HashMap, RawKey>>, -} - -impl RegistryDb for InMemoryRegistry { - fn next_key(&self) -> Key { - Key::from_raw(self.next_keys.get(T::NAME).copied().unwrap_or(RawKey::ZERO)) - } - - fn read(&self, key: Key) -> T::Type { - if key == Key::DEFAULT_VALUE { - return T::Type::default(); - } - - self.storage - .get(T::NAME) - .and_then(|table| table.get(&key.raw())) - .map(|bytes| postcard::from_bytes(bytes).expect("Invalid value in registry")) - .unwrap_or_default() - } - - fn batch_write(&mut self, start_key: Key, values: Vec) { - let empty = values.is_empty(); - if !empty && start_key == Key::DEFAULT_VALUE { - panic!("Cannot write to the default value key"); - } - let table = self.storage.entry(T::NAME).or_default(); - let mut key = start_key.raw(); - for value in values.into_iter() { - let value = postcard::to_stdvec(&value).unwrap(); - let mut prefix = value.clone(); - prefix.truncate(32); - self.index.entry(T::NAME).or_default().insert(prefix, key); - table.insert(key, value); - key = key.next(); - } - if !empty { - self.next_keys.insert(T::NAME, key); - } - } - - fn index_lookup(&self, value: &T::Type) -> Option> { - if *value == T::Type::default() { - return Some(Key::DEFAULT_VALUE); - } - - let needle = postcard::to_stdvec(value).unwrap(); - let mut prefix = needle.clone(); - prefix.truncate(32); - if let Some(cand) = self.index.get(T::NAME)?.get(&prefix).copied() { - let cand_val = self.storage.get(T::NAME)?.get(&cand)?; - if *cand_val == needle { - return Some(Key::from_raw(cand)); - } - } - - None - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - tables, - Key, - }; - - #[test] - fn in_memory_registry_works() { - let mut reg = InMemoryRegistry::default(); - - // Empty - assert_eq!( - reg.read(Key::::try_from(100).unwrap()), - [0; 32] - ); - - // 
Write - reg.batch_write( - Key::::from_raw(RawKey::try_from(100u32).unwrap()), - vec![[1; 32], [2; 32]], - ); - - // Read - assert_eq!( - reg.read(Key::::try_from(100).unwrap()), - [1; 32] - ); - - // Index - assert_eq!( - reg.index_lookup(&[1; 32]), - Some(Key::::try_from(100).unwrap()) - ); - } -} diff --git a/crates/compression/src/registry/key.rs b/crates/compression/src/registry/key.rs deleted file mode 100644 index f08fbcebc46..00000000000 --- a/crates/compression/src/registry/key.rs +++ /dev/null @@ -1,172 +0,0 @@ -use core::fmt; -use std::marker::PhantomData; - -use serde::{ - Deserialize, - Serialize, -}; - -use super::Table; - -/// Untyped key pointing to a registry table entry. -/// The last key (all bits set) is reserved for the default value and cannot be written to. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct RawKey([u8; Self::SIZE]); -impl RawKey { - pub const SIZE: usize = 3; - pub const ZERO: Self = Self([0; Self::SIZE]); - pub const MAX_WRITABLE: Self = Self([u8::MAX, u8::MAX, u8::MAX - 1]); - pub const DEFAULT_VALUE: Self = Self([u8::MAX; Self::SIZE]); - - pub fn as_u32(self) -> u32 { - u32::from_be_bytes([0, self.0[0], self.0[1], self.0[2]]) - } - - /// Wraps around just below max/default value. - pub fn add_u32(self, rhs: u32) -> Self { - let lhs = self.as_u32() as u64; - let rhs = rhs as u64; - // Safety: cannot overflow as both operands are limited to 32 bits - let result = (lhs + rhs) % (Self::DEFAULT_VALUE.as_u32() as u64); - // Safety: cannot truncate as we are already limited to 24 bits by modulo - let v = result as u32; - let v = v.to_be_bytes(); - Self([v[1], v[2], v[3]]) - } - - /// Wraps around just below max/default value. - pub fn next(self) -> Self { - self.add_u32(1) - } - - /// Is `self` between `start` and `end`? i.e. in the half-open logical range `start`..`end`, - /// so that wrap-around cases are handled correctly. - /// - /// Panics if max/default value is used. 
- pub fn is_between(self, start: Self, end: Self) -> bool { - assert!( - self != Self::DEFAULT_VALUE, - "Cannot use max/default value in is_between" - ); - assert!( - start != Self::DEFAULT_VALUE, - "Cannot use max/default value in is_between" - ); - assert!( - end != Self::DEFAULT_VALUE, - "Cannot use max/default value in is_between" - ); - - let low = start.as_u32(); - let high = end.as_u32(); - let v = self.as_u32(); - - if high >= low { - low <= v && v < high - } else { - v < high || v >= low - } - } -} -impl TryFrom for RawKey { - type Error = &'static str; - fn try_from(value: u32) -> Result { - let v = value.to_be_bytes(); - if v[0] != 0 { - return Err("RawKey must be less than 2^24"); - } - - let mut bytes = [0u8; 3]; - bytes.copy_from_slice(&v[1..]); - Ok(Self(bytes)) - } -} - -/// Typed key to a registry table entry. -/// The last key (all bits set) is reserved for the default value and cannot be written to. -#[derive(Eq, Hash, Serialize, Deserialize)] -#[serde(transparent)] -pub struct Key(RawKey, PhantomData); -impl Clone for Key { - fn clone(&self) -> Self { - Self(self.0, PhantomData) - } -} -impl Copy for Key {} - -impl PartialEq> for Key { - fn eq(&self, other: &Self) -> bool { - self.0 == other.0 - } -} - -impl Key { - /// This is the first writable key. - pub const ZERO: Self = Self(RawKey::ZERO, PhantomData); - - /// This key is reserved for the default value and cannot be written to. - pub const DEFAULT_VALUE: Self = Self(RawKey::DEFAULT_VALUE, PhantomData); - - pub fn raw(&self) -> RawKey { - self.0 - } - - pub fn from_raw(raw: RawKey) -> Self { - Self(raw, PhantomData) - } - - /// Wraps around at limit, i.e. one below the max/default value - pub fn add_u32(self, rhs: u32) -> Self { - Self(self.0.add_u32(rhs), PhantomData) - } - - /// Wraps around at limit, i.e. one below the max/default value - pub fn next(self) -> Self { - Self(self.0.next(), PhantomData) - } - - /// Is `self` between `start` and `end`? i.e. 
in the half-open logical range `start`..`end`, - /// so that wrap-around cases are handled correctly. - pub fn is_between(self, start: Self, end: Self) -> bool { - self.0.is_between(start.0, end.0) - } - /// Increments the key by one, and returns the previous value. - /// Skips the max/default value. - pub fn take_next(&mut self) -> Self { - let result = *self; - self.0 = self.0.next(); - result - } -} - -impl TryFrom for Key { - type Error = &'static str; - fn try_from(value: u32) -> Result { - Ok(Self(RawKey::try_from(value)?, PhantomData)) - } -} - -impl fmt::Debug for Key { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if *self == Self::DEFAULT_VALUE { - write!(f, "Key<{}>::DEFAULT_VALUE", T::NAME) - } else { - write!(f, "Key<{}>({})", T::NAME, self.0.as_u32()) - } - } -} - -#[cfg(test)] -mod tests { - use super::RawKey; - - #[test] - fn key_next() { - assert_eq!(RawKey::ZERO.next(), RawKey([0, 0, 1])); - assert_eq!(RawKey::ZERO.next().next(), RawKey([0, 0, 2])); - assert_eq!(RawKey([0, 0, 255]).next(), RawKey([0, 1, 0])); - assert_eq!(RawKey([0, 1, 255]).next(), RawKey([0, 2, 0])); - assert_eq!(RawKey([0, 255, 255]).next(), RawKey([1, 0, 0])); - assert_eq!(RawKey::MAX_WRITABLE.next(), RawKey::ZERO); - } -} diff --git a/crates/compression/src/registry/mod.rs b/crates/compression/src/registry/mod.rs deleted file mode 100644 index 9bbd08bef24..00000000000 --- a/crates/compression/src/registry/mod.rs +++ /dev/null @@ -1,283 +0,0 @@ -use serde::{ - Deserialize, - Serialize, -}; - -pub(crate) mod block_section; -pub mod db; -pub(crate) mod in_memory; -mod key; - -use self::block_section::WriteTo; -pub use self::{ - db::RegistryDb, - key::Key, -}; - -mod _private { - pub trait Seal {} -} - -pub trait Table: _private::Seal { - const NAME: &'static str; - type Type: PartialEq + Default + Serialize + for<'de> Deserialize<'de>; -} - -pub mod access { - pub trait AccessCopy { - fn value(&self) -> V; - } - - pub trait AccessRef { - fn get(&self) -> &V; - } 
- - pub trait AccessMut { - fn get_mut(&mut self) -> &mut V; - } -} - -macro_rules! tables { - // $index muse use increasing numbers starting from zero - ($($name:ident: $ty:ty),*$(,)?) => { - pub mod tables { - $( - /// Specifies the table to use for a given key. - /// The data is separated to tables based on the data type being stored. - #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] - pub struct $name; - - impl super::_private::Seal for $name {} - impl super::Table for $name { - const NAME: &'static str = stringify!($name); - type Type = $ty; - } - )* - } - - /// One counter per table - #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] - #[allow(non_snake_case)] // The field names match table type names eactly - #[non_exhaustive] - pub struct CountPerTable { - $(pub $name: usize),* - } - - impl CountPerTable {$( - /// Custom constructor per table - #[allow(non_snake_case)] // The field names match table type names eactly - pub fn $name(value: usize) -> Self { - Self { - $name: value, - ..Self::default() - } - } - )*} - - $( - impl access::AccessCopy for CountPerTable { - fn value(&self) -> usize { - self.$name - } - } - )* - - impl core::ops::Add for CountPerTable { - type Output = Self; - - fn add(self, rhs: CountPerTable) -> Self::Output { - Self { - $($name: self.$name + rhs.$name),* - } - } - } - - impl core::ops::AddAssign for CountPerTable { - fn add_assign(&mut self, rhs: CountPerTable) { - $(self.$name += rhs.$name);* - } - } - - /// One key value per table - #[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)] - #[allow(non_snake_case)] // The field names match table type names eactly - #[non_exhaustive] - pub struct KeyPerTable { - $(pub $name: Key),* - } - - impl Default for KeyPerTable { - fn default() -> Self { - Self { - $($name: Key::ZERO,)* - } - } - } - - $( - impl access::AccessCopy> for KeyPerTable { - fn value(&self) -> Key { - 
self.$name - } - } - impl access::AccessRef> for KeyPerTable { - fn get(&self) -> &Key { - &self.$name - } - } - impl access::AccessMut> for KeyPerTable { - fn get_mut(&mut self) -> &mut Key { - &mut self.$name - } - } - )* - - pub fn next_keys(reg: &mut R) -> KeyPerTable { - KeyPerTable { - $( $name: reg.next_key(), )* - } - } - - /// Used to add together keys and counts to deterimine possible overwrite range - pub fn add_keys(keys: KeyPerTable, counts: CountPerTable) -> KeyPerTable { - KeyPerTable { - $( - $name: keys.$name.add_u32(counts.$name.try_into() - .expect("Count too large. Shoudn't happen as we control inputs here.") - ), - )* - } - } - - /// Registeration changes per table - #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] - #[allow(non_snake_case)] // The field names match table type names eactly - #[non_exhaustive] - pub struct ChangesPerTable { - $(pub $name: WriteTo),* - } - - impl ChangesPerTable { - pub fn is_empty(&self) -> bool { - true $(&& self.$name.values.is_empty())* - } - - pub fn from_start_keys(start_keys: KeyPerTable) -> Self { - Self { - $($name: WriteTo { - start_key: start_keys.$name, - values: Vec::new(), - }),* - } - } - - /// Apply changes to the registry db - pub fn apply_to_registry(&self, reg: &mut R) { - $( - reg.batch_write(self.$name.start_key, self.$name.values.clone()); - )* - } - } - - $( - impl access::AccessRef> for ChangesPerTable { - fn get(&self) -> &WriteTo { - &self.$name - } - } - impl access::AccessMut> for ChangesPerTable { - fn get_mut(&mut self) -> &mut WriteTo { - &mut self.$name - } - } - )* - }; -} - -tables!( - AssetId: [u8; 32], - Address: [u8; 32], - ScriptCode: Vec, - Witness: Vec, -); - -#[cfg(test)] -mod tests { - use fuel_core_types::fuel_types::AssetId; - use tests::key::RawKey; - - use super::*; - - #[test] - fn test_in_memory_db() { - let mut reg = in_memory::InMemoryRegistry::default(); - - // Empty - assert_eq!( - reg.read(Key::::try_from(100).unwrap()), - [0; 32] - 
); - assert_eq!( - reg.index_lookup(&*AssetId::from([1; 32])), - None::> - ); - - // Write - reg.batch_write( - Key::::from_raw(RawKey::try_from(100u32).unwrap()), - vec![[1; 32], [2; 32]], - ); - assert_eq!( - reg.read(Key::::try_from(100).unwrap()), - [1; 32] - ); - assert_eq!( - reg.read(Key::::try_from(101).unwrap()), - [2; 32] - ); - assert_eq!( - reg.read(Key::::try_from(102).unwrap()), - [0; 32] - ); - - // Overwrite - reg.batch_write( - Key::::from_raw(RawKey::try_from(99u32).unwrap()), - vec![[10; 32], [11; 32]], - ); - assert_eq!( - reg.read(Key::::try_from(99).unwrap()), - [10; 32] - ); - assert_eq!( - reg.read(Key::::try_from(100).unwrap()), - [11; 32] - ); - - // Wrapping - reg.batch_write( - Key::::from_raw(RawKey::MAX_WRITABLE), - vec![[3; 32], [4; 32]], - ); - - assert_eq!( - reg.read(Key::::from_raw(RawKey::MAX_WRITABLE)), - [3; 32] - ); - - assert_eq!( - reg.read(Key::::from_raw(RawKey::ZERO)), - [4; 32] - ); - - assert_eq!( - reg.index_lookup(&*AssetId::from([3; 32])), - Some(Key::::from_raw(RawKey::MAX_WRITABLE)) - ); - - assert_eq!( - reg.index_lookup(&*AssetId::from([4; 32])), - Some(Key::::from_raw(RawKey::ZERO)) - ); - } -} diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index 2924c1390c1..ecd2e5bdcb7 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -8,6 +8,9 @@ #![deny(missing_docs)] #![deny(warnings)] +#[doc(no_inline)] +#[cfg(feature = "da-compression")] +pub use fuel_vm_private::fuel_compression; #[doc(no_inline)] pub use fuel_vm_private::{ fuel_asm, From fae559be84b072707e9d63e4b3f45a22818d9456 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 12 Feb 2024 19:34:17 +0200 Subject: [PATCH 020/112] Work towards properly architected (de)compression services --- Cargo.lock | 36 ++--- Cargo.toml | 2 +- crates/compression/Cargo.toml | 8 + crates/compression/src/db.rs | 151 ++++++++++++++++++ crates/compression/src/lib.rs | 52 +++--- crates/compression/src/ports.rs | 15 ++ 
crates/compression/src/services/compress.rs | 88 ++++++++++ crates/compression/src/services/decompress.rs | 108 +++++++++++++ .../src/graphql_api/worker_service.rs | 1 - crates/fuel-core/src/schema/tx/input.rs | 1 - crates/fuel-core/src/schema/tx/output.rs | 1 - crates/fuel-core/src/schema/tx/types.rs | 6 - crates/services/executor/src/executor.rs | 2 - .../txpool/src/containers/dependency.rs | 7 - 14 files changed, 415 insertions(+), 63 deletions(-) create mode 100644 crates/compression/src/db.rs create mode 100644 crates/compression/src/ports.rs create mode 100644 crates/compression/src/services/compress.rs create mode 100644 crates/compression/src/services/decompress.rs diff --git a/Cargo.lock b/Cargo.lock index ff92c7e0078..2a9fe190e29 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -747,15 +747,6 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - [[package]] name = "bindgen" version = "0.65.1" @@ -2600,7 +2591,7 @@ dependencies = [ [[package]] name = "fuel-asm" -version = "0.44.0" +version = "0.45.0" dependencies = [ "bitflags 2.4.2", "fuel-types", @@ -2610,11 +2601,10 @@ dependencies = [ [[package]] name = "fuel-compression" -version = "0.44.0" +version = "0.45.0" dependencies = [ - "bincode", + "anyhow", "fuel-derive", - "paste", "postcard", "serde", "serde-big-array", @@ -2788,10 +2778,14 @@ dependencies = [ name = "fuel-core-compression" version = "0.22.0" dependencies = [ + "anyhow", + "async-trait", "fuel-core-types", "fuel-vm", "postcard", + "rocksdb", "serde", + "tokio", ] [[package]] @@ -3154,7 +3148,7 @@ dependencies = [ [[package]] name = "fuel-crypto" -version = "0.44.0" +version = "0.45.0" dependencies = [ 
"coins-bip32", "coins-bip39", @@ -3173,7 +3167,7 @@ dependencies = [ [[package]] name = "fuel-derive" -version = "0.44.0" +version = "0.45.0" dependencies = [ "proc-macro2", "quote", @@ -3184,7 +3178,7 @@ dependencies = [ [[package]] name = "fuel-merkle" -version = "0.44.0" +version = "0.45.0" dependencies = [ "derive_more", "digest 0.10.7", @@ -3197,12 +3191,13 @@ dependencies = [ [[package]] name = "fuel-storage" -version = "0.44.0" +version = "0.45.0" [[package]] name = "fuel-tx" -version = "0.44.0" +version = "0.45.0" dependencies = [ + "anyhow", "bitflags 2.4.2", "derivative", "derive_more", @@ -3222,8 +3217,9 @@ dependencies = [ [[package]] name = "fuel-types" -version = "0.44.0" +version = "0.45.0" dependencies = [ + "anyhow", "fuel-compression", "fuel-derive", "hex", @@ -3233,7 +3229,7 @@ dependencies = [ [[package]] name = "fuel-vm" -version = "0.44.0" +version = "0.45.0" dependencies = [ "anyhow", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index 6356ce53475..18463530d39 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,7 +79,7 @@ fuel-core-tests = { version = "0.0.0", path = "./tests" } fuel-core-xtask = { version = "0.0.0", path = "./xtask" } # Fuel dependencies -fuel-vm-private = { path = "../fuel-vm/fuel-vm", version = "0.44.0", package = "fuel-vm", default-features = false } +fuel-vm-private = { path = "../fuel-vm/fuel-vm", version = "0.45.0", package = "fuel-vm", default-features = false } # Common dependencies anyhow = "1.0" diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 43db27f0adf..e514f181368 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -11,10 +11,18 @@ repository = { workspace = true } description = "Compression and decompression of Fuel blocks for DA storage." 
[dependencies] +anyhow = { workspace = true } +async-trait = { workspace = true } fuel-core-types = { workspace = true, features = ["serde", "da-compression"] } postcard = { version = "1.0", features = ["use-std"] } +rocksdb = { version = "0.21", default-features = false, optional = true } serde = { version = "1.0", features = ["derive"] } +tokio = { workspace = true, features = ["sync"] } [dev-dependencies] fuel-vm-private = { workspace = true, features = ["test-helpers"] } fuel-core-types = { workspace = true, features = ["test-helpers"] } + +[features] +default = ["rocksdb"] +rocksdb = ["dep:rocksdb"] diff --git a/crates/compression/src/db.rs b/crates/compression/src/db.rs new file mode 100644 index 00000000000..7c4de13ea7b --- /dev/null +++ b/crates/compression/src/db.rs @@ -0,0 +1,151 @@ +use std::path::Path; + +use fuel_core_types::{ + fuel_compression::{ + Key, + RegistryDb, + Table, + }, + fuel_types::BlockHeight, +}; +use rocksdb::WriteBatchWithTransaction; + +/// Access temporal registry state +pub trait TemporalRegistry: RegistryDb { + /// The temporal database is only valid for the block on this height. 
+ fn next_block_height(&self) -> anyhow::Result; +} + +pub struct RocksDb { + db: rocksdb::DB, +} + +impl RocksDb { + pub fn open>(path: P) -> anyhow::Result { + use rocksdb::{ + ColumnFamilyDescriptor, + Options, + DB, + }; + + let mut db_opts = Options::default(); + db_opts.create_missing_column_families(true); + db_opts.create_if_missing(true); + Ok(Self { + db: DB::open_cf_descriptors( + &db_opts, + path, + vec![ + // Meta table holding misc + ColumnFamilyDescriptor::new("meta", Options::default()), + // Next temporal registry key for each table + ColumnFamilyDescriptor::new("next_keys", Options::default()), + // Temporal registry key:value pairs, with key as + // null-separated (table, key) pair + ColumnFamilyDescriptor::new("temporal", Options::default()), + // Reverse index into temporal registry values, with key as + // null-separated (table, indexed_value) pair + ColumnFamilyDescriptor::new("index", Options::default()), + ], + )?, + }) + } +} + +impl RegistryDb for RocksDb { + fn next_key(&self) -> anyhow::Result> { + let cf_next_keys = self.db.cf_handle("next_keys").unwrap(); + let Some(bytes) = self.db.get_cf(&cf_next_keys, T::NAME)? else { + return Ok(Key::ZERO); + }; + Ok(postcard::from_bytes(&bytes).expect("Invalid key")) + } + + fn read(&self, key: Key) -> anyhow::Result { + assert_ne!(key, Key::DEFAULT_VALUE); + + let mut k = [0u8; 3]; + postcard::to_slice(&key, &mut k).expect("Always fits"); + let cf = self.db.cf_handle(T::NAME).unwrap(); + let Some(bytes) = self.db.get_cf(&cf, &k)? 
else { + return Ok(T::Type::default()); + }; + Ok(postcard::from_bytes(&bytes).expect("Invalid value")) + } + + fn batch_write( + &mut self, + start_key: Key, + values: Vec, + ) -> anyhow::Result<()> { + let mut key = start_key; + + let mut batch = WriteBatchWithTransaction::::default(); + + let cf_registry = self.db.cf_handle("temporal").unwrap(); + let cf_index = self.db.cf_handle("index").unwrap(); + + let empty = values.is_empty(); + for value in values.into_iter() { + let bare_key = postcard::to_stdvec(&key).expect("Never fails"); + let v = postcard::to_stdvec(&value).expect("Never fails"); + + let mut table_suffix: Vec = T::NAME.bytes().collect(); + table_suffix.push(0); + + // Write new value + let k: Vec = table_suffix + .iter() + .chain(bare_key.iter()) + .copied() + .collect(); + batch.put_cf(&cf_registry, k.clone(), v.clone()); + + // Remove the overwritten value from index, if any + if let Some(old) = self.db.get_cf(&cf_registry, k)? { + let iv: Vec = table_suffix.clone().into_iter().chain(old).collect(); + batch.delete_cf(&cf_index, iv); + } + + // Add it to the index + let iv: Vec = table_suffix.into_iter().chain(v).collect(); + batch.put_cf(&cf_index, iv, bare_key); + + key = key.next(); + } + self.db.write(batch)?; + + if !empty { + let key = postcard::to_stdvec(&key).expect("Never fails"); + let cf_next_keys = self.db.cf_handle("next_keys").unwrap(); + self.db.put_cf(&cf_next_keys, T::NAME, key)?; + } + + Ok(()) + } + + fn index_lookup(&self, value: &T::Type) -> anyhow::Result>> { + let cf_index = self.db.cf_handle("index").unwrap(); + let val = postcard::to_stdvec(&value).expect("Never fails"); + let mut key: Vec = T::NAME.bytes().collect(); + key.push(0); + key.extend(val); + let Some(k) = self.db.get_cf(&cf_index, key)? 
else { + return Ok(None); + }; + Ok(Some(postcard::from_bytes(&k).expect("Never fails"))) + } +} + +impl TemporalRegistry for RocksDb { + fn next_block_height(&self) -> anyhow::Result { + let cf_meta = self.db.cf_handle("meta").unwrap(); + let Some(bytes) = self.db.get_cf(&cf_meta, b"current_block")? else { + return Ok(BlockHeight::default()); + }; + debug_assert!(bytes.len() == 4); + let mut buffer = [0u8; 4]; + buffer.copy_from_slice(&bytes[..]); + Ok(BlockHeight::from(buffer)) + } +} diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 71ad82da02d..2a1d1f3461b 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -1,3 +1,10 @@ +pub mod db; +mod ports; +mod services { + mod compress; + mod decompress; +} + use serde::{ Deserialize, Serialize, @@ -18,7 +25,7 @@ use fuel_core_types::{ }; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct Header { +struct Header { pub da_height: DaBlockHeight, pub prev_root: Bytes32, pub height: BlockHeight, @@ -26,20 +33,15 @@ pub struct Header { } /// Compressed block. -/// The versioning here working depends on the serialization format, -/// but as long as we we have less than 128 variants, postcard will -/// make that a single byte. 
#[derive(Clone, Serialize, Deserialize)] #[non_exhaustive] -pub enum CompressedBlock { - V0 { - /// Registration section of the compressed block - registrations: ChangesPerTable, - /// Compressed block header - header: Header, - /// Compressed transactions - transactions: Vec, - }, +struct CompressedBlock { + /// Registration section of the compressed block + registrations: ChangesPerTable, + /// Compressed block header + header: Header, + /// Compressed transactions + transactions: Vec, } #[cfg(test)] @@ -49,7 +51,6 @@ mod tests { use fuel_core_types::{ blockchain::primitives::DaBlockHeight, fuel_compression::{ - Compact, Compactable, CompactionContext, InMemoryRegistry, @@ -62,7 +63,7 @@ mod tests { #[test] fn postcard_roundtrip() { - let original = CompressedBlock::V0 { + let original = CompressedBlock { registrations: ChangesPerTable::from_start_keys(Default::default()), header: Header { da_height: DaBlockHeight::default(), @@ -76,7 +77,7 @@ mod tests { let compressed = postcard::to_allocvec(&original).unwrap(); let decompressed: CompressedBlock = postcard::from_bytes(&compressed).unwrap(); - let CompressedBlock::V0 { + let CompressedBlock { registrations, header, transactions, @@ -91,8 +92,8 @@ mod tests { fn compact_transaction() { let tx = Transaction::default_test_tx(); let mut registry = InMemoryRegistry::default(); - let (compacted, _) = CompactionContext::run(&mut registry, tx.clone()); - let decompacted = Transaction::decompact(compacted.clone(), ®istry); + let (compacted, _) = CompactionContext::run(&mut registry, tx.clone()).unwrap(); + let decompacted = Transaction::decompact(compacted.clone(), ®istry).unwrap(); assert_eq!(tx, decompacted); // Check size reduction @@ -105,8 +106,10 @@ mod tests { fn compact_transaction_twice_gives_equal_result() { let tx = Transaction::default_test_tx(); let mut registry = InMemoryRegistry::default(); - let (compacted1, changes1) = CompactionContext::run(&mut registry, tx.clone()); - let (compacted2, changes2) = 
CompactionContext::run(&mut registry, tx.clone()); + let (compacted1, changes1) = + CompactionContext::run(&mut registry, tx.clone()).unwrap(); + let (compacted2, changes2) = + CompactionContext::run(&mut registry, tx.clone()).unwrap(); assert!(!changes1.is_empty()); assert!(changes2.is_empty()); let compressed1 = postcard::to_allocvec(&compacted1).unwrap(); @@ -123,9 +126,10 @@ mod tests { let mut registry = InMemoryRegistry::default(); let (transactions, registrations) = - CompactionContext::run(&mut registry, vec![tx.clone(); i]); + CompactionContext::run(&mut registry, vec![tx.clone(); i]) + .expect("Compaction error"); - let original = CompressedBlock::V0 { + let original = CompressedBlock { registrations, header: Header { da_height: DaBlockHeight::default(), @@ -157,9 +161,9 @@ mod tests { let sizes: [usize; 3] = array::from_fn(|_| { let (transactions, registrations) = - CompactionContext::run(&mut registry, vec![tx.clone()]); + CompactionContext::run(&mut registry, vec![tx.clone()]).unwrap(); - let original = CompressedBlock::V0 { + let original = CompressedBlock { registrations, header: Header { da_height: DaBlockHeight::default(), diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs new file mode 100644 index 00000000000..38f270fea2b --- /dev/null +++ b/crates/compression/src/ports.rs @@ -0,0 +1,15 @@ +//! Ports this services requires to function. + +use fuel_core_types::blockchain::block::Block; + +#[async_trait::async_trait] +pub trait CompressPort { + /// Compress the next block. + async fn compress_next(&mut self, block: Block) -> anyhow::Result>; +} + +#[async_trait::async_trait] +pub trait DecompressPort { + /// Decompress the next block. 
+ async fn decompress_next(&mut self, block: Vec) -> anyhow::Result; +} diff --git a/crates/compression/src/services/compress.rs b/crates/compression/src/services/compress.rs new file mode 100644 index 00000000000..01a5f0ce8d7 --- /dev/null +++ b/crates/compression/src/services/compress.rs @@ -0,0 +1,88 @@ +use tokio::sync::mpsc; + +use fuel_core_types::blockchain::block::Block; + +use crate::{ + db::TemporalRegistry, + CompressedBlock, + Header, +}; + +use fuel_core_types::{ + blockchain::primitives::DaBlockHeight, + fuel_compression::{ + Compactable, + CompactionContext, + InMemoryRegistry, + RegistryDb, + }, + fuel_tx::Transaction, + tai64::Tai64, +}; + +/// Task handle +pub struct Task { + request_receiver: mpsc::Receiver, +} + +pub enum TaskRequest { + Compress { + block: Block, + response: mpsc::Sender, CompressError>>, + }, +} + +pub enum CompressError { + /// Only the next sequential block can be compressed + NotLatest, + Other(anyhow::Error), +} +impl From for CompressError { + fn from(err: anyhow::Error) -> Self { + Self::Other(err) + } +} + +async fn run( + mut db: R, + mut request_receiver: mpsc::Receiver, +) { + while let Some(req) = request_receiver.recv().await { + match req { + TaskRequest::Compress { block, response } => { + let reply = compress(&mut db, block); + response.send(reply).await.expect("Failed to respond"); + } + } + } +} + +fn compress( + db: &mut R, + block: Block, +) -> Result, CompressError> { + if *block.header().height() != db.next_block_height()? 
{ + return Err(CompressError::NotLatest); + } + + let (transactions, registrations) = + CompactionContext::run(db, block.transactions().to_vec())?; + + let compact = CompressedBlock { + registrations, + header: Header { + da_height: block.header().da_height, + prev_root: *block.header().prev_root(), + height: *block.header().height(), + time: block.header().time(), + }, + transactions, + }; + + let version = 0u8; + + let compressed = + postcard::to_allocvec(&(version, compact)).expect("Serialization cannot fail"); + + Ok(compressed) +} diff --git a/crates/compression/src/services/decompress.rs b/crates/compression/src/services/decompress.rs new file mode 100644 index 00000000000..b2d01bee3af --- /dev/null +++ b/crates/compression/src/services/decompress.rs @@ -0,0 +1,108 @@ +use tokio::sync::mpsc; + +use fuel_core_types::{ + blockchain::{ + block::{ + Block, + BlockV1, + PartialFuelBlock, + }, + header::{ + ApplicationHeader, + ConsensusHeader, + PartialBlockHeader, + }, + primitives::Empty, + }, + fuel_compression::Compactable, + fuel_tx::Transaction, +}; + +use crate::{ + db::TemporalRegistry, + CompressedBlock, +}; + +/// Task handle +pub struct Task { + request_receiver: mpsc::Receiver, +} + +pub enum TaskRequest { + Decompress { + block: Vec, + response: mpsc::Sender>, + }, +} + +pub enum DecompressError { + /// Only the next sequential block can be decompressed + NotLatest, + /// Unknown compression version + UnknownVersion, + /// Deserialization error + Postcard(postcard::Error), + /// Other errors + Other(anyhow::Error), +} +impl From for DecompressError { + fn from(err: postcard::Error) -> Self { + Self::Postcard(err) + } +} +impl From for DecompressError { + fn from(err: anyhow::Error) -> Self { + Self::Other(err) + } +} + +async fn run( + mut db: R, + mut request_receiver: mpsc::Receiver, +) { + while let Some(req) = request_receiver.recv().await { + match req { + TaskRequest::Decompress { block, response } => { + let reply = decompress(&mut db, block); 
+ response.send(reply).await.expect("Failed to respond"); + } + } + } +} + +fn decompress( + db: &mut R, + block: Vec, +) -> Result { + if block.is_empty() || block[0] != 0 { + return Err(DecompressError::UnknownVersion); + } + + let compressed: CompressedBlock = postcard::from_bytes(&block[1..])?; + + // TODO: should be store height on da just to have this check? + // if *block.header.height != db.next_block_height()? { + // return Err(DecompressError::NotLatest); + // } + + let mut transactions = Vec::new(); + for tx in compressed.transactions.into_iter() { + transactions.push(Transaction::decompact(tx, db)?); + } + + Ok(PartialFuelBlock { + header: PartialBlockHeader { + application: ApplicationHeader { + da_height: compressed.header.da_height, + generated: Empty, + }, + consensus: ConsensusHeader { + prev_root: compressed.header.prev_root, + height: compressed.header.height, + time: compressed.header.time, + generated: Empty, + }, + }, + transactions, + }) +} diff --git a/crates/fuel-core/src/graphql_api/worker_service.rs b/crates/fuel-core/src/graphql_api/worker_service.rs index 600e489708d..142545e5d19 100644 --- a/crates/fuel-core/src/graphql_api/worker_service.rs +++ b/crates/fuel-core/src/graphql_api/worker_service.rs @@ -143,7 +143,6 @@ where owners.push(to); } Output::Contract(_) | Output::ContractCreated { .. 
} => {} - _ => {} } } diff --git a/crates/fuel-core/src/schema/tx/input.rs b/crates/fuel-core/src/schema/tx/input.rs index 1fef1956b27..fe037180273 100644 --- a/crates/fuel-core/src/schema/tx/input.rs +++ b/crates/fuel-core/src/schema/tx/input.rs @@ -298,7 +298,6 @@ impl From<&fuel_tx::Input> for Input { predicate: HexString(predicate.clone()), predicate_data: HexString(predicate_data.clone()), }), - input => todo!("No mapping for input {input:?}"), } } } diff --git a/crates/fuel-core/src/schema/tx/output.rs b/crates/fuel-core/src/schema/tx/output.rs index 59dd388bc4c..3bf336c1a18 100644 --- a/crates/fuel-core/src/schema/tx/output.rs +++ b/crates/fuel-core/src/schema/tx/output.rs @@ -161,7 +161,6 @@ impl TryFrom<&fuel_tx::Output> for Output { contract_id: *contract_id, state_root: *state_root, }), - _ => return Err(format!("Unsupported output type: {:?}", output)), }; Ok(val) } diff --git a/crates/fuel-core/src/schema/tx/types.rs b/crates/fuel-core/src/schema/tx/types.rs index 02994d1503a..1b7cd5b5e4d 100644 --- a/crates/fuel-core/src/schema/tx/types.rs +++ b/crates/fuel-core/src/schema/tx/types.rs @@ -382,7 +382,6 @@ impl Transaction { fuel_tx::Transaction::Mint(mint) => { Some(vec![Contract(mint.input_contract().contract_id)]) } - _ => None, } } @@ -390,7 +389,6 @@ impl Transaction { match &self.0 { fuel_tx::Transaction::Script(_) | fuel_tx::Transaction::Create(_) => None, fuel_tx::Transaction::Mint(mint) => Some(mint.input_contract().into()), - _ => None, } } @@ -399,7 +397,6 @@ impl Transaction { fuel_tx::Transaction::Script(script) => Some((*script.policies()).into()), fuel_tx::Transaction::Create(create) => Some((*create.policies()).into()), fuel_tx::Transaction::Mint(_) => None, - _ => None, } } @@ -408,7 +405,6 @@ impl Transaction { fuel_tx::Transaction::Script(script) => Some(script.price().into()), fuel_tx::Transaction::Create(create) => Some(create.price().into()), fuel_tx::Transaction::Mint(_) => None, - _ => None, } } @@ -427,7 +423,6 @@ impl 
Transaction { fuel_tx::Transaction::Script(script) => Some(script.maturity().into()), fuel_tx::Transaction::Create(create) => Some(create.maturity().into()), fuel_tx::Transaction::Mint(_) => None, - _ => None, } } @@ -519,7 +514,6 @@ impl Transaction { .collect(), ), fuel_tx::Transaction::Mint(_) => None, - _ => None, } } diff --git a/crates/services/executor/src/executor.rs b/crates/services/executor/src/executor.rs index ea9c7ab07c0..75851e4a760 100644 --- a/crates/services/executor/src/executor.rs +++ b/crates/services/executor/src/executor.rs @@ -1189,7 +1189,6 @@ where ) } } - _ => return Err(ExecutorError::UnknownTransactionType("Output")), } } @@ -1600,7 +1599,6 @@ where }, )?; } - _ => return Err(ExecutorError::UnknownTransactionType("Output")), } } Ok(()) diff --git a/crates/services/txpool/src/containers/dependency.rs b/crates/services/txpool/src/containers/dependency.rs index ec1efb4ea84..cdd8959899b 100644 --- a/crates/services/txpool/src/containers/dependency.rs +++ b/crates/services/txpool/src/containers/dependency.rs @@ -156,7 +156,6 @@ impl Dependency { | Input::MessageDataPredicate(_) => { // Message inputs do not depend on any other fuel transactions } - _ => {} } } } @@ -239,7 +238,6 @@ impl Dependency { Output::ContractCreated { .. } => { return Err(Error::NotInsertedIoContractOutput.into()) } - _ => todo!("Unsupported output type"), }; } else { return Err(anyhow!("Use it only for coin output check")) @@ -440,7 +438,6 @@ impl Dependency { // yey we got our contract } - _ => todo!("Unsupported input type"), } } @@ -521,7 +518,6 @@ impl Dependency { | Input::MessageCoinPredicate(_) | Input::MessageDataSigned(_) | Input::MessageDataPredicate(_) => {} - _ => todo!("Unsupported input type"), } } @@ -569,7 +565,6 @@ impl Dependency { // do nothing, this contract is already already found in dependencies. // as it is tied with input and used_by is already inserted. 
} - _ => todo!("Unsupported output type"), }; } @@ -629,7 +624,6 @@ impl Dependency { } } } - _ => todo!("Unsupported output type"), }; } @@ -673,7 +667,6 @@ impl Dependency { | Input::MessageDataPredicate(MessageDataPredicate { nonce, .. }) => { self.messages.remove(nonce); } - _ => todo!("Unsupported input type"), } } From 0ce4650945a1e714ba7fc8e2393d9ac62013d748 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 23 Jul 2024 09:04:02 +0300 Subject: [PATCH 021/112] Fix compression and it's tests --- Cargo.lock | 59 ++-- Cargo.toml | 5 +- crates/.DS_Store | Bin 0 -> 6148 bytes crates/compression/.DS_Store | Bin 0 -> 6148 bytes crates/compression/Cargo.toml | 30 ++ crates/compression/src/.DS_Store | Bin 0 -> 6148 bytes crates/compression/src/block_section.rs | 257 ++++++++++++++++++ crates/compression/src/db.rs | 179 ++++++++++++ crates/compression/src/lib.rs | 148 ++++++++++ crates/compression/src/ports.rs | 15 + crates/compression/src/services/compress.rs | 198 ++++++++++++++ crates/compression/src/services/decompress.rs | 161 +++++++++++ crates/types/Cargo.toml | 1 + crates/types/src/lib.rs | 3 + 14 files changed, 1039 insertions(+), 17 deletions(-) create mode 100644 crates/.DS_Store create mode 100644 crates/compression/.DS_Store create mode 100644 crates/compression/Cargo.toml create mode 100644 crates/compression/src/.DS_Store create mode 100644 crates/compression/src/block_section.rs create mode 100644 crates/compression/src/db.rs create mode 100644 crates/compression/src/lib.rs create mode 100644 crates/compression/src/ports.rs create mode 100644 crates/compression/src/services/compress.rs create mode 100644 crates/compression/src/services/decompress.rs diff --git a/Cargo.lock b/Cargo.lock index 74af6d9bc55..d1ab50d2c79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2774,8 +2774,6 @@ dependencies = [ [[package]] name = "fuel-asm" version = "0.55.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"491f1777538b0e1d479609d0d75bca5242c7fd3394f2ddd4ea55b8c96bcc8387" dependencies = [ "bitflags 2.6.0", "fuel-types", @@ -2783,6 +2781,18 @@ dependencies = [ "strum 0.24.1", ] +[[package]] +name = "fuel-compression" +version = "0.55.0" +dependencies = [ + "anyhow", + "fuel-derive", + "paste", + "postcard", + "serde", + "serde-big-array", +] + [[package]] name = "fuel-core" version = "0.31.0" @@ -2970,6 +2980,22 @@ dependencies = [ "tokio", ] +[[package]] +name = "fuel-core-compression" +version = "0.31.0" +dependencies = [ + "anyhow", + "async-trait", + "bincode", + "fuel-core-compression", + "fuel-core-types", + "postcard", + "rocksdb", + "serde", + "tempfile", + "tokio", +] + [[package]] name = "fuel-core-consensus-module" version = "0.31.0" @@ -3396,8 +3422,6 @@ dependencies = [ [[package]] name = "fuel-crypto" version = "0.55.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f74f03ba9b27f375a0482b1afe20d5b8cfd032fedba683a584cdbd6d10147439" dependencies = [ "coins-bip32", "coins-bip39", @@ -3417,11 +3441,10 @@ dependencies = [ [[package]] name = "fuel-derive" version = "0.55.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ad30ad1a11e5a811ae67b6b0cb6785ce21bcd5ef0afd442fd963d5be95d09d" dependencies = [ "proc-macro2", "quote", + "regex", "syn 2.0.71", "synstructure", ] @@ -3437,8 +3460,6 @@ dependencies = [ [[package]] name = "fuel-merkle" version = "0.55.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5433c41ffbf531eed1380148cd68e37f9dd7e25966a9c59518f6b09e346e80e2" dependencies = [ "derive_more", "digest 0.10.7", @@ -3452,19 +3473,17 @@ dependencies = [ [[package]] name = "fuel-storage" version = "0.55.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce3fc3cd96fe312442cdf35966b96d66becd02582b505f856f74953f57adf020" [[package]] name = "fuel-tx" version = "0.55.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "e00cc42ae3121b1881a6ae8306696d1bea73adca424216d9f676ee91d3927c74" dependencies = [ + "anyhow", "bitflags 2.6.0", "derivative", "derive_more", "fuel-asm", + "fuel-compression", "fuel-crypto", "fuel-merkle", "fuel-types", @@ -3481,9 +3500,9 @@ dependencies = [ [[package]] name = "fuel-types" version = "0.55.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae98e143dec4e6cb114a92435e314f1d4815e17e8fded24332fb285319d60167" dependencies = [ + "anyhow", + "fuel-compression", "fuel-derive", "hex", "rand", @@ -3493,8 +3512,6 @@ dependencies = [ [[package]] name = "fuel-vm" version = "0.55.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "641a2ee5a3398633fa243fba3343cbe2225ae335a09141f6b94041720cfc3520" dependencies = [ "anyhow", "async-trait", @@ -3504,6 +3521,7 @@ dependencies = [ "derive_more", "ethnum", "fuel-asm", + "fuel-compression", "fuel-crypto", "fuel-merkle", "fuel-storage", @@ -7478,6 +7496,15 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-big-array" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" +dependencies = [ + "serde", +] + [[package]] name = "serde_derive" version = "1.0.204" diff --git a/Cargo.toml b/Cargo.toml index c19705bc988..96c32c195e6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,7 @@ members = [ "bin/fuel-core", "bin/fuel-core-client", "bin/keygen", + "crates/compression", "crates/chain-config", "crates/client", "crates/database", @@ -61,6 +62,7 @@ fuel-core-keygen = { version = "0.31.0", path = "./crates/keygen" } fuel-core-keygen-bin = { version = "0.31.0", path = "./bin/keygen" } fuel-core-chain-config = { version = "0.31.0", path = "./crates/chain-config", default-features = false } fuel-core-client = { version = "0.31.0", path = "./crates/client" } +fuel-core-compression = { version = "0.31.0", path = "./crates/compression" } 
fuel-core-database = { version = "0.31.0", path = "./crates/database" } fuel-core-metrics = { version = "0.31.0", path = "./crates/metrics" } fuel-core-services = { version = "0.31.0", path = "./crates/services" } @@ -85,7 +87,8 @@ fuel-core-xtask = { version = "0.0.0", path = "./xtask" } fuel-gas-price-algorithm = { version = "0.31.0", path = "crates/fuel-gas-price-algorithm" } # Fuel dependencies -fuel-vm-private = { version = "0.55.0", package = "fuel-vm", default-features = false } +fuel-vm-private = { path = "../fuel-vm/fuel-vm", package = "fuel-vm", default-features = false } +# fuel-vm-private = { version = "0.55.0", package = "fuel-vm", default-features = false } # Common dependencies anyhow = "1.0" diff --git a/crates/.DS_Store b/crates/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..3c7ea968b1301b2beaf2b2d2c841a790cbffb6e8 GIT binary patch literal 6148 zcmeHKJ8Hu~5S>X}2;8`IxmU;y7NMNL7ckf%7#!KiB(*A^D@V(l4*?;kaN#Dr5i@Uh zG;f7oq0xwlwx9bKk(G!va6`FTn48@e~t9E@g-M4>?cx z{nuvbkEi}UPQOiOmI_b-DnJFO02R1X0qec6W*Nvx1*iZO_*KBZ4+U;m6URXRbRhT$ z09+vLhPBTUz+wqtO&kM}foV{ILDd{FH0a2etgDG*V9-T#_|SZ^=7ggDbevzjT(kx< zQUNM3Rp2?cE9?KS@IU7NDTzBOKn4Dl0=nonU5i)B-a30Z>$L^GgWNyd0`h(h%tVH*;V z(1UO;njL?U0h+rtXn+TO=;F*x=kGL>_kJjSY^TL>hU$DOK?@)YF=pIZQFU(~2T;5t^aZg2vxXqYiSyZtDXQ5QXv=t&eKGr$Zm1IxjH*}bgda{Mm; zB{RSbERX@(A8b@Y$6%^aZ5>#sCjcT{BbA^{*G0;)4LSx>jo5-BbSk1w73PT{bUONN z6XzIAHR^N_=J6rSmxXzu2=#UB-&Wxu9F1Hu1I)lI16f@y(fNP!`}==3iEGRNGw`n% z5V@w?tmBZ(**Z5kI%{pzJ5&;iOEpeY&`?J)#?nz-Mpc4-n+!z9V5$*4D0~r6G;qNT HER=ySPTOGu literal 0 HcmV?d00001 diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml new file mode 100644 index 00000000000..23ca8400227 --- /dev/null +++ b/crates/compression/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "fuel-core-compression" +version = { workspace = true } +authors = { workspace = true } +categories = ["cryptography::cryptocurrencies"] +edition = { workspace = true } +homepage = { workspace = true } +keywords = 
["blockchain", "cryptocurrencies", "fuel-core", "fuel-client", "fuel-compression"] +license = { workspace = true } +repository = { workspace = true } +description = "Compression and decompression of Fuel blocks for DA storage." + +[dependencies] +anyhow = { workspace = true } +async-trait = { workspace = true } +fuel-core-types = { workspace = true, features = ["serde", "da-compression"] } +postcard = { version = "1.0", features = ["use-std"] } +rocksdb = { version = "0.21", default-features = false, optional = true } +serde = { version = "1.0", features = ["derive"] } +tokio = { workspace = true, features = ["sync"] } + +[dev-dependencies] +bincode = { version = "1.3" } +fuel-core-compression = { workspace = true, features = ["test-helpers"] } +tempfile = "3" + +[features] +default = ["rocksdb"] +rocksdb = ["dep:rocksdb"] +test-helpers = ["fuel-core-types/test-helpers", "fuel-core-types/random", "fuel-core-types/std"] \ No newline at end of file diff --git a/crates/compression/src/.DS_Store b/crates/compression/src/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..d64f70fb0dec3708643118c69a3b614542d1c44f GIT binary patch literal 6148 zcmeHKF;2uV5S)cO6p1D!rC)%=52DjiP*71(fs&&*ak`)csPhh)XKl}}oskk}0m$NEbpn(C3N}SiCu2IDUD`|JIZX4I12M68*+0J$s27 z8?-$2*YmGAv&Atw^k)N>ivJpS0-EJk3)Rq3b+D)OaYlKFO~~FRJvPt9#8Mu vz;?~1ro2HN8vW5v00;UVIf{$69@HnVN;nv57M+jbME? { + /// The values are inserted starting from this key + pub start_key: Key, + /// Values, inserted using incrementing ids starting from `start_key` + pub values: Vec, +} + +impl fmt::Debug for WriteTo +where + T::Type: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.values.is_empty() { + return f.write_str("WriteTo::EMPTY"); + } + + f.debug_struct("WriteTo") + .field("start_key", &self.start_key) + .field("values", &self.values) + .finish() + } +} + +impl WriteTo +where + T::Type: PartialEq, +{ + /// Reverse lookup. 
+ /// TODO: possibly add a lookup table for this, if deemed necessary + pub fn lookup_value(&self, needle: &T::Type) -> Option> { + if *needle == T::Type::default() { + return Some(Key::DEFAULT_VALUE); + } + + let mut key = self.start_key; + for v in &self.values { + if v == needle { + return Some(key); + } + key = key.next(); + } + None + } +} + +/// Custom serialization is used to omit the start_key when the sequence is empty +impl Serialize for WriteTo +where + T: Table + Serialize, +{ + fn serialize(&self, serializer: S) -> Result { + let mut tup = serializer.serialize_tuple(2)?; + tup.serialize_element(&self.values)?; + if self.values.is_empty() { + tup.serialize_element(&())?; + } else { + tup.serialize_element(&self.start_key)?; + } + tup.end() + } +} + +impl<'de, T: Table> Deserialize<'de> for WriteTo +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_tuple( + 2, + Self { + start_key: Key::ZERO, + values: Vec::new(), + }, + ) + } +} + +impl<'de, T: Table + Deserialize<'de>> serde::de::Visitor<'de> for WriteTo { + type Value = WriteTo; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(concat!("WriteTo<", stringify!(T), "> instance")) + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: serde::de::SeqAccess<'de>, + { + let values: Vec = seq.next_element()?.ok_or( + serde::de::Error::invalid_length(0, &"WriteTo<_> with 2 elements"), + )?; + + if values.is_empty() { + seq.next_element()?.ok_or(serde::de::Error::invalid_length( + 1, + &"WriteTo<_> with 2 elements", + ))?; + Ok(WriteTo { + start_key: Key::ZERO, + values, + }) + } else { + let start_key: Key = seq.next_element()?.ok_or( + serde::de::Error::invalid_length(1, &"WriteTo<_> with 2 elements"), + )?; + Ok(WriteTo { start_key, values }) + } + } +} + +/// Registeration section of the compressed block +#[derive(Debug, Clone, PartialEq, Eq, Serialize, 
Deserialize)] +pub struct Registrations { + /// Merkle root of the registeration table merkle roots + pub tables_root: [u8; 32], + /// Changes per table + pub changes: ChangesPerTable, +} + +macro_rules! tables { + ($($name:ident),*$(,)?) => { + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] + #[allow(non_snake_case)] // Match table/type names exactly + pub struct ChangesPerTable { + $(pub $name: WriteTo,)* + } + + impl ChangesPerTable { + pub fn from_start_keys(start_keys: KeyPerTable) -> Self { + Self { + $($name: WriteTo { + start_key: start_keys.$name, + values: Vec::new(), + }),* + } + } + + pub fn is_empty(&self) -> bool { + $(self.$name.values.is_empty() &&)* true + } + + + /// Apply changes to the db + pub fn write_to_db(&self, reg: &mut crate::db::RocksDb) -> anyhow::Result<()> { + $( + reg.batch_write(self.$name.start_key, self.$name.values.clone())?; + )* + Ok(()) + } + } + + $( + impl access::AccessRef> for ChangesPerTable { + fn get(&self) -> &WriteTo { + &self.$name + } + } + impl access::AccessMut> for ChangesPerTable { + fn get_mut(&mut self) -> &mut WriteTo { + &mut self.$name + } + } + )* + }; +} + +tables!(AssetId, Address, ContractId, ScriptCode, Witness); + +#[cfg(test)] +mod tests { + use super::*; + use bincode::Options; + use fuel_core_types::{ + fuel_asm::op, + fuel_tx::{ + AssetId, + ContractId, + }, + fuel_types::Address, + }; + + #[test] + fn test_tables() { + let original = Registrations { + tables_root: Default::default(), + changes: ChangesPerTable { + AssetId: WriteTo { + start_key: Key::try_from(100).unwrap(), + values: vec![*AssetId::from([0xa0; 32]), *AssetId::from([0xa1; 32])], + }, + Address: WriteTo { + start_key: Key::ZERO, + values: vec![*Address::from([0xb0; 32])], + }, + ContractId: WriteTo { + start_key: Key::ZERO, + values: vec![*ContractId::from([0xc0; 32])], + }, + ScriptCode: WriteTo { + start_key: Key::ZERO, + values: vec![ + vec![op::addi(0x20, 0x20, 1), op::ret(0)] + .into_iter() + .collect(), + 
vec![op::muli(0x20, 0x20, 5), op::ret(1)] + .into_iter() + .collect(), + ], + }, + Witness: WriteTo { + start_key: Key::ZERO, + values: vec![], + }, + }, + }; + + let pc_compressed = postcard::to_stdvec(&original).unwrap(); + let pc_decompressed: Registrations = + postcard::from_bytes(&pc_compressed).unwrap(); + assert_eq!(original, pc_decompressed); + + let bc_opt = bincode::DefaultOptions::new().with_varint_encoding(); + + let bc_compressed = bc_opt.serialize(&original).unwrap(); + let bc_decompressed: Registrations = bc_opt.deserialize(&bc_compressed).unwrap(); + assert_eq!(original, bc_decompressed); + + println!("data: {original:?}"); + println!("postcard compressed size {}", pc_compressed.len()); + println!("bincode compressed size {}", bc_compressed.len()); + println!("postcard compressed: {:x?}", pc_compressed); + println!("bincode compressed: {:x?}", bc_compressed); + + // panic!("ok, just showing the results"); + } +} diff --git a/crates/compression/src/db.rs b/crates/compression/src/db.rs new file mode 100644 index 00000000000..3b913b0f8e7 --- /dev/null +++ b/crates/compression/src/db.rs @@ -0,0 +1,179 @@ +use std::path::Path; + +use fuel_core_types::{ + fuel_compression::{ + Key, + KeyPerTable, + RawKey, + Table, + TableName, + }, + fuel_types::BlockHeight, +}; +use rocksdb::WriteBatchWithTransaction; + +pub struct RocksDb { + db: rocksdb::DB, +} + +impl RocksDb { + pub fn open>(path: P) -> anyhow::Result { + use rocksdb::{ + ColumnFamilyDescriptor, + Options, + DB, + }; + + let mut db_opts = Options::default(); + db_opts.create_missing_column_families(true); + db_opts.create_if_missing(true); + Ok(Self { + db: DB::open_cf_descriptors( + &db_opts, + path, + vec![ + // Meta table holding misc + ColumnFamilyDescriptor::new("meta", Options::default()), + // Next temporal registry key for each table + ColumnFamilyDescriptor::new("next_keys", Options::default()), + // Temporal registry key:value pairs, with key as + // null-separated (table, key) pair + 
ColumnFamilyDescriptor::new("temporal", Options::default()), + // Reverse index into temporal registry values, with key as + // null-separated (table, indexed_value) pair + ColumnFamilyDescriptor::new("index", Options::default()), + ], + )?, + }) + } +} + +impl RocksDb { + fn next_key_raw(&self, table: TableName) -> anyhow::Result { + let cf_next_keys = self.db.cf_handle("next_keys").unwrap(); + let Some(bytes) = self.db.get_cf(&cf_next_keys, table)? else { + return Ok(RawKey::ZERO); + }; + Ok(postcard::from_bytes(&bytes).expect("Invalid key")) + } + + fn next_key(&self) -> anyhow::Result> { + Ok(Key::from_raw(self.next_key_raw(T::NAME)?)) + } + + pub fn start_keys(&self) -> anyhow::Result { + // TODO: allow KeyPerTable::from_fn to pass through errors + Ok(KeyPerTable::from_fn(|table_name| { + self.next_key_raw(table_name).expect("Db error") + })) + } + + fn read(&self, key: Key) -> anyhow::Result { + assert_ne!(key, Key::DEFAULT_VALUE); + + let mut k = [0u8; 3]; + postcard::to_slice(&key, &mut k).expect("Always fits"); + let cf = self.db.cf_handle(T::NAME).unwrap(); + let Some(bytes) = self.db.get_cf(&cf, &k)? 
else { + return Ok(T::Type::default()); + }; + Ok(postcard::from_bytes(&bytes).expect("Invalid value")) + } + + pub fn batch_write( + &mut self, + start_key: Key, + values: Vec, + ) -> anyhow::Result<()> { + let mut key = start_key; + + let mut batch = WriteBatchWithTransaction::::default(); + + let cf_registry = self.db.cf_handle("temporal").unwrap(); + let cf_index = self.db.cf_handle("index").unwrap(); + + let empty = values.is_empty(); + for value in values.into_iter() { + let bare_key = postcard::to_stdvec(&key).expect("Never fails"); + let v = postcard::to_stdvec(&value).expect("Never fails"); + + let mut table_suffix: Vec = T::NAME.bytes().collect(); + table_suffix.push(0); + + // Write new value + let k: Vec = table_suffix + .iter() + .chain(bare_key.iter()) + .copied() + .collect(); + batch.put_cf(&cf_registry, k.clone(), v.clone()); + + // Remove the overwritten value from index, if any + if let Some(old) = self.db.get_cf(&cf_registry, k)? { + let iv: Vec = table_suffix.clone().into_iter().chain(old).collect(); + batch.delete_cf(&cf_index, iv); + } + + // Add it to the index + let iv: Vec = table_suffix.into_iter().chain(v).collect(); + batch.put_cf(&cf_index, iv, bare_key); + + key = key.next(); + } + self.db.write(batch)?; + + if !empty { + let key = postcard::to_stdvec(&key).expect("Never fails"); + let cf_next_keys = self.db.cf_handle("next_keys").unwrap(); + self.db.put_cf(&cf_next_keys, T::NAME, key)?; + } + + Ok(()) + } + + pub fn index_lookup( + &self, + value: &T::Type, + ) -> anyhow::Result>> { + let cf_index = self.db.cf_handle("index").unwrap(); + let val = postcard::to_stdvec(&value).expect("Never fails"); + let mut key: Vec = T::NAME.bytes().collect(); + key.push(0); + key.extend(val); + let Some(k) = self.db.get_cf(&cf_index, key)? 
else { + return Ok(None); + }; + Ok(Some(postcard::from_bytes(&k).expect("Never fails"))) + } + + pub fn next_block_height(&self) -> anyhow::Result { + let cf_meta = self.db.cf_handle("meta").unwrap(); + let Some(bytes) = self.db.get_cf(&cf_meta, b"current_block")? else { + return Ok(BlockHeight::default()); + }; + debug_assert!(bytes.len() == 4); + let mut buffer = [0u8; 4]; + buffer.copy_from_slice(&bytes[..]); + Ok(BlockHeight::from(buffer)) + } + + pub fn increment_block_height(&self) -> anyhow::Result<()> { + // TODO: potential TOCTOU bug here + let cf_meta = self.db.cf_handle("meta").unwrap(); + let old_bh = match self.db.get_cf(&cf_meta, b"current_block")? { + Some(bytes) => { + debug_assert!(bytes.len() == 4); + let mut buffer = [0u8; 4]; + buffer.copy_from_slice(&bytes[..]); + BlockHeight::from(buffer) + } + None => BlockHeight::default(), + }; + let new_bh = old_bh + .succ() + .ok_or_else(|| anyhow::anyhow!("Block height overflow"))?; + self.db + .put_cf(&cf_meta, b"current_block", new_bh.to_bytes())?; + Ok(()) + } +} diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs new file mode 100644 index 00000000000..6510992a1a5 --- /dev/null +++ b/crates/compression/src/lib.rs @@ -0,0 +1,148 @@ +mod block_section; +pub mod db; +mod ports; +mod services { + pub mod compress; + pub mod decompress; +} + +use block_section::ChangesPerTable; +use serde::{ + Deserialize, + Serialize, +}; + +use fuel_core_types::{ + blockchain::{ + header::{ + ConsensusParametersVersion, + StateTransitionBytecodeVersion, + }, + primitives::DaBlockHeight, + }, + fuel_tx::CompactTransaction, + fuel_types::{ + BlockHeight, + Bytes32, + }, + tai64::Tai64, +}; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +struct Header { + pub da_height: DaBlockHeight, + pub prev_root: Bytes32, + pub height: BlockHeight, + pub time: Tai64, + pub consensus_parameters_version: ConsensusParametersVersion, + pub state_transition_bytecode_version: 
StateTransitionBytecodeVersion, +} + +/// Compressed block, without the preceeding version byte. +#[derive(Clone, Serialize, Deserialize)] +struct CompressedBlockPayload { + /// Registration section of the compressed block + registrations: ChangesPerTable, + /// Compressed block header + header: Header, + /// Compressed transactions + transactions: Vec, +} + +#[cfg(test)] +mod tests { + use std::array; + + use db::RocksDb; + use fuel_core_types::{ + blockchain::{ + block::Block, + header::{ + ApplicationHeader, + ConsensusHeader, + PartialBlockHeader, + }, + primitives::{ + DaBlockHeight, + Empty, + }, + }, + fuel_tx::Transaction, + tai64::Tai64, + }; + use tempfile::TempDir; + + use super::*; + + #[test] + fn postcard_roundtrip() { + let original = CompressedBlockPayload { + registrations: ChangesPerTable::from_start_keys(Default::default()), + header: Header { + da_height: DaBlockHeight::default(), + prev_root: Default::default(), + height: 3u32.into(), + consensus_parameters_version: 1, + state_transition_bytecode_version: 2, + time: Tai64::UNIX_EPOCH, + }, + transactions: vec![], + }; + + let compressed = postcard::to_allocvec(&original).unwrap(); + let decompressed: CompressedBlockPayload = + postcard::from_bytes(&compressed).unwrap(); + + let CompressedBlockPayload { + registrations, + header, + transactions, + } = decompressed; + + assert!(registrations.is_empty()); + assert_eq!(header.height, 3u32.into()); + assert!(transactions.is_empty()); + } + + #[test] + fn same_compact_tx_is_smaller_in_next_block() { + let tx = Transaction::default_test_tx(); + + let tmpdir = TempDir::new().unwrap(); + let mut db = RocksDb::open(tmpdir.path()).unwrap(); + + let sizes: [usize; 3] = array::from_fn(|h| { + services::compress::compress( + &mut db, + Block::new( + PartialBlockHeader { + application: ApplicationHeader { + da_height: DaBlockHeight::default(), + consensus_parameters_version: 4, + state_transition_bytecode_version: 5, + generated: Empty, + }, + consensus: 
ConsensusHeader { + prev_root: Bytes32::default(), + height: (h as u32).into(), + time: Tai64::UNIX_EPOCH, + generated: Empty, + }, + }, + vec![tx.clone()], + &[], + Bytes32::default(), + ) + .expect("Invalid block header"), + ) + .unwrap() + .len() + }); + + assert!(sizes[0] > sizes[1], "Size must decrease after first block"); + assert!( + sizes[1] == sizes[2], + "Size must be constant after first block" + ); + } +} diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs new file mode 100644 index 00000000000..38f270fea2b --- /dev/null +++ b/crates/compression/src/ports.rs @@ -0,0 +1,15 @@ +//! Ports this services requires to function. + +use fuel_core_types::blockchain::block::Block; + +#[async_trait::async_trait] +pub trait CompressPort { + /// Compress the next block. + async fn compress_next(&mut self, block: Block) -> anyhow::Result>; +} + +#[async_trait::async_trait] +pub trait DecompressPort { + /// Decompress the next block. + async fn decompress_next(&mut self, block: Vec) -> anyhow::Result; +} diff --git a/crates/compression/src/services/compress.rs b/crates/compression/src/services/compress.rs new file mode 100644 index 00000000000..ccaee881639 --- /dev/null +++ b/crates/compression/src/services/compress.rs @@ -0,0 +1,198 @@ +use fuel_core_types::fuel_compression::{ + access::*, + tables, + Key, + KeyPerTable, + RawKey, + Table, +}; +use tokio::sync::mpsc; + +use fuel_core_types::blockchain::block::Block; + +use crate::{ + block_section::{ + ChangesPerTable, + WriteTo, + }, + db::RocksDb, + CompressedBlockPayload, + Header, +}; + +use fuel_core_types::{ + blockchain::primitives::DaBlockHeight, + fuel_compression::{ + Compactable, + CompactionContext, + }, + fuel_tx::Transaction, + tai64::Tai64, +}; + +/// Task handle +pub struct Task { + request_receiver: mpsc::Receiver, +} + +pub enum TaskRequest { + Compress { + block: Block, + response: mpsc::Sender, CompressError>>, + }, +} + +#[derive(Debug)] +pub enum CompressError { + /// 
Only the next sequential block can be compressed + NotLatest, + Other(anyhow::Error), +} +impl From for CompressError { + fn from(err: anyhow::Error) -> Self { + Self::Other(err) + } +} + +async fn run(mut db: RocksDb, mut request_receiver: mpsc::Receiver) { + while let Some(req) = request_receiver.recv().await { + match req { + TaskRequest::Compress { block, response } => { + let reply = compress(&mut db, block); + response.send(reply).await.expect("Failed to respond"); + } + } + } +} + +pub fn compress(db: &mut RocksDb, block: Block) -> Result, CompressError> { + if *block.header().height() != db.next_block_height()? { + return Err(CompressError::NotLatest); + } + + let target = block.transactions().to_vec(); + + let start_keys = db.start_keys()?; + let key_limits = target.count(); + let safe_keys_start = start_keys.offset_by(key_limits); + let mut ctx = Ctx { + db, + start_keys, + next_keys: start_keys, + safe_keys_start, + changes: ChangesPerTable::from_start_keys(start_keys), + }; + let transactions = target.compact(&mut ctx)?; + let registrations = ctx.changes; + + // Apply changes to the db + // TODO: these should be done in a single atomic transaction + registrations.write_to_db(db)?; + db.increment_block_height()?; + + // Construct the actual compacted block + let compact = CompressedBlockPayload { + registrations, + header: Header { + da_height: block.header().da_height, + prev_root: *block.header().prev_root(), + consensus_parameters_version: block.header().consensus_parameters_version, + state_transition_bytecode_version: block + .header() + .state_transition_bytecode_version, + height: *block.header().height(), + time: block.header().time(), + }, + transactions, + }; + + let version = 0u8; + + let compressed = + postcard::to_allocvec(&(version, compact)).expect("Serialization cannot fail"); + + Ok(compressed) +} + +pub struct Ctx<'a> { + db: &'a mut RocksDb, + /// These are the keys where writing started + start_keys: KeyPerTable, + /// The next keys 
to use for each table + next_keys: KeyPerTable, + /// Keys in range next_keys..safe_keys_start + /// could be overwritten by the compaction, + /// and cannot be used for new values. + safe_keys_start: KeyPerTable, + changes: ChangesPerTable, +} + +impl<'a> Ctx<'a> { + /// Convert a value to a key + /// If necessary, store the value in the changeset and allocate a new key. + fn value_to_key(&mut self, value: T::Type) -> anyhow::Result> + where + ChangesPerTable: AccessRef> + AccessMut>, + KeyPerTable: AccessCopy> + AccessMut>, + { + // Check if the value is within the current changeset + if let Some(key) = + >::get(&self.changes).lookup_value(&value) + { + return Ok(key); + } + + // Check if the registry contains this value already. + if let Some(key) = self.db.index_lookup(&value)? { + // Check if the value is in the possibly-overwritable range + let start: Key = self.start_keys.value(); + let end: Key = self.safe_keys_start.value(); + if !key.is_between(start, end) { + return Ok(key); + } + } + + // Allocate a new key for this + let key = >>::get_mut(&mut self.next_keys) + .take_next(); + + >::get_mut(&mut self.changes) + .values + .push(value); + Ok(key) + } +} + +impl<'a> CompactionContext for Ctx<'a> { + fn to_key_AssetId( + &mut self, + value: [u8; 32], + ) -> anyhow::Result> { + self.value_to_key(value) + } + + fn to_key_Address( + &mut self, + value: [u8; 32], + ) -> anyhow::Result> { + self.value_to_key(value) + } + + fn to_key_ContractId( + &mut self, + value: [u8; 32], + ) -> anyhow::Result> { + self.value_to_key(value) + } + + fn to_key_ScriptCode( + &mut self, + value: Vec, + ) -> anyhow::Result> { + self.value_to_key(value) + } + + fn to_key_Witness(&mut self, value: Vec) -> anyhow::Result> { + self.value_to_key(value) + } +} diff --git a/crates/compression/src/services/decompress.rs b/crates/compression/src/services/decompress.rs new file mode 100644 index 00000000000..602b8117821 --- /dev/null +++ b/crates/compression/src/services/decompress.rs @@ 
-0,0 +1,161 @@ +use fuel_core_types::fuel_compression::{ + tables, + DecompactionContext, + Key, + Table, +}; +use tokio::sync::mpsc; + +use fuel_core_types::{ + blockchain::{ + block::{ + Block, + BlockV1, + PartialFuelBlock, + }, + header::{ + ApplicationHeader, + ConsensusHeader, + PartialBlockHeader, + }, + primitives::Empty, + }, + fuel_compression::Compactable, + fuel_tx::Transaction, +}; + +use crate::{ + db::RocksDb, + CompressedBlockPayload, +}; + +/// Task handle +pub struct Task { + request_receiver: mpsc::Receiver, +} + +pub enum TaskRequest { + Decompress { + block: Vec, + response: mpsc::Sender>, + }, +} + +#[derive(Debug)] +pub enum DecompressError { + /// Only the next sequential block can be decompressed + NotLatest, + /// Unknown compression version + UnknownVersion, + /// Deserialization error + Postcard(postcard::Error), + /// Other errors + Other(anyhow::Error), +} +impl From for DecompressError { + fn from(err: postcard::Error) -> Self { + Self::Postcard(err) + } +} +impl From for DecompressError { + fn from(err: anyhow::Error) -> Self { + Self::Other(err) + } +} + +async fn run(mut db: RocksDb, mut request_receiver: mpsc::Receiver) { + while let Some(req) = request_receiver.recv().await { + match req { + TaskRequest::Decompress { block, response } => { + let reply = decompress(&mut db, block); + response.send(reply).await.expect("Failed to respond"); + } + } + } +} + +fn decompress( + db: &mut RocksDb, + block: Vec, +) -> Result { + if block.is_empty() || block[0] != 0 { + return Err(DecompressError::UnknownVersion); + } + + let compressed: CompressedBlockPayload = postcard::from_bytes(&block[1..])?; + + // TODO: should be store height on da just to have this check? + // if *block.header.height != db.next_block_height()? 
{ + // return Err(DecompressError::NotLatest); + // } + + let ctx = Ctx { db }; + + let mut transactions = Vec::new(); + for tx in compressed.transactions.into_iter() { + transactions.push(Transaction::decompact(tx, &ctx)?); + } + + Ok(PartialFuelBlock { + header: PartialBlockHeader { + application: ApplicationHeader { + da_height: compressed.header.da_height, + consensus_parameters_version: compressed + .header + .consensus_parameters_version, + state_transition_bytecode_version: compressed + .header + .state_transition_bytecode_version, + generated: Empty, + }, + consensus: ConsensusHeader { + prev_root: compressed.header.prev_root, + height: compressed.header.height, + time: compressed.header.time, + generated: Empty, + }, + }, + transactions, + }) +} + +pub struct Ctx<'a> { + db: &'a RocksDb, +} + +impl DecompactionContext for Ctx<'_> { + fn read_AssetId( + &self, + key: Key, + ) -> anyhow::Result<::Type> { + todo!() + } + + fn read_Address( + &self, + key: Key, + ) -> anyhow::Result<::Type> { + todo!() + } + + fn read_ContractId( + &self, + key: Key, + ) -> anyhow::Result<::Type> { + todo!() + } + + fn read_ScriptCode( + &self, + key: Key, + ) -> anyhow::Result<::Type> { + todo!() + } + + fn read_Witness( + &self, + key: Key, + ) -> anyhow::Result<::Type> { + todo!() + } +} diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index 3b49badf909..17ae2a3e343 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -34,6 +34,7 @@ zeroize = "1.5" [features] default = ["std"] serde = ["dep:serde", "fuel-vm-private/serde"] +da-compression = ["fuel-vm-private/da-compression"] std = ["fuel-vm-private/std"] random = ["dep:rand", "fuel-vm-private/random"] test-helpers = ["random", "fuel-vm-private/test-helpers"] diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index 7ee96478e45..950a7c6260d 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -8,6 +8,9 @@ #![deny(missing_docs)] #![deny(warnings)] +#[doc(no_inline)] 
+#[cfg(feature = "da-compression")] +pub use fuel_vm_private::fuel_compression; #[doc(no_inline)] pub use fuel_vm_private::{ fuel_asm, From ff8e753f80c6330703d84a86673f201e0d90a44d Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 23 Jul 2024 09:30:21 +0300 Subject: [PATCH 022/112] Add decompression and a roundtrip test --- crates/compression/src/db.rs | 22 +++++-- crates/compression/src/lib.rs | 60 ++++++++++++++++++- crates/compression/src/services/compress.rs | 12 +--- crates/compression/src/services/decompress.rs | 20 +++---- crates/types/src/blockchain/block.rs | 5 +- crates/types/src/blockchain/header.rs | 2 +- crates/types/src/blockchain/primitives.rs | 2 +- 7 files changed, 92 insertions(+), 31 deletions(-) diff --git a/crates/compression/src/db.rs b/crates/compression/src/db.rs index 3b913b0f8e7..d970ed61af4 100644 --- a/crates/compression/src/db.rs +++ b/crates/compression/src/db.rs @@ -68,12 +68,24 @@ impl RocksDb { })) } - fn read(&self, key: Key) -> anyhow::Result { - assert_ne!(key, Key::DEFAULT_VALUE); + pub fn read(&self, key: Key) -> anyhow::Result { + if key == Key::DEFAULT_VALUE { + return Ok(T::Type::default()); + } + + let bare_key = postcard::to_stdvec(&key).expect("Never fails"); + + let mut table_suffix: Vec = T::NAME.bytes().collect(); + table_suffix.push(0); + + // Write new value + let k: Vec = table_suffix + .iter() + .chain(bare_key.iter()) + .copied() + .collect(); - let mut k = [0u8; 3]; - postcard::to_slice(&key, &mut k).expect("Always fits"); - let cf = self.db.cf_handle(T::NAME).unwrap(); + let cf = self.db.cf_handle("temporal").unwrap(); let Some(bytes) = self.db.get_cf(&cf, &k)? 
else { return Ok(T::Type::default()); }; diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 6510992a1a5..caac1341e2b 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -20,7 +20,10 @@ use fuel_core_types::{ }, primitives::DaBlockHeight, }, - fuel_tx::CompactTransaction, + fuel_tx::{ + CompactTransaction, + MessageId, + }, fuel_types::{ BlockHeight, Bytes32, @@ -56,7 +59,10 @@ mod tests { use db::RocksDb; use fuel_core_types::{ blockchain::{ - block::Block, + block::{ + Block, + PartialFuelBlock, + }, header::{ ApplicationHeader, ConsensusHeader, @@ -145,4 +151,54 @@ mod tests { "Size must be constant after first block" ); } + + #[test] + fn compress_decompress_roundtrip() { + let tx = Transaction::default_test_tx(); + + let tmpdir = TempDir::new().unwrap(); + let mut db = RocksDb::open(tmpdir.path()).unwrap(); + + let original_blocks: [Block; 3] = array::from_fn(|h| { + Block::new( + PartialBlockHeader { + application: ApplicationHeader { + da_height: DaBlockHeight::default(), + consensus_parameters_version: 4, + state_transition_bytecode_version: 5, + generated: Empty, + }, + consensus: ConsensusHeader { + prev_root: Bytes32::default(), + height: (h as u32).into(), + time: Tai64::UNIX_EPOCH, + generated: Empty, + }, + }, + vec![tx.clone()], + &[], + Bytes32::default(), + ) + .expect("Invalid block header") + }); + + let compressed_bytes: [Vec; 3] = original_blocks + .clone() + .map(|block| services::compress::compress(&mut db, block).unwrap()); + + drop(tmpdir); + let tmpdir2 = TempDir::new().unwrap(); + let mut db = RocksDb::open(tmpdir2.path()).unwrap(); + + let decompressed_blocks: [PartialFuelBlock; 3] = array::from_fn(|h| { + services::decompress::decompress(&mut db, compressed_bytes[h].clone()) + .expect("Decompression failed") + }); + + for (original, decompressed) in + original_blocks.iter().zip(decompressed_blocks.iter()) + { + assert_eq!(PartialFuelBlock::from(original.clone()), *decompressed); 
+ } + } } diff --git a/crates/compression/src/services/compress.rs b/crates/compression/src/services/compress.rs index ccaee881639..b12d761d96a 100644 --- a/crates/compression/src/services/compress.rs +++ b/crates/compression/src/services/compress.rs @@ -3,7 +3,6 @@ use fuel_core_types::fuel_compression::{ tables, Key, KeyPerTable, - RawKey, Table, }; use tokio::sync::mpsc; @@ -20,14 +19,9 @@ use crate::{ Header, }; -use fuel_core_types::{ - blockchain::primitives::DaBlockHeight, - fuel_compression::{ - Compactable, - CompactionContext, - }, - fuel_tx::Transaction, - tai64::Tai64, +use fuel_core_types::fuel_compression::{ + Compactable, + CompactionContext, }; /// Task handle diff --git a/crates/compression/src/services/decompress.rs b/crates/compression/src/services/decompress.rs index 602b8117821..2519d20cb63 100644 --- a/crates/compression/src/services/decompress.rs +++ b/crates/compression/src/services/decompress.rs @@ -8,11 +8,7 @@ use tokio::sync::mpsc; use fuel_core_types::{ blockchain::{ - block::{ - Block, - BlockV1, - PartialFuelBlock, - }, + block::PartialFuelBlock, header::{ ApplicationHeader, ConsensusHeader, @@ -74,7 +70,7 @@ async fn run(mut db: RocksDb, mut request_receiver: mpsc::Receiver) } } -fn decompress( +pub fn decompress( db: &mut RocksDb, block: Vec, ) -> Result { @@ -89,6 +85,8 @@ fn decompress( // return Err(DecompressError::NotLatest); // } + compressed.registrations.write_to_db(db)?; + let ctx = Ctx { db }; let mut transactions = Vec::new(); @@ -128,34 +126,34 @@ impl DecompactionContext for Ctx<'_> { &self, key: Key, ) -> anyhow::Result<::Type> { - todo!() + self.db.read(key) } fn read_Address( &self, key: Key, ) -> anyhow::Result<::Type> { - todo!() + self.db.read(key) } fn read_ContractId( &self, key: Key, ) -> anyhow::Result<::Type> { - todo!() + self.db.read(key) } fn read_ScriptCode( &self, key: Key, ) -> anyhow::Result<::Type> { - todo!() + self.db.read(key) } fn read_Witness( &self, key: Key, ) -> anyhow::Result<::Type> { - 
todo!() + self.db.read(key) } } diff --git a/crates/types/src/blockchain/block.rs b/crates/types/src/blockchain/block.rs index b104eaa85f1..0e6cc621d1f 100644 --- a/crates/types/src/blockchain/block.rs +++ b/crates/types/src/blockchain/block.rs @@ -56,7 +56,7 @@ pub struct BlockV1 { transactions: Vec, } -/// Compressed version of the fuel `Block`. +/// Fuel `Block` with transactions represented by their id only. pub type CompressedBlock = Block; /// Fuel block with all transaction data included @@ -65,7 +65,7 @@ pub type CompressedBlock = Block; /// transactions to produce a [`Block`] or /// it can be created with pre-executed transactions in /// order to validate they were constructed correctly. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct PartialFuelBlock { /// The partial header. pub header: PartialBlockHeader, @@ -118,6 +118,7 @@ impl Block { } /// Compresses the fuel block and replaces transactions with hashes. + /// Note that this is different from the DA compression process. 
pub fn compress(&self, chain_id: &ChainId) -> CompressedBlock { match self { Block::V1(inner) => { diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index 9da0daf1ffc..bda6455376a 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -148,7 +148,7 @@ impl BlockHeader { } } -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "test-helpers"), derive(Default))] /// A partially complete fuel block header that does not diff --git a/crates/types/src/blockchain/primitives.rs b/crates/types/src/blockchain/primitives.rs index bc3c9e04a6f..3221cd012d6 100644 --- a/crates/types/src/blockchain/primitives.rs +++ b/crates/types/src/blockchain/primitives.rs @@ -25,7 +25,7 @@ use secrecy::{ }; use zeroize::Zeroize; -#[derive(Clone, Copy, Debug, Default)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] /// Empty generated fields. 
pub struct Empty; From bddc56a655071e075f33d5e3fddc46ccd9baf5c7 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 3 Sep 2024 03:11:36 +0300 Subject: [PATCH 023/112] Update to match latest VM PR changes --- Cargo.lock | 2 + crates/compression/src/.DS_Store | Bin 6148 -> 6148 bytes crates/compression/src/context/compress.rs | 110 ++++++++++++ crates/compression/src/context/decompress.rs | 73 ++++++++ crates/compression/src/context/prepare.rs | 101 +++++++++++ crates/compression/src/db.rs | 146 ++++----------- crates/compression/src/eviction_policy.rs | 30 ++++ crates/compression/src/lib.rs | 29 +-- crates/compression/src/services/compress.rs | 133 +++----------- crates/compression/src/services/decompress.rs | 62 +------ crates/compression/src/tables.rs | 166 ++++++++++++++++++ 11 files changed, 568 insertions(+), 284 deletions(-) create mode 100644 crates/compression/src/context/compress.rs create mode 100644 crates/compression/src/context/decompress.rs create mode 100644 crates/compression/src/context/prepare.rs create mode 100644 crates/compression/src/eviction_policy.rs create mode 100644 crates/compression/src/tables.rs diff --git a/Cargo.lock b/Cargo.lock index 5460f0a15bd..6954bf6f414 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3154,6 +3154,7 @@ dependencies = [ name = "fuel-compression" version = "0.56.0" dependencies = [ + "anyhow", "fuel-derive", "serde", "serde-big-array", @@ -3860,6 +3861,7 @@ version = "0.56.0" name = "fuel-tx" version = "0.56.0" dependencies = [ + "anyhow", "bitflags 2.6.0", "derivative", "derive_more", diff --git a/crates/compression/src/.DS_Store b/crates/compression/src/.DS_Store index d64f70fb0dec3708643118c69a3b614542d1c44f..598f12c8b7116d6d6dbdc62982b93bf94a4a78f6 100644 GIT binary patch delta 45 zcmZoMXfc@JFU-Qgz`)4BAi%&-?3t6FoRpKFG?|fUB_qpbAI9Bm6B`^hvvd6A2LSd- B3@rcv delta 92 zcmZoMXfc@JFUrcmz`)4BAi%(o%@EH}%#g~E%uq7 { + pub db: &'a mut RocksDb, + pub cache_evictor: CacheEvictor, + /// Changes to the temporary registry, 
to be included in the compressed block header + pub changes: PerRegistryKeyspace>, +} + +fn registry_substitute( + keyspace: RegistryKeyspace, + value: &T, + ctx: &mut CompressCtx<'_>, +) -> anyhow::Result { + if *value == T::default() { + return Ok(RawKey::DEFAULT_VALUE); + } + + if let Some(found) = ctx.db.registry_index_lookup(keyspace, value)? { + return Ok(found); + } + + let key = ctx.cache_evictor.next_key(keyspace)?; + let old = ctx.changes[keyspace].insert(key, PostcardSerialized::new(value)?); + assert!(old.is_none(), "Key collision in registry substitution"); + Ok(key) +} + +impl<'a> RegistrySubstitutableBy> for Address { + fn substitute( + &self, + ctx: &mut CompressCtx<'_>, + keyspace: &str, + ) -> anyhow::Result { + registry_substitute( + check_keyspace!(keyspace, RegistryKeyspace::address), + self, + ctx, + ) + } +} + +impl<'a> RegistrySubstitutableBy> for AssetId { + fn substitute( + &self, + ctx: &mut CompressCtx<'_>, + keyspace: &str, + ) -> anyhow::Result { + registry_substitute( + check_keyspace!(keyspace, RegistryKeyspace::asset_id), + self, + ctx, + ) + } +} + +impl<'a> RegistrySubstitutableBy> for ContractId { + fn substitute( + &self, + ctx: &mut CompressCtx<'_>, + keyspace: &str, + ) -> anyhow::Result { + registry_substitute( + check_keyspace!(keyspace, RegistryKeyspace::contract_id), + self, + ctx, + ) + } +} + +impl<'a> RegistrySubstitutableBy> for Vec { + fn substitute( + &self, + ctx: &mut CompressCtx<'_>, + keyspace: &str, + ) -> anyhow::Result { + registry_substitute( + check_keyspace!( + keyspace, + RegistryKeyspace::script_code | RegistryKeyspace::witness + ), + self, + ctx, + ) + } +} diff --git a/crates/compression/src/context/decompress.rs b/crates/compression/src/context/decompress.rs new file mode 100644 index 00000000000..772644a8e6c --- /dev/null +++ b/crates/compression/src/context/decompress.rs @@ -0,0 +1,73 @@ +use anyhow::bail; +use fuel_core_types::{ + fuel_compression::{ + RawKey, + RegistryDesubstitutableBy, + }, + 
fuel_tx::{ + Address, + AssetId, + ContractId, + }, +}; + +use crate::{ + db::RocksDb, + tables::{ + check_keyspace, + RegistryKeyspace, + }, +}; + +pub struct DecompressCtx<'a> { + pub db: &'a RocksDb, +} + +impl<'a> RegistryDesubstitutableBy> for Address { + fn desubstitute( + c: &RawKey, + ctx: &DecompressCtx, + keyspace: &str, + ) -> anyhow::Result { + ctx.db + .read_registry(check_keyspace!(keyspace, RegistryKeyspace::address), *c) + } +} + +impl<'a> RegistryDesubstitutableBy> for AssetId { + fn desubstitute( + c: &RawKey, + ctx: &DecompressCtx, + keyspace: &str, + ) -> anyhow::Result { + ctx.db + .read_registry(check_keyspace!(keyspace, RegistryKeyspace::asset_id), *c) + } +} + +impl<'a> RegistryDesubstitutableBy> for ContractId { + fn desubstitute( + c: &RawKey, + ctx: &DecompressCtx, + keyspace: &str, + ) -> anyhow::Result { + ctx.db + .read_registry(check_keyspace!(keyspace, RegistryKeyspace::contract_id), *c) + } +} + +impl<'a> RegistryDesubstitutableBy> for Vec { + fn desubstitute( + c: &RawKey, + ctx: &DecompressCtx, + keyspace: &str, + ) -> anyhow::Result> { + ctx.db.read_registry( + check_keyspace!( + keyspace, + RegistryKeyspace::script_code | RegistryKeyspace::witness + ), + *c, + ) + } +} diff --git a/crates/compression/src/context/prepare.rs b/crates/compression/src/context/prepare.rs new file mode 100644 index 00000000000..25c3c9ed2b6 --- /dev/null +++ b/crates/compression/src/context/prepare.rs @@ -0,0 +1,101 @@ +use std::collections::HashSet; + +use anyhow::bail; +use fuel_core_types::{ + fuel_compression::{ + RawKey, + RegistrySubstitutableBy, + }, + fuel_tx::*, +}; + +use crate::{ + db::RocksDb, + tables::{ + check_keyspace, + PerRegistryKeyspace, + RegistryKeyspace, + }, +}; + +/// Preparation pass through the block to collect all keys accessed during compression. +/// Returns placeholder. The resulting "compressed block" should be discarded. 
+pub struct PrepareCtx<'a> { + /// Database handle + pub db: &'a mut RocksDb, + /// Keys accessed during compression. Will not be overwritten. + pub accessed_keys: PerRegistryKeyspace>, +} + +fn registry_prepare( + keyspace: RegistryKeyspace, + value: &T, + ctx: &mut PrepareCtx<'_>, +) -> anyhow::Result { + if *value == T::default() { + return Ok(RawKey::ZERO); + } + if let Some(found) = ctx.db.registry_index_lookup(keyspace, value)? { + ctx.accessed_keys[keyspace].insert(found); + } + Ok(RawKey::ZERO) +} + +impl<'a> RegistrySubstitutableBy> for Address { + fn substitute( + &self, + ctx: &mut PrepareCtx<'a>, + keyspace: &str, + ) -> anyhow::Result { + registry_prepare( + check_keyspace!(keyspace, RegistryKeyspace::address), + self, + ctx, + ) + } +} + +impl<'a> RegistrySubstitutableBy> for AssetId { + fn substitute( + &self, + ctx: &mut PrepareCtx<'_>, + keyspace: &str, + ) -> anyhow::Result { + registry_prepare( + check_keyspace!(keyspace, RegistryKeyspace::asset_id), + self, + ctx, + ) + } +} + +impl<'a> RegistrySubstitutableBy> for ContractId { + fn substitute( + &self, + ctx: &mut PrepareCtx<'_>, + keyspace: &str, + ) -> anyhow::Result { + registry_prepare( + check_keyspace!(keyspace, RegistryKeyspace::contract_id), + self, + ctx, + ) + } +} + +impl<'a> RegistrySubstitutableBy> for Vec { + fn substitute( + &self, + ctx: &mut PrepareCtx<'_>, + keyspace: &str, + ) -> anyhow::Result { + registry_prepare( + check_keyspace!( + keyspace, + RegistryKeyspace::script_code | RegistryKeyspace::witness + ), + self, + ctx, + ) + } +} diff --git a/crates/compression/src/db.rs b/crates/compression/src/db.rs index d970ed61af4..36126a26319 100644 --- a/crates/compression/src/db.rs +++ b/crates/compression/src/db.rs @@ -1,19 +1,15 @@ use std::path::Path; +use anyhow::bail; use fuel_core_types::{ - fuel_compression::{ - Key, - KeyPerTable, - RawKey, - Table, - TableName, - }, + fuel_compression::RawKey, fuel_types::BlockHeight, }; -use rocksdb::WriteBatchWithTransaction; + +use 
crate::tables::RegistryKeyspace; pub struct RocksDb { - db: rocksdb::DB, + pub(crate) db: rocksdb::DB, } impl RocksDb { @@ -32,16 +28,14 @@ impl RocksDb { &db_opts, path, vec![ - // Meta table holding misc + // Meta table holding misc data ColumnFamilyDescriptor::new("meta", Options::default()), - // Next temporal registry key for each table - ColumnFamilyDescriptor::new("next_keys", Options::default()), // Temporal registry key:value pairs, with key as // null-separated (table, key) pair ColumnFamilyDescriptor::new("temporal", Options::default()), // Reverse index into temporal registry values, with key as // null-separated (table, indexed_value) pair - ColumnFamilyDescriptor::new("index", Options::default()), + ColumnFamilyDescriptor::new("temporal_index", Options::default()), ], )?, }) @@ -49,115 +43,49 @@ impl RocksDb { } impl RocksDb { - fn next_key_raw(&self, table: TableName) -> anyhow::Result { - let cf_next_keys = self.db.cf_handle("next_keys").unwrap(); - let Some(bytes) = self.db.get_cf(&cf_next_keys, table)? 
else { - return Ok(RawKey::ZERO); - }; - Ok(postcard::from_bytes(&bytes).expect("Invalid key")) - } - - fn next_key(&self) -> anyhow::Result> { - Ok(Key::from_raw(self.next_key_raw(T::NAME)?)) - } - - pub fn start_keys(&self) -> anyhow::Result { - // TODO: allow KeyPerTable::from_fn to pass through errors - Ok(KeyPerTable::from_fn(|table_name| { - self.next_key_raw(table_name).expect("Db error") - })) - } - - pub fn read(&self, key: Key) -> anyhow::Result { - if key == Key::DEFAULT_VALUE { - return Ok(T::Type::default()); + pub fn read_registry( + &self, + keyspace: RegistryKeyspace, + key: RawKey, + ) -> anyhow::Result + where + T: serde::de::DeserializeOwned + Default, + { + if key == RawKey::DEFAULT_VALUE { + return Ok(T::default()); } - let bare_key = postcard::to_stdvec(&key).expect("Never fails"); + let db_key: Vec = + keyspace.name().bytes().chain(core::iter::once(0)).collect(); + let db_key = postcard::to_extend(&key, db_key).expect("Never fails"); - let mut table_suffix: Vec = T::NAME.bytes().collect(); - table_suffix.push(0); - - // Write new value - let k: Vec = table_suffix - .iter() - .chain(bare_key.iter()) - .copied() - .collect(); + println!("read_registry {:?}", &db_key); let cf = self.db.cf_handle("temporal").unwrap(); - let Some(bytes) = self.db.get_cf(&cf, &k)? else { - return Ok(T::Type::default()); + let Some(bytes) = self.db.get_cf(&cf, &db_key)? 
else { + bail!("Key {keyspace:?}:{key:?} not found"); }; - Ok(postcard::from_bytes(&bytes).expect("Invalid value")) - } - - pub fn batch_write( - &mut self, - start_key: Key, - values: Vec, - ) -> anyhow::Result<()> { - let mut key = start_key; - - let mut batch = WriteBatchWithTransaction::::default(); - - let cf_registry = self.db.cf_handle("temporal").unwrap(); - let cf_index = self.db.cf_handle("index").unwrap(); - - let empty = values.is_empty(); - for value in values.into_iter() { - let bare_key = postcard::to_stdvec(&key).expect("Never fails"); - let v = postcard::to_stdvec(&value).expect("Never fails"); - - let mut table_suffix: Vec = T::NAME.bytes().collect(); - table_suffix.push(0); - - // Write new value - let k: Vec = table_suffix - .iter() - .chain(bare_key.iter()) - .copied() - .collect(); - batch.put_cf(&cf_registry, k.clone(), v.clone()); - - // Remove the overwritten value from index, if any - if let Some(old) = self.db.get_cf(&cf_registry, k)? { - let iv: Vec = table_suffix.clone().into_iter().chain(old).collect(); - batch.delete_cf(&cf_index, iv); - } - - // Add it to the index - let iv: Vec = table_suffix.into_iter().chain(v).collect(); - batch.put_cf(&cf_index, iv, bare_key); - - key = key.next(); - } - self.db.write(batch)?; - - if !empty { - let key = postcard::to_stdvec(&key).expect("Never fails"); - let cf_next_keys = self.db.cf_handle("next_keys").unwrap(); - self.db.put_cf(&cf_next_keys, T::NAME, key)?; - } - - Ok(()) + Ok(postcard::from_bytes(&bytes)?) } - pub fn index_lookup( + pub fn registry_index_lookup( &self, - value: &T::Type, - ) -> anyhow::Result>> { - let cf_index = self.db.cf_handle("index").unwrap(); - let val = postcard::to_stdvec(&value).expect("Never fails"); - let mut key: Vec = T::NAME.bytes().collect(); - key.push(0); - key.extend(val); - let Some(k) = self.db.get_cf(&cf_index, key)? 
else { + keyspace: RegistryKeyspace, + value: V, + ) -> anyhow::Result> { + let db_key: Vec = + keyspace.name().bytes().chain(core::iter::once(0)).collect(); + let db_key = postcard::to_extend(&value, db_key).expect("Never fails"); + + let cf_index = self.db.cf_handle("temporal_index").unwrap(); + let Some(k) = self.db.get_cf(&cf_index, db_key)? else { return Ok(None); }; - Ok(Some(postcard::from_bytes(&k).expect("Never fails"))) + Ok(Some(postcard::from_bytes(&k)?)) } +} +impl RocksDb { pub fn next_block_height(&self) -> anyhow::Result { let cf_meta = self.db.cf_handle("meta").unwrap(); let Some(bytes) = self.db.get_cf(&cf_meta, b"current_block")? else { diff --git a/crates/compression/src/eviction_policy.rs b/crates/compression/src/eviction_policy.rs new file mode 100644 index 00000000000..81179678dd9 --- /dev/null +++ b/crates/compression/src/eviction_policy.rs @@ -0,0 +1,30 @@ +use std::collections::HashSet; + +use fuel_core_types::fuel_compression::RawKey; + +use crate::tables::{ + PerRegistryKeyspace, + RegistryKeyspace, +}; + +pub struct CacheEvictor { + /// Set of keys that must not be evicted + pub keep_keys: PerRegistryKeyspace>, +} + +impl CacheEvictor { + /// Get a key, evicting an old value if necessary + pub fn next_key(&mut self, keyspace: RegistryKeyspace) -> anyhow::Result { + // Pick first key not in the set + // TODO: this can be optimized by keeping a counter of the last key used + // TODO: use a proper algo, maybe LRU? 
+ let mut key = RawKey::ZERO; + while self.keep_keys[keyspace].contains(&key) { + key = key.next(); + assert_ne!(key, RawKey::ZERO, "Ran out of keys"); + } + + self.keep_keys[keyspace].insert(key); + Ok(key) + } +} diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index e6ef1c74b54..198f2fcb39c 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -1,9 +1,16 @@ pub mod db; +mod eviction_policy; mod ports; -mod services { +mod tables; +pub mod services { pub mod compress; pub mod decompress; } +mod context { + pub mod compress; + pub mod decompress; + pub mod prepare; +} use serde::{ Deserialize, @@ -17,13 +24,15 @@ use fuel_core_types::{ StateTransitionBytecodeVersion, }, primitives::DaBlockHeight, - }, fuel_compression::RawKey, fuel_tx::{ - Address, CompactTransaction, MessageId - }, fuel_types::{ + }, + fuel_tx::CompressedTransaction, + fuel_types::{ BlockHeight, Bytes32, - }, tai64::Tai64 + }, + tai64::Tai64, }; +use tables::RegistrationsPerTable; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] struct Header { @@ -43,12 +52,7 @@ struct CompressedBlockPayload { /// Compressed block header header: Header, /// Compressed transactions - transactions: Vec, -} - -#[derive(Clone, Serialize, Deserialize)] -pub struct RegistrationsPerTable { - address: Vec<(RawKey, Address)>, + transactions: Vec, } #[cfg(test)] @@ -82,7 +86,7 @@ mod tests { #[test] fn postcard_roundtrip() { let original = CompressedBlockPayload { - registrations: ChangesPerTable::from_start_keys(Default::default()), + registrations: RegistrationsPerTable::default(), header: Header { da_height: DaBlockHeight::default(), prev_root: Default::default(), @@ -185,6 +189,7 @@ mod tests { .clone() .map(|block| services::compress::compress(&mut db, block).unwrap()); + db.db.flush().unwrap(); drop(tmpdir); let tmpdir2 = TempDir::new().unwrap(); let mut db = RocksDb::open(tmpdir2.path()).unwrap(); diff --git 
a/crates/compression/src/services/compress.rs b/crates/compression/src/services/compress.rs index b12d761d96a..34e36a8fbef 100644 --- a/crates/compression/src/services/compress.rs +++ b/crates/compression/src/services/compress.rs @@ -1,29 +1,23 @@ -use fuel_core_types::fuel_compression::{ - access::*, - tables, - Key, - KeyPerTable, - Table, -}; +use fuel_core_types::fuel_compression::CompressibleBy; use tokio::sync::mpsc; use fuel_core_types::blockchain::block::Block; use crate::{ - block_section::{ - ChangesPerTable, - WriteTo, + context::{ + compress::CompressCtx, + prepare::PrepareCtx, }, db::RocksDb, + eviction_policy::CacheEvictor, + tables::{ + PerRegistryKeyspace, + RegistrationsPerTable, + }, CompressedBlockPayload, Header, }; -use fuel_core_types::fuel_compression::{ - Compactable, - CompactionContext, -}; - /// Task handle pub struct Task { request_receiver: mpsc::Receiver, @@ -48,7 +42,7 @@ impl From for CompressError { } } -async fn run(mut db: RocksDb, mut request_receiver: mpsc::Receiver) { +pub async fn run(mut db: RocksDb, mut request_receiver: mpsc::Receiver) { while let Some(req) = request_receiver.recv().await { match req { TaskRequest::Compress { block, response } => { @@ -66,21 +60,25 @@ pub fn compress(db: &mut RocksDb, block: Block) -> Result, CompressError let target = block.transactions().to_vec(); - let start_keys = db.start_keys()?; - let key_limits = target.count(); - let safe_keys_start = start_keys.offset_by(key_limits); - let mut ctx = Ctx { + let mut prepare_ctx = PrepareCtx { db, - start_keys, - next_keys: start_keys, - safe_keys_start, - changes: ChangesPerTable::from_start_keys(start_keys), + accessed_keys: PerRegistryKeyspace::default(), }; - let transactions = target.compact(&mut ctx)?; + let _ = target.compress(&mut prepare_ctx)?; + + let mut ctx = CompressCtx { + db: prepare_ctx.db, + cache_evictor: CacheEvictor { + keep_keys: prepare_ctx.accessed_keys, + }, + changes: Default::default(), + }; + let transactions = 
target.compress(&mut ctx)?; let registrations = ctx.changes; + let registrations = RegistrationsPerTable::try_from(registrations)?; // Apply changes to the db - // TODO: these should be done in a single atomic transaction + // TODO: these two operations should be atomic together registrations.write_to_db(db)?; db.increment_block_height()?; @@ -107,86 +105,3 @@ pub fn compress(db: &mut RocksDb, block: Block) -> Result, CompressError Ok(compressed) } - -pub struct Ctx<'a> { - db: &'a mut RocksDb, - /// These are the keys where writing started - start_keys: KeyPerTable, - /// The next keys to use for each table - next_keys: KeyPerTable, - /// Keys in range next_keys..safe_keys_start - /// could be overwritten by the compaction, - /// and cannot be used for new values. - safe_keys_start: KeyPerTable, - changes: ChangesPerTable, -} - -impl<'a> Ctx<'a> { - /// Convert a value to a key - /// If necessary, store the value in the changeset and allocate a new key. - fn value_to_key(&mut self, value: T::Type) -> anyhow::Result> - where - ChangesPerTable: AccessRef> + AccessMut>, - KeyPerTable: AccessCopy> + AccessMut>, - { - // Check if the value is within the current changeset - if let Some(key) = - >::get(&self.changes).lookup_value(&value) - { - return Ok(key); - } - - // Check if the registry contains this value already. - if let Some(key) = self.db.index_lookup(&value)? 
{ - // Check if the value is in the possibly-overwritable range - let start: Key = self.start_keys.value(); - let end: Key = self.safe_keys_start.value(); - if !key.is_between(start, end) { - return Ok(key); - } - } - - // Allocate a new key for this - let key = >>::get_mut(&mut self.next_keys) - .take_next(); - - >::get_mut(&mut self.changes) - .values - .push(value); - Ok(key) - } -} - -impl<'a> CompactionContext for Ctx<'a> { - fn to_key_AssetId( - &mut self, - value: [u8; 32], - ) -> anyhow::Result> { - self.value_to_key(value) - } - - fn to_key_Address( - &mut self, - value: [u8; 32], - ) -> anyhow::Result> { - self.value_to_key(value) - } - - fn to_key_ContractId( - &mut self, - value: [u8; 32], - ) -> anyhow::Result> { - self.value_to_key(value) - } - - fn to_key_ScriptCode( - &mut self, - value: Vec, - ) -> anyhow::Result> { - self.value_to_key(value) - } - - fn to_key_Witness(&mut self, value: Vec) -> anyhow::Result> { - self.value_to_key(value) - } -} diff --git a/crates/compression/src/services/decompress.rs b/crates/compression/src/services/decompress.rs index 2519d20cb63..8726bc91134 100644 --- a/crates/compression/src/services/decompress.rs +++ b/crates/compression/src/services/decompress.rs @@ -1,9 +1,3 @@ -use fuel_core_types::fuel_compression::{ - tables, - DecompactionContext, - Key, - Table, -}; use tokio::sync::mpsc; use fuel_core_types::{ @@ -16,11 +10,12 @@ use fuel_core_types::{ }, primitives::Empty, }, - fuel_compression::Compactable, + fuel_compression::DecompressibleBy, fuel_tx::Transaction, }; use crate::{ + context::decompress::DecompressCtx, db::RocksDb, CompressedBlockPayload, }; @@ -59,7 +54,7 @@ impl From for DecompressError { } } -async fn run(mut db: RocksDb, mut request_receiver: mpsc::Receiver) { +pub async fn run(mut db: RocksDb, mut request_receiver: mpsc::Receiver) { while let Some(req) = request_receiver.recv().await { match req { TaskRequest::Decompress { block, response } => { @@ -87,12 +82,12 @@ pub fn decompress( 
compressed.registrations.write_to_db(db)?; - let ctx = Ctx { db }; + let ctx = DecompressCtx { db }; - let mut transactions = Vec::new(); - for tx in compressed.transactions.into_iter() { - transactions.push(Transaction::decompact(tx, &ctx)?); - } + let transactions = as DecompressibleBy<_>>::decompress( + &compressed.transactions, + &ctx, + )?; Ok(PartialFuelBlock { header: PartialBlockHeader { @@ -116,44 +111,3 @@ pub fn decompress( transactions, }) } - -pub struct Ctx<'a> { - db: &'a RocksDb, -} - -impl DecompactionContext for Ctx<'_> { - fn read_AssetId( - &self, - key: Key, - ) -> anyhow::Result<::Type> { - self.db.read(key) - } - - fn read_Address( - &self, - key: Key, - ) -> anyhow::Result<::Type> { - self.db.read(key) - } - - fn read_ContractId( - &self, - key: Key, - ) -> anyhow::Result<::Type> { - self.db.read(key) - } - - fn read_ScriptCode( - &self, - key: Key, - ) -> anyhow::Result<::Type> { - self.db.read(key) - } - - fn read_Witness( - &self, - key: Key, - ) -> anyhow::Result<::Type> { - self.db.read(key) - } -} diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs new file mode 100644 index 00000000000..143802b0954 --- /dev/null +++ b/crates/compression/src/tables.rs @@ -0,0 +1,166 @@ +use super::db::RocksDb; +use fuel_core_types::{ + fuel_compression::RawKey, + fuel_tx::{ + Address, + AssetId, + ContractId, + }, +}; +use std::collections::HashMap; + +use rocksdb::WriteBatchWithTransaction; + +/// Type-erased (serialized) data +#[derive(Debug, Clone)] +pub struct PostcardSerialized(Vec); +impl PostcardSerialized { + pub(crate) fn new(value: T) -> anyhow::Result { + Ok(Self(postcard::to_stdvec(&value)?)) + } +} + +macro_rules! 
check_keyspace { + ($keyspace:expr, $expected:pat) => { + match RegistryKeyspace::from_str($keyspace) { + Some(val @ $expected) => val, + Some(other) => { + bail!("Keyspace {other:?} not valid for {}", stringify!($expected)) + } + None => bail!("Unknown keyspace {:?}", $keyspace), + } + }; +} +pub(crate) use check_keyspace; + +macro_rules! tables { + ($($name:ident: $type:ty),*$(,)?) => { + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] + #[allow(non_camel_case_types)] // Match names in structs exactly + pub enum RegistryKeyspace { + $( + $name, + )* + } + impl RegistryKeyspace { + pub fn name(&self) -> &'static str { + match self { + $( + Self::$name => stringify!($name), + )* + } + } + pub fn from_str(name: &str) -> Option { + match name { + $( + stringify!($name) => Some(Self::$name), + )* + _ => None, + } + } + } + + + #[derive(Debug, Clone, Default)] + pub struct PerRegistryKeyspace { + $(pub $name: T,)* + } + impl core::ops::Index for PerRegistryKeyspace { + type Output = T; + + fn index(&self, index: RegistryKeyspace) -> &Self::Output { + match index { + $( + RegistryKeyspace::$name => &self.$name, + )* + } + } + } + impl core::ops::IndexMut for PerRegistryKeyspace { + fn index_mut(&mut self, index: RegistryKeyspace) -> &mut Self::Output { + match index { + $( + RegistryKeyspace::$name => &mut self.$name, + )* + } + } + } + + #[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)] + pub struct RegistrationsPerTable { + $(pub $name: Vec<(RawKey, $type)>,)* + } + + impl TryFrom>> for RegistrationsPerTable { + type Error = anyhow::Error; + + fn try_from(value: PerRegistryKeyspace>) -> Result { + let mut result = Self::default(); + $( + for (key, value) in value.$name.into_iter() { + result.$name.push((key, postcard::from_bytes(&value.0)?)); + } + )* + Ok(result) + } + } + + impl RegistrationsPerTable { + pub(crate) fn is_empty(&self) -> bool { + $( + if !self.$name.is_empty() { + return false; + } + )* + true + } + + pub(crate) fn 
write_all_tx(&self, db: &mut RocksDb) -> anyhow::Result<()> { + let mut batch = WriteBatchWithTransaction::::default(); + let cf_registry = db.db.cf_handle("temporal").unwrap(); + let cf_index = db.db.cf_handle("temporal_index").unwrap(); + + $( + let mut key_table_prefix: Vec = stringify!($name).bytes().collect(); + key_table_prefix.reserve(4); + key_table_prefix.push(0); + + for (key, value) in self.$name.iter() { + // Get key bytes + let raw_key = postcard::to_stdvec(&key).expect("Never fails"); + + // Write new value + let db_key: Vec = key_table_prefix.iter().copied().chain(raw_key.clone()).collect(); + let db_value = postcard::to_stdvec(&value).expect("Never fails"); + + println!("write_to_db {:?}", &db_key); + + batch.put_cf(&cf_registry, db_key.clone(), db_value.clone()); + + // Remove the overwritten value from index, if any + if let Some(old_value) = db.db.get_cf(&cf_registry, db_key.clone())? { + let index_value: Vec = key_table_prefix.iter().copied().chain(old_value).collect(); + batch.delete_cf(&cf_index, index_value); + } + + // Add the new value to the index + let index_key: Vec = key_table_prefix.iter().copied().chain(db_value).collect(); + batch.put_cf(&cf_index, index_key, raw_key); + } + + )* + + db.db.write(batch)?; + Ok(()) + } + } + }; +} + +tables!( + address: Address, + asset_id: AssetId, + contract_id: ContractId, + script_code: Vec, + witness: Vec, +); From f09f51af79e7ad4e8270c5e7bdd4f1a7f34f493e Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 3 Sep 2024 03:50:27 +0300 Subject: [PATCH 024/112] Update to latest fuel-vm and fix most clippy issues as well --- crates/compression/src/context/compress.rs | 16 ++++++++-------- crates/compression/src/context/decompress.rs | 16 ++++++++-------- crates/compression/src/context/prepare.rs | 16 ++++++++-------- crates/compression/src/lib.rs | 2 +- crates/compression/src/tables.rs | 3 ++- .../consensus_module/poa/src/service_test.rs | 1 + crates/services/executor/src/ports.rs | 1 + 7 files 
changed, 29 insertions(+), 26 deletions(-) diff --git a/crates/compression/src/context/compress.rs b/crates/compression/src/context/compress.rs index 729d4789eee..5f95f7b5194 100644 --- a/crates/compression/src/context/compress.rs +++ b/crates/compression/src/context/compress.rs @@ -50,11 +50,11 @@ fn registry_substitute( Ok(key) } -impl<'a> RegistrySubstitutableBy> for Address { +impl RegistrySubstitutableBy> for Address { fn substitute( &self, - ctx: &mut CompressCtx<'_>, keyspace: &str, + ctx: &mut CompressCtx<'_>, ) -> anyhow::Result { registry_substitute( check_keyspace!(keyspace, RegistryKeyspace::address), @@ -64,11 +64,11 @@ impl<'a> RegistrySubstitutableBy> for Address { } } -impl<'a> RegistrySubstitutableBy> for AssetId { +impl RegistrySubstitutableBy> for AssetId { fn substitute( &self, - ctx: &mut CompressCtx<'_>, keyspace: &str, + ctx: &mut CompressCtx<'_>, ) -> anyhow::Result { registry_substitute( check_keyspace!(keyspace, RegistryKeyspace::asset_id), @@ -78,11 +78,11 @@ impl<'a> RegistrySubstitutableBy> for AssetId { } } -impl<'a> RegistrySubstitutableBy> for ContractId { +impl RegistrySubstitutableBy> for ContractId { fn substitute( &self, - ctx: &mut CompressCtx<'_>, keyspace: &str, + ctx: &mut CompressCtx<'_>, ) -> anyhow::Result { registry_substitute( check_keyspace!(keyspace, RegistryKeyspace::contract_id), @@ -92,11 +92,11 @@ impl<'a> RegistrySubstitutableBy> for ContractId { } } -impl<'a> RegistrySubstitutableBy> for Vec { +impl RegistrySubstitutableBy> for Vec { fn substitute( &self, - ctx: &mut CompressCtx<'_>, keyspace: &str, + ctx: &mut CompressCtx<'_>, ) -> anyhow::Result { registry_substitute( check_keyspace!( diff --git a/crates/compression/src/context/decompress.rs b/crates/compression/src/context/decompress.rs index 772644a8e6c..2835b315eb0 100644 --- a/crates/compression/src/context/decompress.rs +++ b/crates/compression/src/context/decompress.rs @@ -23,44 +23,44 @@ pub struct DecompressCtx<'a> { pub db: &'a RocksDb, } -impl<'a> 
RegistryDesubstitutableBy> for Address { +impl RegistryDesubstitutableBy> for Address { fn desubstitute( c: &RawKey, - ctx: &DecompressCtx, keyspace: &str, + ctx: &DecompressCtx, ) -> anyhow::Result { ctx.db .read_registry(check_keyspace!(keyspace, RegistryKeyspace::address), *c) } } -impl<'a> RegistryDesubstitutableBy> for AssetId { +impl RegistryDesubstitutableBy> for AssetId { fn desubstitute( c: &RawKey, - ctx: &DecompressCtx, keyspace: &str, + ctx: &DecompressCtx, ) -> anyhow::Result { ctx.db .read_registry(check_keyspace!(keyspace, RegistryKeyspace::asset_id), *c) } } -impl<'a> RegistryDesubstitutableBy> for ContractId { +impl RegistryDesubstitutableBy> for ContractId { fn desubstitute( c: &RawKey, - ctx: &DecompressCtx, keyspace: &str, + ctx: &DecompressCtx, ) -> anyhow::Result { ctx.db .read_registry(check_keyspace!(keyspace, RegistryKeyspace::contract_id), *c) } } -impl<'a> RegistryDesubstitutableBy> for Vec { +impl RegistryDesubstitutableBy> for Vec { fn desubstitute( c: &RawKey, - ctx: &DecompressCtx, keyspace: &str, + ctx: &DecompressCtx, ) -> anyhow::Result> { ctx.db.read_registry( check_keyspace!( diff --git a/crates/compression/src/context/prepare.rs b/crates/compression/src/context/prepare.rs index 25c3c9ed2b6..5d526e638c3 100644 --- a/crates/compression/src/context/prepare.rs +++ b/crates/compression/src/context/prepare.rs @@ -41,11 +41,11 @@ fn registry_prepare( Ok(RawKey::ZERO) } -impl<'a> RegistrySubstitutableBy> for Address { +impl RegistrySubstitutableBy> for Address { fn substitute( &self, - ctx: &mut PrepareCtx<'a>, keyspace: &str, + ctx: &mut PrepareCtx<'_>, ) -> anyhow::Result { registry_prepare( check_keyspace!(keyspace, RegistryKeyspace::address), @@ -55,11 +55,11 @@ impl<'a> RegistrySubstitutableBy> for Address { } } -impl<'a> RegistrySubstitutableBy> for AssetId { +impl RegistrySubstitutableBy> for AssetId { fn substitute( &self, - ctx: &mut PrepareCtx<'_>, keyspace: &str, + ctx: &mut PrepareCtx<'_>, ) -> anyhow::Result { 
registry_prepare( check_keyspace!(keyspace, RegistryKeyspace::asset_id), @@ -69,11 +69,11 @@ impl<'a> RegistrySubstitutableBy> for AssetId { } } -impl<'a> RegistrySubstitutableBy> for ContractId { +impl RegistrySubstitutableBy> for ContractId { fn substitute( &self, - ctx: &mut PrepareCtx<'_>, keyspace: &str, + ctx: &mut PrepareCtx<'_>, ) -> anyhow::Result { registry_prepare( check_keyspace!(keyspace, RegistryKeyspace::contract_id), @@ -83,11 +83,11 @@ impl<'a> RegistrySubstitutableBy> for ContractId { } } -impl<'a> RegistrySubstitutableBy> for Vec { +impl RegistrySubstitutableBy> for Vec { fn substitute( &self, - ctx: &mut PrepareCtx<'_>, keyspace: &str, + ctx: &mut PrepareCtx<'_>, ) -> anyhow::Result { registry_prepare( check_keyspace!( diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 198f2fcb39c..06b996d9d9e 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -1,6 +1,6 @@ pub mod db; mod eviction_policy; -mod ports; +pub mod ports; mod tables; pub mod services { pub mod compress; diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index 143802b0954..a9615ebe3ef 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -106,6 +106,7 @@ macro_rules! tables { } impl RegistrationsPerTable { + #[cfg(test)] pub(crate) fn is_empty(&self) -> bool { $( if !self.$name.is_empty() { @@ -115,7 +116,7 @@ macro_rules! 
tables { true } - pub(crate) fn write_all_tx(&self, db: &mut RocksDb) -> anyhow::Result<()> { + pub(crate) fn write_to_db(&self, db: &mut RocksDb) -> anyhow::Result<()> { let mut batch = WriteBatchWithTransaction::::default(); let cf_registry = db.db.cf_handle("temporal").unwrap(); let cf_index = db.db.cf_handle("temporal_index").unwrap(); diff --git a/crates/services/consensus_module/poa/src/service_test.rs b/crates/services/consensus_module/poa/src/service_test.rs index 9d1b87c008e..c63a4585c68 100644 --- a/crates/services/consensus_module/poa/src/service_test.rs +++ b/crates/services/consensus_module/poa/src/service_test.rs @@ -460,6 +460,7 @@ fn test_signing_key() -> Secret { } #[derive(Debug, PartialEq)] +#[allow(clippy::large_enum_variant)] enum FakeProducedBlock { Predefined(Block), New(BlockHeight, Tai64), diff --git a/crates/services/executor/src/ports.rs b/crates/services/executor/src/ports.rs index 22eb15c2403..e8f96366c28 100644 --- a/crates/services/executor/src/ports.rs +++ b/crates/services/executor/src/ports.rs @@ -17,6 +17,7 @@ use fuel_core_types::{ use alloc::vec::Vec; /// The wrapper around either `Transaction` or `CheckedTransaction`. 
+#[allow(clippy::large_enum_variant)] pub enum MaybeCheckedTransaction { CheckedTransaction(CheckedTransaction, ConsensusParametersVersion), Transaction(fuel_tx::Transaction), From 980dd14e305e37657ae9d7281deb377d25a6fe14 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 3 Sep 2024 04:57:24 +0300 Subject: [PATCH 025/112] Update to latest vm changes --- Cargo.lock | 4 --- crates/compression/README.md | 34 +++++++++++++++++++ crates/compression/src/context/compress.rs | 8 ++--- crates/compression/src/context/decompress.rs | 8 ++--- crates/compression/src/context/prepare.rs | 8 ++--- crates/compression/src/lib.rs | 2 +- crates/compression/src/services/decompress.rs | 2 +- 7 files changed, 48 insertions(+), 18 deletions(-) create mode 100644 crates/compression/README.md diff --git a/Cargo.lock b/Cargo.lock index 6954bf6f414..23fca3c074b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3154,7 +3154,6 @@ dependencies = [ name = "fuel-compression" version = "0.56.0" dependencies = [ - "anyhow", "fuel-derive", "serde", "serde-big-array", @@ -3826,7 +3825,6 @@ version = "0.56.0" dependencies = [ "proc-macro2", "quote", - "regex", "syn 2.0.77", "synstructure", ] @@ -3861,7 +3859,6 @@ version = "0.56.0" name = "fuel-tx" version = "0.56.0" dependencies = [ - "anyhow", "bitflags 2.6.0", "derivative", "derive_more", @@ -3884,7 +3881,6 @@ dependencies = [ name = "fuel-types" version = "0.56.0" dependencies = [ - "anyhow", "fuel-compression", "fuel-derive", "hex", diff --git a/crates/compression/README.md b/crates/compression/README.md new file mode 100644 index 00000000000..23767f0ea14 --- /dev/null +++ b/crates/compression/README.md @@ -0,0 +1,34 @@ +# Compression and decompression of fuel-types for the DA layer + +## Compressed block header + +Each compressed block begins with a single-byte version field, so that it's possible to change the format later. 
+ +## Temporal registry + +This crate provides offchain registries for different types such as `AssetId`, `ContractId`, scripts, and predicates. Each registry is a key-value store with three-byte key. The registries are essentially compression caches. The three-byte key allows cache size of 16 million values before re-registering the older values. + +The registries allow replacing repeated objects with their respective keys, so if an object +is used multiple times in a short interval (couple of months, maybe), then the full value +exists on only a single uncompressed block. + +### Fraud proofs + +Compressed block will start with 32 bytes of merkle root over all compression smts, followed by newly registered values along with their keys. Using an SMT provides flexibility around the algorithm we use to define keys without knowing how exactly values were chosen to be registered. + +Each registry also uses an SMT. Since the keys are three bytes long, the depth of the SMT is capped at 24 levels. + + + - More efficient for fraud proofs instead of needing to provide entire previous blocks with proofs + +## Compression of `UtxoIds` + +Since each `UtxoId` only appears once, there's no point in registering them. Instead, they are replaced with `TxPointer`s (7 bytes worst case), which are still unique. + +### Fraud proofs + +During fraud proofs we need to use the `prev_root` to prove that the referenced block height is part of the chain. + +## Other techniques + +- These techniques should be good enough for now, but there are lots of other interesting ideas for this.
diff --git a/crates/compression/src/context/compress.rs b/crates/compression/src/context/compress.rs index 5f95f7b5194..15218ef6bc2 100644 --- a/crates/compression/src/context/compress.rs +++ b/crates/compression/src/context/compress.rs @@ -50,7 +50,7 @@ fn registry_substitute( Ok(key) } -impl RegistrySubstitutableBy> for Address { +impl RegistrySubstitutableBy, anyhow::Error> for Address { fn substitute( &self, keyspace: &str, @@ -64,7 +64,7 @@ impl RegistrySubstitutableBy> for Address { } } -impl RegistrySubstitutableBy> for AssetId { +impl RegistrySubstitutableBy, anyhow::Error> for AssetId { fn substitute( &self, keyspace: &str, @@ -78,7 +78,7 @@ impl RegistrySubstitutableBy> for AssetId { } } -impl RegistrySubstitutableBy> for ContractId { +impl RegistrySubstitutableBy, anyhow::Error> for ContractId { fn substitute( &self, keyspace: &str, @@ -92,7 +92,7 @@ impl RegistrySubstitutableBy> for ContractId { } } -impl RegistrySubstitutableBy> for Vec { +impl RegistrySubstitutableBy, anyhow::Error> for Vec { fn substitute( &self, keyspace: &str, diff --git a/crates/compression/src/context/decompress.rs b/crates/compression/src/context/decompress.rs index 2835b315eb0..b5acbb7767d 100644 --- a/crates/compression/src/context/decompress.rs +++ b/crates/compression/src/context/decompress.rs @@ -23,7 +23,7 @@ pub struct DecompressCtx<'a> { pub db: &'a RocksDb, } -impl RegistryDesubstitutableBy> for Address { +impl RegistryDesubstitutableBy, anyhow::Error> for Address { fn desubstitute( c: &RawKey, keyspace: &str, @@ -34,7 +34,7 @@ impl RegistryDesubstitutableBy> for Address { } } -impl RegistryDesubstitutableBy> for AssetId { +impl RegistryDesubstitutableBy, anyhow::Error> for AssetId { fn desubstitute( c: &RawKey, keyspace: &str, @@ -45,7 +45,7 @@ impl RegistryDesubstitutableBy> for AssetId { } } -impl RegistryDesubstitutableBy> for ContractId { +impl RegistryDesubstitutableBy, anyhow::Error> for ContractId { fn desubstitute( c: &RawKey, keyspace: &str, @@ -56,7 +56,7 @@ 
impl RegistryDesubstitutableBy> for ContractId { } } -impl RegistryDesubstitutableBy> for Vec { +impl RegistryDesubstitutableBy, anyhow::Error> for Vec { fn desubstitute( c: &RawKey, keyspace: &str, diff --git a/crates/compression/src/context/prepare.rs b/crates/compression/src/context/prepare.rs index 5d526e638c3..950cb2dc607 100644 --- a/crates/compression/src/context/prepare.rs +++ b/crates/compression/src/context/prepare.rs @@ -41,7 +41,7 @@ fn registry_prepare( Ok(RawKey::ZERO) } -impl RegistrySubstitutableBy> for Address { +impl RegistrySubstitutableBy, anyhow::Error> for Address { fn substitute( &self, keyspace: &str, @@ -55,7 +55,7 @@ impl RegistrySubstitutableBy> for Address { } } -impl RegistrySubstitutableBy> for AssetId { +impl RegistrySubstitutableBy, anyhow::Error> for AssetId { fn substitute( &self, keyspace: &str, @@ -69,7 +69,7 @@ impl RegistrySubstitutableBy> for AssetId { } } -impl RegistrySubstitutableBy> for ContractId { +impl RegistrySubstitutableBy, anyhow::Error> for ContractId { fn substitute( &self, keyspace: &str, @@ -83,7 +83,7 @@ impl RegistrySubstitutableBy> for ContractId { } } -impl RegistrySubstitutableBy> for Vec { +impl RegistrySubstitutableBy, anyhow::Error> for Vec { fn substitute( &self, keyspace: &str, diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 06b996d9d9e..d475908adfb 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -44,7 +44,7 @@ struct Header { pub state_transition_bytecode_version: StateTransitionBytecodeVersion, } -/// Compressed block, without the preceeding version byte. +/// Compressed block, without the preceding version byte. 
#[derive(Clone, Serialize, Deserialize)] struct CompressedBlockPayload { /// Registration section of the compressed block diff --git a/crates/compression/src/services/decompress.rs b/crates/compression/src/services/decompress.rs index 8726bc91134..50c381fec39 100644 --- a/crates/compression/src/services/decompress.rs +++ b/crates/compression/src/services/decompress.rs @@ -84,7 +84,7 @@ pub fn decompress( let ctx = DecompressCtx { db }; - let transactions = as DecompressibleBy<_>>::decompress( + let transactions = as DecompressibleBy<_, _>>::decompress( &compressed.transactions, &ctx, )?; From e08328afb919dfaae6d05978739003977a782a46 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 3 Sep 2024 15:26:16 +0300 Subject: [PATCH 026/112] Update to latest vm changes --- crates/compression/README.md | 2 +- crates/compression/src/context/compress.rs | 56 ++++---------------- crates/compression/src/context/decompress.rs | 50 ++++------------- crates/compression/src/context/prepare.rs | 55 ++++--------------- crates/compression/src/services/compress.rs | 8 ++- crates/compression/src/tables.rs | 14 ----- 6 files changed, 37 insertions(+), 148 deletions(-) diff --git a/crates/compression/README.md b/crates/compression/README.md index 23767f0ea14..fcbb2446433 100644 --- a/crates/compression/README.md +++ b/crates/compression/README.md @@ -1,4 +1,4 @@ -# Compression and decompression of fuel-types for the DA layer +# Compression and decompression of transactions for the DA layer ## Compressed block header diff --git a/crates/compression/src/context/compress.rs b/crates/compression/src/context/compress.rs index 15218ef6bc2..e8b1be1b495 100644 --- a/crates/compression/src/context/compress.rs +++ b/crates/compression/src/context/compress.rs @@ -1,6 +1,5 @@ use std::collections::HashMap; -use anyhow::bail; use fuel_core_types::{ fuel_compression::{ RawKey, @@ -10,6 +9,7 @@ use fuel_core_types::{ Address, AssetId, ContractId, + ScriptCode, }, }; @@ -17,7 +17,6 @@ use crate::{ 
db::RocksDb, eviction_policy::CacheEvictor, tables::{ - check_keyspace, PerRegistryKeyspace, PostcardSerialized, RegistryKeyspace, @@ -51,60 +50,25 @@ fn registry_substitute( } impl RegistrySubstitutableBy, anyhow::Error> for Address { - fn substitute( - &self, - keyspace: &str, - ctx: &mut CompressCtx<'_>, - ) -> anyhow::Result { - registry_substitute( - check_keyspace!(keyspace, RegistryKeyspace::address), - self, - ctx, - ) + fn substitute(&self, ctx: &mut CompressCtx<'_>) -> anyhow::Result { + registry_substitute(RegistryKeyspace::address, self, ctx) } } impl RegistrySubstitutableBy, anyhow::Error> for AssetId { - fn substitute( - &self, - keyspace: &str, - ctx: &mut CompressCtx<'_>, - ) -> anyhow::Result { - registry_substitute( - check_keyspace!(keyspace, RegistryKeyspace::asset_id), - self, - ctx, - ) + fn substitute(&self, ctx: &mut CompressCtx<'_>) -> anyhow::Result { + registry_substitute(RegistryKeyspace::asset_id, self, ctx) } } impl RegistrySubstitutableBy, anyhow::Error> for ContractId { - fn substitute( - &self, - keyspace: &str, - ctx: &mut CompressCtx<'_>, - ) -> anyhow::Result { - registry_substitute( - check_keyspace!(keyspace, RegistryKeyspace::contract_id), - self, - ctx, - ) + fn substitute(&self, ctx: &mut CompressCtx<'_>) -> anyhow::Result { + registry_substitute(RegistryKeyspace::contract_id, self, ctx) } } -impl RegistrySubstitutableBy, anyhow::Error> for Vec { - fn substitute( - &self, - keyspace: &str, - ctx: &mut CompressCtx<'_>, - ) -> anyhow::Result { - registry_substitute( - check_keyspace!( - keyspace, - RegistryKeyspace::script_code | RegistryKeyspace::witness - ), - self, - ctx, - ) +impl RegistrySubstitutableBy, anyhow::Error> for ScriptCode { + fn substitute(&self, ctx: &mut CompressCtx<'_>) -> anyhow::Result { + registry_substitute(RegistryKeyspace::script_code, self, ctx) } } diff --git a/crates/compression/src/context/decompress.rs b/crates/compression/src/context/decompress.rs index b5acbb7767d..9dff34a1919 100644 --- 
a/crates/compression/src/context/decompress.rs +++ b/crates/compression/src/context/decompress.rs @@ -1,4 +1,3 @@ -use anyhow::bail; use fuel_core_types::{ fuel_compression::{ RawKey, @@ -8,15 +7,13 @@ use fuel_core_types::{ Address, AssetId, ContractId, + ScriptCode, }, }; use crate::{ db::RocksDb, - tables::{ - check_keyspace, - RegistryKeyspace, - }, + tables::RegistryKeyspace, }; pub struct DecompressCtx<'a> { @@ -24,50 +21,25 @@ pub struct DecompressCtx<'a> { } impl RegistryDesubstitutableBy, anyhow::Error> for Address { - fn desubstitute( - c: &RawKey, - keyspace: &str, - ctx: &DecompressCtx, - ) -> anyhow::Result { - ctx.db - .read_registry(check_keyspace!(keyspace, RegistryKeyspace::address), *c) + fn desubstitute(c: &RawKey, ctx: &DecompressCtx) -> anyhow::Result { + ctx.db.read_registry(RegistryKeyspace::address, *c) } } impl RegistryDesubstitutableBy, anyhow::Error> for AssetId { - fn desubstitute( - c: &RawKey, - keyspace: &str, - ctx: &DecompressCtx, - ) -> anyhow::Result { - ctx.db - .read_registry(check_keyspace!(keyspace, RegistryKeyspace::asset_id), *c) + fn desubstitute(c: &RawKey, ctx: &DecompressCtx) -> anyhow::Result { + ctx.db.read_registry(RegistryKeyspace::asset_id, *c) } } impl RegistryDesubstitutableBy, anyhow::Error> for ContractId { - fn desubstitute( - c: &RawKey, - keyspace: &str, - ctx: &DecompressCtx, - ) -> anyhow::Result { - ctx.db - .read_registry(check_keyspace!(keyspace, RegistryKeyspace::contract_id), *c) + fn desubstitute(c: &RawKey, ctx: &DecompressCtx) -> anyhow::Result { + ctx.db.read_registry(RegistryKeyspace::contract_id, *c) } } -impl RegistryDesubstitutableBy, anyhow::Error> for Vec { - fn desubstitute( - c: &RawKey, - keyspace: &str, - ctx: &DecompressCtx, - ) -> anyhow::Result> { - ctx.db.read_registry( - check_keyspace!( - keyspace, - RegistryKeyspace::script_code | RegistryKeyspace::witness - ), - *c, - ) +impl RegistryDesubstitutableBy, anyhow::Error> for ScriptCode { + fn desubstitute(c: &RawKey, ctx: 
&DecompressCtx) -> anyhow::Result { + ctx.db.read_registry(RegistryKeyspace::script_code, *c) } } diff --git a/crates/compression/src/context/prepare.rs b/crates/compression/src/context/prepare.rs index 950cb2dc607..fccabf62d61 100644 --- a/crates/compression/src/context/prepare.rs +++ b/crates/compression/src/context/prepare.rs @@ -1,6 +1,5 @@ use std::collections::HashSet; -use anyhow::bail; use fuel_core_types::{ fuel_compression::{ RawKey, @@ -12,7 +11,6 @@ use fuel_core_types::{ use crate::{ db::RocksDb, tables::{ - check_keyspace, PerRegistryKeyspace, RegistryKeyspace, }, @@ -42,60 +40,25 @@ fn registry_prepare( } impl RegistrySubstitutableBy, anyhow::Error> for Address { - fn substitute( - &self, - keyspace: &str, - ctx: &mut PrepareCtx<'_>, - ) -> anyhow::Result { - registry_prepare( - check_keyspace!(keyspace, RegistryKeyspace::address), - self, - ctx, - ) + fn substitute(&self, ctx: &mut PrepareCtx<'_>) -> anyhow::Result { + registry_prepare(RegistryKeyspace::address, self, ctx) } } impl RegistrySubstitutableBy, anyhow::Error> for AssetId { - fn substitute( - &self, - keyspace: &str, - ctx: &mut PrepareCtx<'_>, - ) -> anyhow::Result { - registry_prepare( - check_keyspace!(keyspace, RegistryKeyspace::asset_id), - self, - ctx, - ) + fn substitute(&self, ctx: &mut PrepareCtx<'_>) -> anyhow::Result { + registry_prepare(RegistryKeyspace::asset_id, self, ctx) } } impl RegistrySubstitutableBy, anyhow::Error> for ContractId { - fn substitute( - &self, - keyspace: &str, - ctx: &mut PrepareCtx<'_>, - ) -> anyhow::Result { - registry_prepare( - check_keyspace!(keyspace, RegistryKeyspace::contract_id), - self, - ctx, - ) + fn substitute(&self, ctx: &mut PrepareCtx<'_>) -> anyhow::Result { + registry_prepare(RegistryKeyspace::contract_id, self, ctx) } } -impl RegistrySubstitutableBy, anyhow::Error> for Vec { - fn substitute( - &self, - keyspace: &str, - ctx: &mut PrepareCtx<'_>, - ) -> anyhow::Result { - registry_prepare( - check_keyspace!( - keyspace, - 
RegistryKeyspace::script_code | RegistryKeyspace::witness - ), - self, - ctx, - ) +impl RegistrySubstitutableBy, anyhow::Error> for ScriptCode { + fn substitute(&self, ctx: &mut PrepareCtx<'_>) -> anyhow::Result { + registry_prepare(RegistryKeyspace::script_code, self, ctx) } } diff --git a/crates/compression/src/services/compress.rs b/crates/compression/src/services/compress.rs index 34e36a8fbef..d33c0ac711a 100644 --- a/crates/compression/src/services/compress.rs +++ b/crates/compression/src/services/compress.rs @@ -1,4 +1,7 @@ -use fuel_core_types::fuel_compression::CompressibleBy; +use fuel_core_types::{ + fuel_compression::CompressibleBy, + fuel_tx::Transaction, +}; use tokio::sync::mpsc; use fuel_core_types::blockchain::block::Block; @@ -64,7 +67,8 @@ pub fn compress(db: &mut RocksDb, block: Block) -> Result, CompressError db, accessed_keys: PerRegistryKeyspace::default(), }; - let _ = target.compress(&mut prepare_ctx)?; + let _ = + as CompressibleBy<_, _>>::compress(&target, &mut prepare_ctx)?; let mut ctx = CompressCtx { db: prepare_ctx.db, diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index a9615ebe3ef..09f7ae28d26 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -20,19 +20,6 @@ impl PostcardSerialized { } } -macro_rules! check_keyspace { - ($keyspace:expr, $expected:pat) => { - match RegistryKeyspace::from_str($keyspace) { - Some(val @ $expected) => val, - Some(other) => { - bail!("Keyspace {other:?} not valid for {}", stringify!($expected)) - } - None => bail!("Unknown keyspace {:?}", $keyspace), - } - }; -} -pub(crate) use check_keyspace; - macro_rules! tables { ($($name:ident: $type:ty),*$(,)?) 
=> { #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -163,5 +150,4 @@ tables!( asset_id: AssetId, contract_id: ContractId, script_code: Vec, - witness: Vec, ); From b2bde19d86c1f170458755703ba5cc2b945020d1 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 4 Sep 2024 05:30:34 +0300 Subject: [PATCH 027/112] Update to latest vm changes --- crates/compression/src/context/compress.rs | 34 ++++++---- crates/compression/src/context/decompress.rs | 40 ++++++++--- crates/compression/src/context/prepare.rs | 36 +++++----- crates/compression/src/db.rs | 9 +-- crates/compression/src/eviction_policy.rs | 13 ++-- crates/compression/src/lib.rs | 66 ++++++++++--------- crates/compression/src/ports.rs | 33 +++++++--- crates/compression/src/services/compress.rs | 9 +-- crates/compression/src/services/decompress.rs | 7 +- crates/compression/src/tables.rs | 8 +-- crates/fuel-core/src/service/adapters.rs | 1 + .../src/service/adapters/da_compression.rs | 1 + 12 files changed, 158 insertions(+), 99 deletions(-) create mode 100644 crates/fuel-core/src/service/adapters/da_compression.rs diff --git a/crates/compression/src/context/compress.rs b/crates/compression/src/context/compress.rs index e8b1be1b495..dbca3b077c1 100644 --- a/crates/compression/src/context/compress.rs +++ b/crates/compression/src/context/compress.rs @@ -2,14 +2,16 @@ use std::collections::HashMap; use fuel_core_types::{ fuel_compression::{ - RawKey, - RegistrySubstitutableBy, + CompressibleBy, + RegistryKey, }, fuel_tx::{ Address, AssetId, + CompressibleTxId, ContractId, ScriptCode, + TxPointer, }, }; @@ -27,16 +29,16 @@ pub struct CompressCtx<'a> { pub db: &'a mut RocksDb, pub cache_evictor: CacheEvictor, /// Changes to the temporary registry, to be included in the compressed block header - pub changes: PerRegistryKeyspace>, + pub changes: PerRegistryKeyspace>, } fn registry_substitute( keyspace: RegistryKeyspace, value: &T, ctx: &mut CompressCtx<'_>, -) -> anyhow::Result { +) -> anyhow::Result { if 
*value == T::default() { - return Ok(RawKey::DEFAULT_VALUE); + return Ok(RegistryKey::DEFAULT_VALUE); } if let Some(found) = ctx.db.registry_index_lookup(keyspace, value)? { @@ -49,26 +51,32 @@ fn registry_substitute( Ok(key) } -impl RegistrySubstitutableBy, anyhow::Error> for Address { - fn substitute(&self, ctx: &mut CompressCtx<'_>) -> anyhow::Result { +impl<'a> CompressibleBy, anyhow::Error> for Address { + async fn compress(&self, ctx: &mut CompressCtx<'a>) -> anyhow::Result { registry_substitute(RegistryKeyspace::address, self, ctx) } } -impl RegistrySubstitutableBy, anyhow::Error> for AssetId { - fn substitute(&self, ctx: &mut CompressCtx<'_>) -> anyhow::Result { +impl<'a> CompressibleBy, anyhow::Error> for AssetId { + async fn compress(&self, ctx: &mut CompressCtx<'a>) -> anyhow::Result { registry_substitute(RegistryKeyspace::asset_id, self, ctx) } } -impl RegistrySubstitutableBy, anyhow::Error> for ContractId { - fn substitute(&self, ctx: &mut CompressCtx<'_>) -> anyhow::Result { +impl<'a> CompressibleBy, anyhow::Error> for ContractId { + async fn compress(&self, ctx: &mut CompressCtx<'a>) -> anyhow::Result { registry_substitute(RegistryKeyspace::contract_id, self, ctx) } } -impl RegistrySubstitutableBy, anyhow::Error> for ScriptCode { - fn substitute(&self, ctx: &mut CompressCtx<'_>) -> anyhow::Result { +impl<'a> CompressibleBy, anyhow::Error> for ScriptCode { + async fn compress(&self, ctx: &mut CompressCtx<'a>) -> anyhow::Result { registry_substitute(RegistryKeyspace::script_code, self, ctx) } } + +impl<'a> CompressibleBy, anyhow::Error> for CompressibleTxId { + async fn compress(&self, ctx: &mut CompressCtx<'a>) -> anyhow::Result { + todo!(); + } +} diff --git a/crates/compression/src/context/decompress.rs b/crates/compression/src/context/decompress.rs index 9dff34a1919..8aa7e13efcb 100644 --- a/crates/compression/src/context/decompress.rs +++ b/crates/compression/src/context/decompress.rs @@ -1,13 +1,15 @@ use fuel_core_types::{ fuel_compression::{ - 
RawKey, - RegistryDesubstitutableBy, + DecompressibleBy, + RegistryKey, }, fuel_tx::{ Address, AssetId, + CompressibleTxId, ContractId, ScriptCode, + TxPointer, }, }; @@ -20,26 +22,44 @@ pub struct DecompressCtx<'a> { pub db: &'a RocksDb, } -impl RegistryDesubstitutableBy, anyhow::Error> for Address { - fn desubstitute(c: &RawKey, ctx: &DecompressCtx) -> anyhow::Result { +impl<'a> DecompressibleBy, anyhow::Error> for Address { + async fn decompress( + c: &RegistryKey, + ctx: &DecompressCtx<'a>, + ) -> anyhow::Result { ctx.db.read_registry(RegistryKeyspace::address, *c) } } -impl RegistryDesubstitutableBy, anyhow::Error> for AssetId { - fn desubstitute(c: &RawKey, ctx: &DecompressCtx) -> anyhow::Result { +impl<'a> DecompressibleBy, anyhow::Error> for AssetId { + async fn decompress( + c: &RegistryKey, + ctx: &DecompressCtx<'a>, + ) -> anyhow::Result { ctx.db.read_registry(RegistryKeyspace::asset_id, *c) } } -impl RegistryDesubstitutableBy, anyhow::Error> for ContractId { - fn desubstitute(c: &RawKey, ctx: &DecompressCtx) -> anyhow::Result { +impl<'a> DecompressibleBy, anyhow::Error> for ContractId { + async fn decompress( + c: &RegistryKey, + ctx: &DecompressCtx<'a>, + ) -> anyhow::Result { ctx.db.read_registry(RegistryKeyspace::contract_id, *c) } } -impl RegistryDesubstitutableBy, anyhow::Error> for ScriptCode { - fn desubstitute(c: &RawKey, ctx: &DecompressCtx) -> anyhow::Result { +impl<'a> DecompressibleBy, anyhow::Error> for ScriptCode { + async fn decompress( + c: &RegistryKey, + ctx: &DecompressCtx<'a>, + ) -> anyhow::Result { ctx.db.read_registry(RegistryKeyspace::script_code, *c) } } + +impl<'a> DecompressibleBy, anyhow::Error> for CompressibleTxId { + async fn decompress(c: &TxPointer, ctx: &DecompressCtx<'a>) -> anyhow::Result { + todo!(); + } +} diff --git a/crates/compression/src/context/prepare.rs b/crates/compression/src/context/prepare.rs index fccabf62d61..1c46477fcea 100644 --- a/crates/compression/src/context/prepare.rs +++ 
b/crates/compression/src/context/prepare.rs @@ -2,8 +2,8 @@ use std::collections::HashSet; use fuel_core_types::{ fuel_compression::{ - RawKey, - RegistrySubstitutableBy, + CompressibleBy, + RegistryKey, }, fuel_tx::*, }; @@ -17,48 +17,54 @@ use crate::{ }; /// Preparation pass through the block to collect all keys accessed during compression. -/// Returns placeholder. The resulting "compressed block" should be discarded. +/// Returns dummy values. The resulting "compressed block" should be discarded. pub struct PrepareCtx<'a> { /// Database handle pub db: &'a mut RocksDb, /// Keys accessed during compression. Will not be overwritten. - pub accessed_keys: PerRegistryKeyspace>, + pub accessed_keys: PerRegistryKeyspace>, } fn registry_prepare( keyspace: RegistryKeyspace, value: &T, ctx: &mut PrepareCtx<'_>, -) -> anyhow::Result { +) -> anyhow::Result { if *value == T::default() { - return Ok(RawKey::ZERO); + return Ok(RegistryKey::ZERO); } if let Some(found) = ctx.db.registry_index_lookup(keyspace, value)? 
{ ctx.accessed_keys[keyspace].insert(found); } - Ok(RawKey::ZERO) + Ok(RegistryKey::ZERO) } -impl RegistrySubstitutableBy, anyhow::Error> for Address { - fn substitute(&self, ctx: &mut PrepareCtx<'_>) -> anyhow::Result { +impl<'a> CompressibleBy, anyhow::Error> for Address { + async fn compress(&self, ctx: &mut PrepareCtx<'a>) -> anyhow::Result { registry_prepare(RegistryKeyspace::address, self, ctx) } } -impl RegistrySubstitutableBy, anyhow::Error> for AssetId { - fn substitute(&self, ctx: &mut PrepareCtx<'_>) -> anyhow::Result { +impl<'a> CompressibleBy, anyhow::Error> for AssetId { + async fn compress(&self, ctx: &mut PrepareCtx<'a>) -> anyhow::Result { registry_prepare(RegistryKeyspace::asset_id, self, ctx) } } -impl RegistrySubstitutableBy, anyhow::Error> for ContractId { - fn substitute(&self, ctx: &mut PrepareCtx<'_>) -> anyhow::Result { +impl<'a> CompressibleBy, anyhow::Error> for ContractId { + async fn compress(&self, ctx: &mut PrepareCtx<'a>) -> anyhow::Result { registry_prepare(RegistryKeyspace::contract_id, self, ctx) } } -impl RegistrySubstitutableBy, anyhow::Error> for ScriptCode { - fn substitute(&self, ctx: &mut PrepareCtx<'_>) -> anyhow::Result { +impl<'a> CompressibleBy, anyhow::Error> for ScriptCode { + async fn compress(&self, ctx: &mut PrepareCtx<'a>) -> anyhow::Result { registry_prepare(RegistryKeyspace::script_code, self, ctx) } } + +impl<'a> CompressibleBy, anyhow::Error> for CompressibleTxId { + async fn compress(&self, ctx: &mut PrepareCtx<'a>) -> anyhow::Result { + Ok(TxPointer::default()) + } +} diff --git a/crates/compression/src/db.rs b/crates/compression/src/db.rs index 36126a26319..db673c523c0 100644 --- a/crates/compression/src/db.rs +++ b/crates/compression/src/db.rs @@ -2,12 +2,13 @@ use std::path::Path; use anyhow::bail; use fuel_core_types::{ - fuel_compression::RawKey, + fuel_compression::RegistryKey, fuel_types::BlockHeight, }; use crate::tables::RegistryKeyspace; +/// Database that holds data needed by the block compression 
only pub struct RocksDb { pub(crate) db: rocksdb::DB, } @@ -46,12 +47,12 @@ impl RocksDb { pub fn read_registry( &self, keyspace: RegistryKeyspace, - key: RawKey, + key: RegistryKey, ) -> anyhow::Result where T: serde::de::DeserializeOwned + Default, { - if key == RawKey::DEFAULT_VALUE { + if key == RegistryKey::DEFAULT_VALUE { return Ok(T::default()); } @@ -72,7 +73,7 @@ impl RocksDb { &self, keyspace: RegistryKeyspace, value: V, - ) -> anyhow::Result> { + ) -> anyhow::Result> { let db_key: Vec = keyspace.name().bytes().chain(core::iter::once(0)).collect(); let db_key = postcard::to_extend(&value, db_key).expect("Never fails"); diff --git a/crates/compression/src/eviction_policy.rs b/crates/compression/src/eviction_policy.rs index 81179678dd9..be524c97bb1 100644 --- a/crates/compression/src/eviction_policy.rs +++ b/crates/compression/src/eviction_policy.rs @@ -1,6 +1,6 @@ use std::collections::HashSet; -use fuel_core_types::fuel_compression::RawKey; +use fuel_core_types::fuel_compression::RegistryKey; use crate::tables::{ PerRegistryKeyspace, @@ -9,19 +9,22 @@ use crate::tables::{ pub struct CacheEvictor { /// Set of keys that must not be evicted - pub keep_keys: PerRegistryKeyspace>, + pub keep_keys: PerRegistryKeyspace>, } impl CacheEvictor { /// Get a key, evicting an old value if necessary - pub fn next_key(&mut self, keyspace: RegistryKeyspace) -> anyhow::Result { + pub fn next_key( + &mut self, + keyspace: RegistryKeyspace, + ) -> anyhow::Result { // Pick first key not in the set // TODO: this can be optimized by keeping a counter of the last key used // TODO: use a proper algo, maybe LRU? 
- let mut key = RawKey::ZERO; + let mut key = RegistryKey::ZERO; while self.keep_keys[keyspace].contains(&key) { key = key.next(); - assert_ne!(key, RawKey::ZERO, "Ran out of keys"); + assert_ne!(key, RegistryKey::ZERO, "Ran out of keys"); } self.keep_keys[keyspace].insert(key); diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index d475908adfb..b522ec2aaf3 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -57,8 +57,6 @@ struct CompressedBlockPayload { #[cfg(test)] mod tests { - use std::array; - use db::RocksDb; use fuel_core_types::{ blockchain::{ @@ -83,8 +81,8 @@ mod tests { use super::*; - #[test] - fn postcard_roundtrip() { + #[tokio::test] + async fn postcard_roundtrip() { let original = CompressedBlockPayload { registrations: RegistrationsPerTable::default(), header: Header { @@ -113,15 +111,16 @@ mod tests { assert!(transactions.is_empty()); } - #[test] - fn same_compact_tx_is_smaller_in_next_block() { + #[tokio::test] + async fn same_compact_tx_is_smaller_in_next_block() { let tx = Transaction::default_test_tx(); let tmpdir = TempDir::new().unwrap(); let mut db = RocksDb::open(tmpdir.path()).unwrap(); - let sizes: [usize; 3] = array::from_fn(|h| { - services::compress::compress( + let mut sizes = Vec::new(); + for i in 0..3 { + let compressed = services::compress::compress( &mut db, Block::new( PartialBlockHeader { @@ -133,7 +132,7 @@ mod tests { }, consensus: ConsensusHeader { prev_root: Bytes32::default(), - height: (h as u32).into(), + height: i.into(), time: Tai64::UNIX_EPOCH, generated: Empty, }, @@ -144,9 +143,10 @@ mod tests { ) .expect("Invalid block header"), ) - .unwrap() - .len() - }); + .await + .unwrap(); + sizes.push(compressed.len()); + } assert!(sizes[0] > sizes[1], "Size must decrease after first block"); assert!( @@ -155,15 +155,18 @@ mod tests { ); } - #[test] - fn compress_decompress_roundtrip() { + #[tokio::test] + async fn compress_decompress_roundtrip() { let tx = 
Transaction::default_test_tx(); let tmpdir = TempDir::new().unwrap(); let mut db = RocksDb::open(tmpdir.path()).unwrap(); - let original_blocks: [Block; 3] = array::from_fn(|h| { - Block::new( + let mut original_blocks = Vec::new(); + let mut compressed_blocks = Vec::new(); + + for i in 0..3 { + let block = Block::new( PartialBlockHeader { application: ApplicationHeader { da_height: DaBlockHeight::default(), @@ -173,7 +176,7 @@ mod tests { }, consensus: ConsensusHeader { prev_root: Bytes32::default(), - height: (h as u32).into(), + height: i.into(), time: Tai64::UNIX_EPOCH, generated: Empty, }, @@ -182,27 +185,28 @@ mod tests { &[], Bytes32::default(), ) - .expect("Invalid block header") - }); - - let compressed_bytes: [Vec; 3] = original_blocks - .clone() - .map(|block| services::compress::compress(&mut db, block).unwrap()); + .expect("Invalid block header"); + original_blocks.push(block.clone()); + compressed_blocks.push( + services::compress::compress(&mut db, block) + .await + .expect("Failed to compress"), + ); + } db.db.flush().unwrap(); drop(tmpdir); let tmpdir2 = TempDir::new().unwrap(); let mut db = RocksDb::open(tmpdir2.path()).unwrap(); - let decompressed_blocks: [PartialFuelBlock; 3] = array::from_fn(|h| { - services::decompress::decompress(&mut db, compressed_bytes[h].clone()) - .expect("Decompression failed") - }); - - for (original, decompressed) in - original_blocks.iter().zip(decompressed_blocks.iter()) + for (original, compressed) in original_blocks + .into_iter() + .zip(compressed_blocks.into_iter()) { - assert_eq!(PartialFuelBlock::from(original.clone()), *decompressed); + let decompressed = services::decompress::decompress(&mut db, compressed) + .await + .expect("Decompression failed"); + assert_eq!(PartialFuelBlock::from(original), decompressed); } } } diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index 38f270fea2b..0a02ea28883 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs 
@@ -1,15 +1,28 @@ -//! Ports this services requires to function. +//! Ports this service requires to function. -use fuel_core_types::blockchain::block::Block; +use fuel_core_types::fuel_tx::{ + TxId, + TxPointer, +}; #[async_trait::async_trait] -pub trait CompressPort { - /// Compress the next block. - async fn compress_next(&mut self, block: Block) -> anyhow::Result>; +pub trait TxIdRelationTxPointer { + async fn lookup_by_txid(&self, tx_id: TxId) -> anyhow::Result; + async fn lookup_by_txpointer(&self, tx_pointer: TxPointer) -> anyhow::Result; } -#[async_trait::async_trait] -pub trait DecompressPort { - /// Decompress the next block. - async fn decompress_next(&mut self, block: Vec) -> anyhow::Result; -} +// Exposed interfaces: where should these live? + +// use fuel_core_types::blockchain::block::Block; + +// #[async_trait::async_trait] +// pub trait CompressPort { +// /// Compress the next block. +// async fn compress_next(&mut self, block: Block) -> anyhow::Result>; +// } + +// #[async_trait::async_trait] +// pub trait DecompressPort { +// /// Decompress the next block. +// async fn decompress_next(&mut self, block: Vec) -> anyhow::Result; +// } diff --git a/crates/compression/src/services/compress.rs b/crates/compression/src/services/compress.rs index d33c0ac711a..3ad4436b826 100644 --- a/crates/compression/src/services/compress.rs +++ b/crates/compression/src/services/compress.rs @@ -49,14 +49,14 @@ pub async fn run(mut db: RocksDb, mut request_receiver: mpsc::Receiver { - let reply = compress(&mut db, block); + let reply = compress(&mut db, block).await; response.send(reply).await.expect("Failed to respond"); } } } } -pub fn compress(db: &mut RocksDb, block: Block) -> Result, CompressError> { +pub async fn compress(db: &mut RocksDb, block: Block) -> Result, CompressError> { if *block.header().height() != db.next_block_height()? 
{ return Err(CompressError::NotLatest); } @@ -68,7 +68,8 @@ pub fn compress(db: &mut RocksDb, block: Block) -> Result, CompressError accessed_keys: PerRegistryKeyspace::default(), }; let _ = - as CompressibleBy<_, _>>::compress(&target, &mut prepare_ctx)?; + as CompressibleBy<_, _>>::compress(&target, &mut prepare_ctx) + .await?; let mut ctx = CompressCtx { db: prepare_ctx.db, @@ -77,7 +78,7 @@ pub fn compress(db: &mut RocksDb, block: Block) -> Result, CompressError }, changes: Default::default(), }; - let transactions = target.compress(&mut ctx)?; + let transactions = target.compress(&mut ctx).await?; let registrations = ctx.changes; let registrations = RegistrationsPerTable::try_from(registrations)?; diff --git a/crates/compression/src/services/decompress.rs b/crates/compression/src/services/decompress.rs index 50c381fec39..a5872aecc90 100644 --- a/crates/compression/src/services/decompress.rs +++ b/crates/compression/src/services/decompress.rs @@ -58,14 +58,14 @@ pub async fn run(mut db: RocksDb, mut request_receiver: mpsc::Receiver { - let reply = decompress(&mut db, block); + let reply = decompress(&mut db, block).await; response.send(reply).await.expect("Failed to respond"); } } } } -pub fn decompress( +pub async fn decompress( db: &mut RocksDb, block: Vec, ) -> Result { @@ -87,7 +87,8 @@ pub fn decompress( let transactions = as DecompressibleBy<_, _>>::decompress( &compressed.transactions, &ctx, - )?; + ) + .await?; Ok(PartialFuelBlock { header: PartialBlockHeader { diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index 09f7ae28d26..ca583205809 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -1,6 +1,6 @@ use super::db::RocksDb; use fuel_core_types::{ - fuel_compression::RawKey, + fuel_compression::RegistryKey, fuel_tx::{ Address, AssetId, @@ -75,13 +75,13 @@ macro_rules! 
tables { #[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)] pub struct RegistrationsPerTable { - $(pub $name: Vec<(RawKey, $type)>,)* + $(pub $name: Vec<(RegistryKey, $type)>,)* } - impl TryFrom>> for RegistrationsPerTable { + impl TryFrom>> for RegistrationsPerTable { type Error = anyhow::Error; - fn try_from(value: PerRegistryKeyspace>) -> Result { + fn try_from(value: PerRegistryKeyspace>) -> Result { let mut result = Self::default(); $( for (key, value) in value.$name.into_iter() { diff --git a/crates/fuel-core/src/service/adapters.rs b/crates/fuel-core/src/service/adapters.rs index 2caa2edfb68..876fc6abd0b 100644 --- a/crates/fuel-core/src/service/adapters.rs +++ b/crates/fuel-core/src/service/adapters.rs @@ -38,6 +38,7 @@ use crate::{ pub mod block_importer; pub mod consensus_module; pub mod consensus_parameters_provider; +pub mod da_compression; pub mod executor; pub mod fuel_gas_price_provider; pub mod gas_price_adapters; diff --git a/crates/fuel-core/src/service/adapters/da_compression.rs b/crates/fuel-core/src/service/adapters/da_compression.rs new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/crates/fuel-core/src/service/adapters/da_compression.rs @@ -0,0 +1 @@ + From 0431104e553ce2a36fbdc6ce49d9c47e1fa3ab7f Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 4 Sep 2024 06:04:51 +0300 Subject: [PATCH 028/112] Mock tx id and pointer lookups for tests --- Cargo.lock | 7 + crates/compression/Cargo.toml | 1 + crates/compression/src/compression_tests.rs | 171 ++++++++++++++++++ crates/compression/src/context/compress.rs | 4 +- crates/compression/src/context/decompress.rs | 4 +- crates/compression/src/context/prepare.rs | 2 +- crates/compression/src/lib.rs | 128 +------------ crates/compression/src/ports.rs | 10 +- crates/compression/src/services/compress.rs | 16 +- crates/compression/src/services/decompress.rs | 13 +- 10 files changed, 221 insertions(+), 135 deletions(-) create mode 100644 
crates/compression/src/compression_tests.rs diff --git a/Cargo.lock b/Cargo.lock index 3d6314fbfdd..884fbca9c0b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1114,6 +1114,12 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" +[[package]] +name = "bimap" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" + [[package]] name = "bincode" version = "1.3.3" @@ -3354,6 +3360,7 @@ version = "0.35.0" dependencies = [ "anyhow", "async-trait", + "bimap", "bincode", "fuel-core-compression", "fuel-core-types", diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 23ca8400227..9a525017a0a 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -21,6 +21,7 @@ tokio = { workspace = true, features = ["sync"] } [dev-dependencies] bincode = { version = "1.3" } +bimap = { version = "0.6" } fuel-core-compression = { workspace = true, features = ["test-helpers"] } tempfile = "3" diff --git a/crates/compression/src/compression_tests.rs b/crates/compression/src/compression_tests.rs new file mode 100644 index 00000000000..f74b144a34e --- /dev/null +++ b/crates/compression/src/compression_tests.rs @@ -0,0 +1,171 @@ +use std::sync::{ + Arc, + Mutex, +}; + +use bimap::BiMap; +use fuel_core_types::{ + blockchain::{ + block::{ + Block, + PartialFuelBlock, + }, + header::{ + ApplicationHeader, + ConsensusHeader, + PartialBlockHeader, + }, + primitives::{ + DaBlockHeight, + Empty, + }, + }, + fuel_tx::{ + Bytes32, + Transaction, + TxId, + TxPointer, + }, + tai64::Tai64, +}; +use tempfile::TempDir; + +use crate::{ + db::RocksDb, + ports::{ + TxIdToPointer, + TxPointerToId, + }, + services, +}; + +/// Just stores the looked-up tx pointers in a map, instead of actually looking them up. 
+#[derive(Default)] +pub struct MockTxDb { + mapping: Arc>>, +} + +#[async_trait::async_trait] +impl TxIdToPointer for MockTxDb { + async fn lookup(&self, tx_id: TxId) -> anyhow::Result { + let mut g = self.mapping.lock().unwrap(); + if !g.contains_left(&tx_id) { + let key = g.len() as u32; // Just obtain an unique key + g.insert(tx_id, TxPointer::new(key.into(), 0)); + } + Ok(g.get_by_left(&tx_id).cloned().unwrap()) + } +} + +#[async_trait::async_trait] +impl TxPointerToId for MockTxDb { + async fn lookup(&self, tx_pointer: TxPointer) -> anyhow::Result { + let g = self.mapping.lock().unwrap(); + g.get_by_right(&tx_pointer).cloned().ok_or_else(|| { + anyhow::anyhow!("TxPointer not found in mock db: {:?}", tx_pointer) + }) + } +} + +#[tokio::test] +async fn same_compact_tx_is_smaller_in_next_block() { + let tx = Transaction::default_test_tx(); + + let tmpdir = TempDir::new().unwrap(); + + let mut db = RocksDb::open(tmpdir.path()).unwrap(); + let tx_db = MockTxDb::default(); + + let mut sizes = Vec::new(); + for i in 0..3 { + let compressed = services::compress::compress( + &mut db, + &tx_db, + Block::new( + PartialBlockHeader { + application: ApplicationHeader { + da_height: DaBlockHeight::default(), + consensus_parameters_version: 4, + state_transition_bytecode_version: 5, + generated: Empty, + }, + consensus: ConsensusHeader { + prev_root: Bytes32::default(), + height: i.into(), + time: Tai64::UNIX_EPOCH, + generated: Empty, + }, + }, + vec![tx.clone()], + &[], + Bytes32::default(), + ) + .expect("Invalid block header"), + ) + .await + .unwrap(); + sizes.push(compressed.len()); + } + + assert!(sizes[0] > sizes[1], "Size must decrease after first block"); + assert!( + sizes[1] == sizes[2], + "Size must be constant after first block" + ); +} + +#[tokio::test] +async fn compress_decompress_roundtrip() { + let tx = Transaction::default_test_tx(); + + let tmpdir = TempDir::new().unwrap(); + let mut db = RocksDb::open(tmpdir.path()).unwrap(); + let tx_db = 
MockTxDb::default(); + + let mut original_blocks = Vec::new(); + let mut compressed_blocks = Vec::new(); + + for i in 0..3 { + let block = Block::new( + PartialBlockHeader { + application: ApplicationHeader { + da_height: DaBlockHeight::default(), + consensus_parameters_version: 4, + state_transition_bytecode_version: 5, + generated: Empty, + }, + consensus: ConsensusHeader { + prev_root: Bytes32::default(), + height: i.into(), + time: Tai64::UNIX_EPOCH, + generated: Empty, + }, + }, + vec![tx.clone()], + &[], + Bytes32::default(), + ) + .expect("Invalid block header"); + original_blocks.push(block.clone()); + compressed_blocks.push( + services::compress::compress(&mut db, &tx_db, block) + .await + .expect("Failed to compress"), + ); + } + + db.db.flush().unwrap(); + drop(tmpdir); + let tmpdir2 = TempDir::new().unwrap(); + let mut db = RocksDb::open(tmpdir2.path()).unwrap(); + + for (original, compressed) in original_blocks + .into_iter() + .zip(compressed_blocks.into_iter()) + { + let decompressed = services::decompress::decompress(&mut db, &tx_db, compressed) + .await + .expect("Decompression failed"); + assert_eq!(PartialFuelBlock::from(original), decompressed); + } +} diff --git a/crates/compression/src/context/compress.rs b/crates/compression/src/context/compress.rs index dbca3b077c1..001ce99ebb8 100644 --- a/crates/compression/src/context/compress.rs +++ b/crates/compression/src/context/compress.rs @@ -18,6 +18,7 @@ use fuel_core_types::{ use crate::{ db::RocksDb, eviction_policy::CacheEvictor, + ports::TxIdToPointer, tables::{ PerRegistryKeyspace, PostcardSerialized, @@ -27,6 +28,7 @@ use crate::{ pub struct CompressCtx<'a> { pub db: &'a mut RocksDb, + pub tx_lookup: &'a dyn TxIdToPointer, pub cache_evictor: CacheEvictor, /// Changes to the temporary registry, to be included in the compressed block header pub changes: PerRegistryKeyspace>, @@ -77,6 +79,6 @@ impl<'a> CompressibleBy, anyhow::Error> for ScriptCode { impl<'a> CompressibleBy, anyhow::Error> for 
CompressibleTxId { async fn compress(&self, ctx: &mut CompressCtx<'a>) -> anyhow::Result { - todo!(); + ctx.tx_lookup.lookup(**self).await } } diff --git a/crates/compression/src/context/decompress.rs b/crates/compression/src/context/decompress.rs index 8aa7e13efcb..d37355fa5ee 100644 --- a/crates/compression/src/context/decompress.rs +++ b/crates/compression/src/context/decompress.rs @@ -15,11 +15,13 @@ use fuel_core_types::{ use crate::{ db::RocksDb, + ports::TxPointerToId, tables::RegistryKeyspace, }; pub struct DecompressCtx<'a> { pub db: &'a RocksDb, + pub tx_lookup: &'a dyn TxPointerToId, } impl<'a> DecompressibleBy, anyhow::Error> for Address { @@ -60,6 +62,6 @@ impl<'a> DecompressibleBy, anyhow::Error> for ScriptCode { impl<'a> DecompressibleBy, anyhow::Error> for CompressibleTxId { async fn decompress(c: &TxPointer, ctx: &DecompressCtx<'a>) -> anyhow::Result { - todo!(); + Ok(ctx.tx_lookup.lookup(*c).await?.into()) } } diff --git a/crates/compression/src/context/prepare.rs b/crates/compression/src/context/prepare.rs index 1c46477fcea..4963905027a 100644 --- a/crates/compression/src/context/prepare.rs +++ b/crates/compression/src/context/prepare.rs @@ -64,7 +64,7 @@ impl<'a> CompressibleBy, anyhow::Error> for ScriptCode { } impl<'a> CompressibleBy, anyhow::Error> for CompressibleTxId { - async fn compress(&self, ctx: &mut PrepareCtx<'a>) -> anyhow::Result { + async fn compress(&self, _ctx: &mut PrepareCtx<'a>) -> anyhow::Result { Ok(TxPointer::default()) } } diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index b522ec2aaf3..5990d531a53 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -12,6 +12,9 @@ mod context { pub mod prepare; } +#[cfg(test)] +mod compression_tests; + use serde::{ Deserialize, Serialize, @@ -57,32 +60,10 @@ struct CompressedBlockPayload { #[cfg(test)] mod tests { - use db::RocksDb; - use fuel_core_types::{ - blockchain::{ - block::{ - Block, - PartialFuelBlock, - }, - header::{ - 
ApplicationHeader, - ConsensusHeader, - PartialBlockHeader, - }, - primitives::{ - DaBlockHeight, - Empty, - }, - }, - fuel_tx::Transaction, - tai64::Tai64, - }; - use tempfile::TempDir; - use super::*; - #[tokio::test] - async fn postcard_roundtrip() { + #[test] + fn postcard_roundtrip() { let original = CompressedBlockPayload { registrations: RegistrationsPerTable::default(), header: Header { @@ -110,103 +91,4 @@ mod tests { assert_eq!(header.height, 3u32.into()); assert!(transactions.is_empty()); } - - #[tokio::test] - async fn same_compact_tx_is_smaller_in_next_block() { - let tx = Transaction::default_test_tx(); - - let tmpdir = TempDir::new().unwrap(); - let mut db = RocksDb::open(tmpdir.path()).unwrap(); - - let mut sizes = Vec::new(); - for i in 0..3 { - let compressed = services::compress::compress( - &mut db, - Block::new( - PartialBlockHeader { - application: ApplicationHeader { - da_height: DaBlockHeight::default(), - consensus_parameters_version: 4, - state_transition_bytecode_version: 5, - generated: Empty, - }, - consensus: ConsensusHeader { - prev_root: Bytes32::default(), - height: i.into(), - time: Tai64::UNIX_EPOCH, - generated: Empty, - }, - }, - vec![tx.clone()], - &[], - Bytes32::default(), - ) - .expect("Invalid block header"), - ) - .await - .unwrap(); - sizes.push(compressed.len()); - } - - assert!(sizes[0] > sizes[1], "Size must decrease after first block"); - assert!( - sizes[1] == sizes[2], - "Size must be constant after first block" - ); - } - - #[tokio::test] - async fn compress_decompress_roundtrip() { - let tx = Transaction::default_test_tx(); - - let tmpdir = TempDir::new().unwrap(); - let mut db = RocksDb::open(tmpdir.path()).unwrap(); - - let mut original_blocks = Vec::new(); - let mut compressed_blocks = Vec::new(); - - for i in 0..3 { - let block = Block::new( - PartialBlockHeader { - application: ApplicationHeader { - da_height: DaBlockHeight::default(), - consensus_parameters_version: 4, - state_transition_bytecode_version: 5, 
- generated: Empty, - }, - consensus: ConsensusHeader { - prev_root: Bytes32::default(), - height: i.into(), - time: Tai64::UNIX_EPOCH, - generated: Empty, - }, - }, - vec![tx.clone()], - &[], - Bytes32::default(), - ) - .expect("Invalid block header"); - original_blocks.push(block.clone()); - compressed_blocks.push( - services::compress::compress(&mut db, block) - .await - .expect("Failed to compress"), - ); - } - - db.db.flush().unwrap(); - drop(tmpdir); - let tmpdir2 = TempDir::new().unwrap(); - let mut db = RocksDb::open(tmpdir2.path()).unwrap(); - - for (original, compressed) in original_blocks - .into_iter() - .zip(compressed_blocks.into_iter()) - { - let decompressed = services::decompress::decompress(&mut db, compressed) - .await - .expect("Decompression failed"); - assert_eq!(PartialFuelBlock::from(original), decompressed); - } - } } diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index 0a02ea28883..a899547ae33 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -6,9 +6,13 @@ use fuel_core_types::fuel_tx::{ }; #[async_trait::async_trait] -pub trait TxIdRelationTxPointer { - async fn lookup_by_txid(&self, tx_id: TxId) -> anyhow::Result; - async fn lookup_by_txpointer(&self, tx_pointer: TxPointer) -> anyhow::Result; +pub trait TxIdToPointer { + async fn lookup(&self, tx_id: TxId) -> anyhow::Result; +} + +#[async_trait::async_trait] +pub trait TxPointerToId { + async fn lookup(&self, tx_pointer: TxPointer) -> anyhow::Result; } // Exposed interfaces: where should these live? 
diff --git a/crates/compression/src/services/compress.rs b/crates/compression/src/services/compress.rs index 3ad4436b826..114c4888e4c 100644 --- a/crates/compression/src/services/compress.rs +++ b/crates/compression/src/services/compress.rs @@ -13,6 +13,7 @@ use crate::{ }, db::RocksDb, eviction_policy::CacheEvictor, + ports::TxIdToPointer, tables::{ PerRegistryKeyspace, RegistrationsPerTable, @@ -45,18 +46,26 @@ impl From for CompressError { } } -pub async fn run(mut db: RocksDb, mut request_receiver: mpsc::Receiver) { +pub async fn run( + mut db: RocksDb, + tx_lookup: Box, + mut request_receiver: mpsc::Receiver, +) { while let Some(req) = request_receiver.recv().await { match req { TaskRequest::Compress { block, response } => { - let reply = compress(&mut db, block).await; + let reply = compress(&mut db, &*tx_lookup, block).await; response.send(reply).await.expect("Failed to respond"); } } } } -pub async fn compress(db: &mut RocksDb, block: Block) -> Result, CompressError> { +pub async fn compress( + db: &mut RocksDb, + tx_lookup: &dyn TxIdToPointer, + block: Block, +) -> Result, CompressError> { if *block.header().height() != db.next_block_height()? 
{ return Err(CompressError::NotLatest); } @@ -73,6 +82,7 @@ pub async fn compress(db: &mut RocksDb, block: Block) -> Result, Compres let mut ctx = CompressCtx { db: prepare_ctx.db, + tx_lookup, cache_evictor: CacheEvictor { keep_keys: prepare_ctx.accessed_keys, }, diff --git a/crates/compression/src/services/decompress.rs b/crates/compression/src/services/decompress.rs index a5872aecc90..795c6d5c049 100644 --- a/crates/compression/src/services/decompress.rs +++ b/crates/compression/src/services/decompress.rs @@ -17,6 +17,7 @@ use fuel_core_types::{ use crate::{ context::decompress::DecompressCtx, db::RocksDb, + ports::TxPointerToId, CompressedBlockPayload, }; @@ -54,11 +55,16 @@ impl From for DecompressError { } } -pub async fn run(mut db: RocksDb, mut request_receiver: mpsc::Receiver) { +pub async fn run( + mut db: RocksDb, + tx_lookup: Box, + + mut request_receiver: mpsc::Receiver, +) { while let Some(req) = request_receiver.recv().await { match req { TaskRequest::Decompress { block, response } => { - let reply = decompress(&mut db, block).await; + let reply = decompress(&mut db, &*tx_lookup, block).await; response.send(reply).await.expect("Failed to respond"); } } @@ -67,6 +73,7 @@ pub async fn run(mut db: RocksDb, mut request_receiver: mpsc::Receiver, ) -> Result { if block.is_empty() || block[0] != 0 { @@ -82,7 +89,7 @@ pub async fn decompress( compressed.registrations.write_to_db(db)?; - let ctx = DecompressCtx { db }; + let ctx = DecompressCtx { db, tx_lookup }; let transactions = as DecompressibleBy<_, _>>::decompress( &compressed.transactions, From 9b2965b3ebe4c333da2089fced41245b782a738b Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 4 Sep 2024 06:14:41 +0300 Subject: [PATCH 029/112] cargo sort --- Cargo.toml | 2 +- crates/compression/Cargo.toml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index df830f9cf0c..47f8770103d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,10 +6,10 @@ members = [ 
"bin/fuel-core", "bin/fuel-core-client", "bin/keygen", - "crates/compression", "crates/chain-config", "crates/client", "crates/compression", + "crates/compression", "crates/database", "crates/fuel-core", "crates/fuel-gas-price-algorithm", diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 9a525017a0a..571fb1a9b6a 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -20,12 +20,12 @@ serde = { version = "1.0", features = ["derive"] } tokio = { workspace = true, features = ["sync"] } [dev-dependencies] -bincode = { version = "1.3" } bimap = { version = "0.6" } +bincode = { version = "1.3" } fuel-core-compression = { workspace = true, features = ["test-helpers"] } tempfile = "3" [features] default = ["rocksdb"] rocksdb = ["dep:rocksdb"] -test-helpers = ["fuel-core-types/test-helpers", "fuel-core-types/random", "fuel-core-types/std"] \ No newline at end of file +test-helpers = ["fuel-core-types/test-helpers", "fuel-core-types/random", "fuel-core-types/std"] From dad1b0fe8a3396019d74d029fff0f6cd51a9021a Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 5 Sep 2024 17:55:11 +0300 Subject: [PATCH 030/112] Update to match fuel-vm changes --- Cargo.lock | 12 +------ crates/compression/src/compression_tests.rs | 33 +++++++++++-------- crates/compression/src/context/compress.rs | 15 +++++---- crates/compression/src/context/decompress.rs | 15 +++++---- crates/compression/src/context/prepare.rs | 9 +++-- crates/compression/src/ports.rs | 10 +++--- crates/compression/src/services/compress.rs | 6 ++-- crates/compression/src/services/decompress.rs | 6 ++-- crates/fuel-core/src/query/message/test.rs | 3 +- 9 files changed, 57 insertions(+), 52 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 884fbca9c0b..0d0d4af4dc0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3161,8 +3161,8 @@ name = "fuel-compression" version = "0.56.0" dependencies = [ "fuel-derive", + "fuel-types", "serde", - "serde-big-array", ] 
[[package]] @@ -3889,7 +3889,6 @@ dependencies = [ name = "fuel-types" version = "0.56.0" dependencies = [ - "fuel-compression", "fuel-derive", "hex", "rand", @@ -7929,15 +7928,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde-big-array" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" -dependencies = [ - "serde", -] - [[package]] name = "serde_derive" version = "1.0.209" diff --git a/crates/compression/src/compression_tests.rs b/crates/compression/src/compression_tests.rs index f74b144a34e..ee635ca7d64 100644 --- a/crates/compression/src/compression_tests.rs +++ b/crates/compression/src/compression_tests.rs @@ -23,8 +23,8 @@ use fuel_core_types::{ fuel_tx::{ Bytes32, Transaction, - TxId, TxPointer, + UtxoId, }, tai64::Tai64, }; @@ -33,8 +33,8 @@ use tempfile::TempDir; use crate::{ db::RocksDb, ports::{ - TxIdToPointer, - TxPointerToId, + TxPointerToUtxoId, + UtxoIdToPointer, }, services, }; @@ -42,28 +42,33 @@ use crate::{ /// Just stores the looked-up tx pointers in a map, instead of actually looking them up. 
#[derive(Default)] pub struct MockTxDb { - mapping: Arc>>, + mapping: Arc>>, } #[async_trait::async_trait] -impl TxIdToPointer for MockTxDb { - async fn lookup(&self, tx_id: TxId) -> anyhow::Result { +impl UtxoIdToPointer for MockTxDb { + async fn lookup(&self, utxo_id: UtxoId) -> anyhow::Result<(TxPointer, u16)> { let mut g = self.mapping.lock().unwrap(); - if !g.contains_left(&tx_id) { + if !g.contains_left(&utxo_id) { let key = g.len() as u32; // Just obtain an unique key - g.insert(tx_id, TxPointer::new(key.into(), 0)); + g.insert(utxo_id, (TxPointer::new(key.into(), 0), 0)); } - Ok(g.get_by_left(&tx_id).cloned().unwrap()) + Ok(g.get_by_left(&utxo_id).cloned().unwrap()) } } #[async_trait::async_trait] -impl TxPointerToId for MockTxDb { - async fn lookup(&self, tx_pointer: TxPointer) -> anyhow::Result { +impl TxPointerToUtxoId for MockTxDb { + async fn lookup(&self, tx_pointer: TxPointer, index: u16) -> anyhow::Result { let g = self.mapping.lock().unwrap(); - g.get_by_right(&tx_pointer).cloned().ok_or_else(|| { - anyhow::anyhow!("TxPointer not found in mock db: {:?}", tx_pointer) - }) + g.get_by_right(&(tx_pointer, index)) + .cloned() + .ok_or_else(|| { + anyhow::anyhow!( + "(TxPointer, index) not found in mock db: {:?}", + (tx_pointer, index) + ) + }) } } diff --git a/crates/compression/src/context/compress.rs b/crates/compression/src/context/compress.rs index 001ce99ebb8..52dd122478f 100644 --- a/crates/compression/src/context/compress.rs +++ b/crates/compression/src/context/compress.rs @@ -8,17 +8,17 @@ use fuel_core_types::{ fuel_tx::{ Address, AssetId, - CompressibleTxId, ContractId, ScriptCode, TxPointer, + UtxoId, }, }; use crate::{ db::RocksDb, eviction_policy::CacheEvictor, - ports::TxIdToPointer, + ports::UtxoIdToPointer, tables::{ PerRegistryKeyspace, PostcardSerialized, @@ -28,7 +28,7 @@ use crate::{ pub struct CompressCtx<'a> { pub db: &'a mut RocksDb, - pub tx_lookup: &'a dyn TxIdToPointer, + pub tx_lookup: &'a dyn UtxoIdToPointer, pub 
cache_evictor: CacheEvictor, /// Changes to the temporary registry, to be included in the compressed block header pub changes: PerRegistryKeyspace>, @@ -77,8 +77,11 @@ impl<'a> CompressibleBy, anyhow::Error> for ScriptCode { } } -impl<'a> CompressibleBy, anyhow::Error> for CompressibleTxId { - async fn compress(&self, ctx: &mut CompressCtx<'a>) -> anyhow::Result { - ctx.tx_lookup.lookup(**self).await +impl<'a> CompressibleBy, anyhow::Error> for UtxoId { + async fn compress( + &self, + ctx: &mut CompressCtx<'a>, + ) -> anyhow::Result<(TxPointer, u16)> { + ctx.tx_lookup.lookup(*self).await } } diff --git a/crates/compression/src/context/decompress.rs b/crates/compression/src/context/decompress.rs index d37355fa5ee..acdcfbe0a15 100644 --- a/crates/compression/src/context/decompress.rs +++ b/crates/compression/src/context/decompress.rs @@ -6,22 +6,22 @@ use fuel_core_types::{ fuel_tx::{ Address, AssetId, - CompressibleTxId, ContractId, ScriptCode, TxPointer, + UtxoId, }, }; use crate::{ db::RocksDb, - ports::TxPointerToId, + ports::TxPointerToUtxoId, tables::RegistryKeyspace, }; pub struct DecompressCtx<'a> { pub db: &'a RocksDb, - pub tx_lookup: &'a dyn TxPointerToId, + pub tx_lookup: &'a dyn TxPointerToUtxoId, } impl<'a> DecompressibleBy, anyhow::Error> for Address { @@ -60,8 +60,11 @@ impl<'a> DecompressibleBy, anyhow::Error> for ScriptCode { } } -impl<'a> DecompressibleBy, anyhow::Error> for CompressibleTxId { - async fn decompress(c: &TxPointer, ctx: &DecompressCtx<'a>) -> anyhow::Result { - Ok(ctx.tx_lookup.lookup(*c).await?.into()) +impl<'a> DecompressibleBy, anyhow::Error> for UtxoId { + async fn decompress( + (ptr, i): &(TxPointer, u16), + ctx: &DecompressCtx<'a>, + ) -> anyhow::Result { + Ok(ctx.tx_lookup.lookup(*ptr, *i).await?.into()) } } diff --git a/crates/compression/src/context/prepare.rs b/crates/compression/src/context/prepare.rs index 4963905027a..30a437e6379 100644 --- a/crates/compression/src/context/prepare.rs +++ 
b/crates/compression/src/context/prepare.rs @@ -63,8 +63,11 @@ impl<'a> CompressibleBy, anyhow::Error> for ScriptCode { } } -impl<'a> CompressibleBy, anyhow::Error> for CompressibleTxId { - async fn compress(&self, _ctx: &mut PrepareCtx<'a>) -> anyhow::Result { - Ok(TxPointer::default()) +impl<'a> CompressibleBy, anyhow::Error> for UtxoId { + async fn compress( + &self, + _ctx: &mut PrepareCtx<'a>, + ) -> anyhow::Result<(TxPointer, u16)> { + Ok((TxPointer::default(), 0)) } } diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index a899547ae33..d57b74fa8ac 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -1,18 +1,18 @@ //! Ports this service requires to function. use fuel_core_types::fuel_tx::{ - TxId, TxPointer, + UtxoId, }; #[async_trait::async_trait] -pub trait TxIdToPointer { - async fn lookup(&self, tx_id: TxId) -> anyhow::Result; +pub trait UtxoIdToPointer { + async fn lookup(&self, utxo_id: UtxoId) -> anyhow::Result<(TxPointer, u16)>; } #[async_trait::async_trait] -pub trait TxPointerToId { - async fn lookup(&self, tx_pointer: TxPointer) -> anyhow::Result; +pub trait TxPointerToUtxoId { + async fn lookup(&self, tx_pointer: TxPointer, index: u16) -> anyhow::Result; } // Exposed interfaces: where should these live? 
diff --git a/crates/compression/src/services/compress.rs b/crates/compression/src/services/compress.rs index 114c4888e4c..15d3fdcc465 100644 --- a/crates/compression/src/services/compress.rs +++ b/crates/compression/src/services/compress.rs @@ -13,7 +13,7 @@ use crate::{ }, db::RocksDb, eviction_policy::CacheEvictor, - ports::TxIdToPointer, + ports::UtxoIdToPointer, tables::{ PerRegistryKeyspace, RegistrationsPerTable, @@ -48,7 +48,7 @@ impl From for CompressError { pub async fn run( mut db: RocksDb, - tx_lookup: Box, + tx_lookup: Box, mut request_receiver: mpsc::Receiver, ) { while let Some(req) = request_receiver.recv().await { @@ -63,7 +63,7 @@ pub async fn run( pub async fn compress( db: &mut RocksDb, - tx_lookup: &dyn TxIdToPointer, + tx_lookup: &dyn UtxoIdToPointer, block: Block, ) -> Result, CompressError> { if *block.header().height() != db.next_block_height()? { diff --git a/crates/compression/src/services/decompress.rs b/crates/compression/src/services/decompress.rs index 795c6d5c049..08ab593463f 100644 --- a/crates/compression/src/services/decompress.rs +++ b/crates/compression/src/services/decompress.rs @@ -17,7 +17,7 @@ use fuel_core_types::{ use crate::{ context::decompress::DecompressCtx, db::RocksDb, - ports::TxPointerToId, + ports::TxPointerToUtxoId, CompressedBlockPayload, }; @@ -57,7 +57,7 @@ impl From for DecompressError { pub async fn run( mut db: RocksDb, - tx_lookup: Box, + tx_lookup: Box, mut request_receiver: mpsc::Receiver, ) { @@ -73,7 +73,7 @@ pub async fn run( pub async fn decompress( db: &mut RocksDb, - tx_lookup: &dyn TxPointerToId, + tx_lookup: &dyn TxPointerToUtxoId, block: Vec, ) -> Result { if block.is_empty() || block[0] != 0 { diff --git a/crates/fuel-core/src/query/message/test.rs b/crates/fuel-core/src/query/message/test.rs index 9e95db644d6..350004ba31d 100644 --- a/crates/fuel-core/src/query/message/test.rs +++ b/crates/fuel-core/src/query/message/test.rs @@ -12,8 +12,9 @@ use fuel_core_types::{ Transaction, }, fuel_types::{ 
+ AssetId, BlockHeight, - *, + ContractId, }, tai64::Tai64, }; From dc6b40a9881c5725c3ffce8323b59cbc52f6e9e5 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Fri, 6 Sep 2024 13:14:34 +0300 Subject: [PATCH 031/112] Update to match latest fuel-vm changes --- Cargo.lock | 1 + crates/compression/Cargo.toml | 1 + crates/compression/src/compression_tests.rs | 110 +++++-- crates/compression/src/context/compress.rs | 36 ++- crates/compression/src/context/decompress.rs | 300 +++++++++++++++++- crates/compression/src/context/prepare.rs | 39 ++- crates/compression/src/db.rs | 2 - crates/compression/src/ports.rs | 35 +- crates/compression/src/services/compress.rs | 10 +- crates/compression/src/services/decompress.rs | 12 +- crates/compression/src/tables.rs | 2 - crates/fuel-core/src/schema/tx/input.rs | 6 +- 12 files changed, 485 insertions(+), 69 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0d0d4af4dc0..4497999155e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3365,6 +3365,7 @@ dependencies = [ "fuel-core-compression", "fuel-core-types", "postcard", + "rand", "rocksdb", "serde", "tempfile", diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 571fb1a9b6a..1783651c02d 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -23,6 +23,7 @@ tokio = { workspace = true, features = ["sync"] } bimap = { version = "0.6" } bincode = { version = "1.3" } fuel-core-compression = { workspace = true, features = ["test-helpers"] } +rand = { workspace = true } tempfile = "3" [features] diff --git a/crates/compression/src/compression_tests.rs b/crates/compression/src/compression_tests.rs index ee635ca7d64..1d07b25ec93 100644 --- a/crates/compression/src/compression_tests.rs +++ b/crates/compression/src/compression_tests.rs @@ -1,6 +1,9 @@ -use std::sync::{ - Arc, - Mutex, +use std::{ + collections::HashMap, + sync::{ + Arc, + Mutex, + }, }; use bimap::BiMap; @@ -22,18 +25,27 @@ use fuel_core_types::{ }, fuel_tx::{ Bytes32, + 
CompressedUtxoId, + Finalizable, + Input, Transaction, + TransactionBuilder, TxPointer, UtxoId, }, + fuel_types::Nonce, + fuel_vm::SecretKey, tai64::Tai64, }; +use rand::Rng; use tempfile::TempDir; use crate::{ db::RocksDb, ports::{ - TxPointerToUtxoId, + CoinInfo, + HistoryLookup, + MessageInfo, UtxoIdToPointer, }, services, @@ -42,39 +54,65 @@ use crate::{ /// Just stores the looked-up tx pointers in a map, instead of actually looking them up. #[derive(Default)] pub struct MockTxDb { - mapping: Arc>>, + utxo_id_mapping: Arc>>, + coins: HashMap, +} + +impl MockTxDb { + fn create_coin(&mut self, rng: &mut R, info: CoinInfo) -> UtxoId { + let utxo_id: UtxoId = rng.gen(); + self.coins.insert(utxo_id, info); + utxo_id + } } #[async_trait::async_trait] impl UtxoIdToPointer for MockTxDb { - async fn lookup(&self, utxo_id: UtxoId) -> anyhow::Result<(TxPointer, u16)> { - let mut g = self.mapping.lock().unwrap(); + async fn lookup(&self, utxo_id: UtxoId) -> anyhow::Result { + let mut g = self.utxo_id_mapping.lock().unwrap(); if !g.contains_left(&utxo_id) { let key = g.len() as u32; // Just obtain an unique key - g.insert(utxo_id, (TxPointer::new(key.into(), 0), 0)); + g.insert( + utxo_id, + CompressedUtxoId { + tx_pointer: TxPointer::new(key.into(), 0), + output_index: 0, + }, + ); } Ok(g.get_by_left(&utxo_id).cloned().unwrap()) } } #[async_trait::async_trait] -impl TxPointerToUtxoId for MockTxDb { - async fn lookup(&self, tx_pointer: TxPointer, index: u16) -> anyhow::Result { - let g = self.mapping.lock().unwrap(); - g.get_by_right(&(tx_pointer, index)) +impl HistoryLookup for MockTxDb { + async fn utxo_id(&self, c: &CompressedUtxoId) -> anyhow::Result { + let g = self.utxo_id_mapping.lock().unwrap(); + g.get_by_right(&c).cloned().ok_or_else(|| { + anyhow::anyhow!("CompressedUtxoId not found in mock db: {:?}", c) + }) + } + + async fn coin(&self, utxo_id: &UtxoId) -> anyhow::Result { + self.coins + .get(&utxo_id) .cloned() - .ok_or_else(|| { - anyhow::anyhow!( - 
"(TxPointer, index) not found in mock db: {:?}", - (tx_pointer, index) - ) - }) + .ok_or_else(|| anyhow::anyhow!("Coin not found in mock db: {:?}", utxo_id)) + } + + async fn message(&self, nonce: &Nonce) -> anyhow::Result { + todo!(); } } #[tokio::test] async fn same_compact_tx_is_smaller_in_next_block() { - let tx = Transaction::default_test_tx(); + let tx: Transaction = + TransactionBuilder::script(vec![1, 2, 3, 4, 5, 6, 7, 8], vec![]) + .max_fee_limit(0) + .add_random_fee_input() + .finalize() + .into(); let tmpdir = TempDir::new().unwrap(); @@ -121,16 +159,44 @@ async fn same_compact_tx_is_smaller_in_next_block() { #[tokio::test] async fn compress_decompress_roundtrip() { - let tx = Transaction::default_test_tx(); + use rand::{ + Rng, + SeedableRng, + }; + let mut rng = rand::rngs::StdRng::seed_from_u64(2322u64); let tmpdir = TempDir::new().unwrap(); let mut db = RocksDb::open(tmpdir.path()).unwrap(); - let tx_db = MockTxDb::default(); + let mut tx_db = MockTxDb::default(); let mut original_blocks = Vec::new(); let mut compressed_blocks = Vec::new(); for i in 0..3 { + let secret_key = SecretKey::random(&mut rng); + + let coin_utxo_id = tx_db.create_coin( + &mut rng, + CoinInfo { + owner: Input::owner(&secret_key.public_key()), + amount: (i as u64) * 1000, + asset_id: Default::default(), + }, + ); + + let tx: Transaction = + TransactionBuilder::script(vec![1, 2, 3, 4, 5, 6, 7, 8], vec![]) + .max_fee_limit(0) + .add_unsigned_coin_input( + secret_key, + coin_utxo_id, + (i as u64) * 1000, + Default::default(), + Default::default(), + ) + .finalize() + .into(); + let block = Block::new( PartialBlockHeader { application: ApplicationHeader { @@ -146,7 +212,7 @@ async fn compress_decompress_roundtrip() { generated: Empty, }, }, - vec![tx.clone()], + vec![tx], &[], Bytes32::default(), ) diff --git a/crates/compression/src/context/compress.rs b/crates/compression/src/context/compress.rs index 52dd122478f..b23ee07360f 100644 --- 
a/crates/compression/src/context/compress.rs +++ b/crates/compression/src/context/compress.rs @@ -6,11 +6,12 @@ use fuel_core_types::{ RegistryKey, }, fuel_tx::{ + input::PredicateCode, Address, AssetId, + CompressedUtxoId, ContractId, ScriptCode, - TxPointer, UtxoId, }, }; @@ -54,34 +55,55 @@ fn registry_substitute( } impl<'a> CompressibleBy, anyhow::Error> for Address { - async fn compress(&self, ctx: &mut CompressCtx<'a>) -> anyhow::Result { + async fn compress_with( + &self, + ctx: &mut CompressCtx<'a>, + ) -> anyhow::Result { registry_substitute(RegistryKeyspace::address, self, ctx) } } impl<'a> CompressibleBy, anyhow::Error> for AssetId { - async fn compress(&self, ctx: &mut CompressCtx<'a>) -> anyhow::Result { + async fn compress_with( + &self, + ctx: &mut CompressCtx<'a>, + ) -> anyhow::Result { registry_substitute(RegistryKeyspace::asset_id, self, ctx) } } impl<'a> CompressibleBy, anyhow::Error> for ContractId { - async fn compress(&self, ctx: &mut CompressCtx<'a>) -> anyhow::Result { + async fn compress_with( + &self, + ctx: &mut CompressCtx<'a>, + ) -> anyhow::Result { registry_substitute(RegistryKeyspace::contract_id, self, ctx) } } impl<'a> CompressibleBy, anyhow::Error> for ScriptCode { - async fn compress(&self, ctx: &mut CompressCtx<'a>) -> anyhow::Result { + async fn compress_with( + &self, + ctx: &mut CompressCtx<'a>, + ) -> anyhow::Result { + registry_substitute(RegistryKeyspace::script_code, self, ctx) + } +} + +impl<'a> CompressibleBy, anyhow::Error> for PredicateCode { + async fn compress_with( + &self, + ctx: &mut CompressCtx<'a>, + ) -> anyhow::Result { registry_substitute(RegistryKeyspace::script_code, self, ctx) } } impl<'a> CompressibleBy, anyhow::Error> for UtxoId { - async fn compress( + async fn compress_with( &self, ctx: &mut CompressCtx<'a>, - ) -> anyhow::Result<(TxPointer, u16)> { + ) -> anyhow::Result { ctx.tx_lookup.lookup(*self).await } } diff --git a/crates/compression/src/context/decompress.rs 
b/crates/compression/src/context/decompress.rs index acdcfbe0a15..89e4108fe8b 100644 --- a/crates/compression/src/context/decompress.rs +++ b/crates/compression/src/context/decompress.rs @@ -1,31 +1,50 @@ use fuel_core_types::{ + fuel_asm::Word, fuel_compression::{ DecompressibleBy, RegistryKey, }, fuel_tx::{ + input::{ + self, + coin::{ + self, + Coin, + CompressedCoin, + }, + message::{ + self, + CompressedMessage, + Message, + }, + Empty, + PredicateCode, + }, + output, Address, AssetId, + CompressedUtxoId, ContractId, + Mint, ScriptCode, - TxPointer, + Transaction, UtxoId, }, }; use crate::{ db::RocksDb, - ports::TxPointerToUtxoId, + ports::HistoryLookup, tables::RegistryKeyspace, }; pub struct DecompressCtx<'a> { pub db: &'a RocksDb, - pub tx_lookup: &'a dyn TxPointerToUtxoId, + pub lookup: &'a dyn HistoryLookup, } impl<'a> DecompressibleBy, anyhow::Error> for Address { - async fn decompress( + async fn decompress_with( c: &RegistryKey, ctx: &DecompressCtx<'a>, ) -> anyhow::Result { @@ -34,7 +53,7 @@ impl<'a> DecompressibleBy, anyhow::Error> for Address { } impl<'a> DecompressibleBy, anyhow::Error> for AssetId { - async fn decompress( + async fn decompress_with( c: &RegistryKey, ctx: &DecompressCtx<'a>, ) -> anyhow::Result { @@ -43,7 +62,7 @@ impl<'a> DecompressibleBy, anyhow::Error> for AssetId { } impl<'a> DecompressibleBy, anyhow::Error> for ContractId { - async fn decompress( + async fn decompress_with( c: &RegistryKey, ctx: &DecompressCtx<'a>, ) -> anyhow::Result { @@ -52,7 +71,16 @@ impl<'a> DecompressibleBy, anyhow::Error> for ContractId { } impl<'a> DecompressibleBy, anyhow::Error> for ScriptCode { - async fn decompress( + async fn decompress_with( + c: &RegistryKey, + ctx: &DecompressCtx<'a>, + ) -> anyhow::Result { + ctx.db.read_registry(RegistryKeyspace::script_code, *c) + } +} + +impl<'a> DecompressibleBy, anyhow::Error> for PredicateCode { + async fn decompress_with( c: &RegistryKey, ctx: &DecompressCtx<'a>, ) -> anyhow::Result { @@ -61,10 +89,262 
@@ impl<'a> DecompressibleBy, anyhow::Error> for ScriptCode { } impl<'a> DecompressibleBy, anyhow::Error> for UtxoId { - async fn decompress( - (ptr, i): &(TxPointer, u16), + async fn decompress_with( + c: &CompressedUtxoId, ctx: &DecompressCtx<'a>, ) -> anyhow::Result { - Ok(ctx.tx_lookup.lookup(*ptr, *i).await?.into()) + ctx.lookup.utxo_id(c).await + } +} + +impl<'a> DecompressibleBy, anyhow::Error> for Mint { + async fn decompress_with( + c: &Self::Compressed, + ctx: &DecompressCtx<'a>, + ) -> Result { + Ok(Transaction::mint( + Default::default(), // TODO: what should this we do with this? + >::decompress_with( + &c.input_contract, + ctx, + ) + .await?, + >::decompress_with( + &c.output_contract, + ctx, + ) + .await?, + >::decompress_with(&c.mint_amount, ctx).await?, + >::decompress_with(&c.mint_asset_id, ctx).await?, + >::decompress_with(&c.gas_price, ctx).await?, + )) + } +} + +impl<'a> DecompressibleBy, anyhow::Error> for Coin { + async fn decompress_with( + c: &CompressedCoin, + ctx: &DecompressCtx<'a>, + ) -> Result, anyhow::Error> { + let utxo_id = UtxoId::decompress_with(&c.utxo_id, ctx).await?; + let coin_info = ctx.lookup.coin(&utxo_id).await?; + Ok(Coin { + utxo_id, + owner: coin_info.owner, + amount: coin_info.amount, + asset_id: coin_info.asset_id, + tx_pointer: Default::default(), + witness_index: c.witness_index, + predicate_gas_used: c.predicate_gas_used, + predicate: + ::Predicate::decompress_with( + &c.predicate, + ctx, + ) + .await?, + predicate_data: c.predicate_data.clone(), + }) + } +} + +impl<'a> DecompressibleBy, anyhow::Error> for Coin { + async fn decompress_with( + c: &CompressedCoin, + ctx: &DecompressCtx<'a>, + ) -> Result, anyhow::Error> { + let utxo_id = UtxoId::decompress_with(&c.utxo_id, ctx).await?; + let coin_info = ctx.lookup.coin(&utxo_id).await?; + Ok(Coin { + utxo_id, + owner: coin_info.owner, + amount: coin_info.amount, + asset_id: coin_info.asset_id, + tx_pointer: Default::default(), + witness_index: c.witness_index, + 
predicate_gas_used: Empty::default(), + predicate: Empty::default(), + predicate_data: Empty::default(), + }) + } +} + +impl<'a> DecompressibleBy, anyhow::Error> for Coin { + async fn decompress_with( + c: &CompressedCoin, + ctx: &DecompressCtx<'a>, + ) -> Result, anyhow::Error> { + let utxo_id = UtxoId::decompress_with(&c.utxo_id, ctx).await?; + let coin_info = ctx.lookup.coin(&utxo_id).await?; + Ok(Coin { + utxo_id, + owner: coin_info.owner, + amount: coin_info.amount, + asset_id: coin_info.asset_id, + tx_pointer: Default::default(), + witness_index: Empty::default(), + predicate_gas_used: c.predicate_gas_used, + predicate: + ::Predicate::decompress_with( + &c.predicate, + ctx, + ) + .await?, + predicate_data: c.predicate_data.clone(), + }) + } +} + +impl<'a> DecompressibleBy, anyhow::Error> + for Message +{ + async fn decompress_with( + c: &CompressedMessage, + ctx: &DecompressCtx<'a>, + ) -> Result, anyhow::Error> { + let msg = ctx.lookup.message(&c.nonce).await?; + Ok(Message { + sender: msg.sender, + recipient: msg.recipient, + amount: msg.amount, + nonce: c.nonce, + witness_index: c.witness_index, + predicate_gas_used: c.predicate_gas_used, + data: msg.data.clone(), + predicate: + ::Predicate::decompress_with( + &c.predicate, + ctx, + ) + .await?, + predicate_data: c.predicate_data.clone(), + }) + } +} +impl<'a> DecompressibleBy, anyhow::Error> + for Message> +{ + async fn decompress_with( + c: &CompressedMessage< + message::specifications::MessageData, + >, + ctx: &DecompressCtx<'a>, + ) -> Result< + Message>, + anyhow::Error, + > { + let msg = ctx.lookup.message(&c.nonce).await?; + Ok(Message { + sender: msg.sender, + recipient: msg.recipient, + amount: msg.amount, + nonce: c.nonce, + witness_index: c.witness_index, + predicate_gas_used: Empty::default(), + data: msg.data.clone(), + predicate: < as message::MessageSpecification>::Predicate as DecompressibleBy< + _, + anyhow::Error, + >>::decompress_with(&c.predicate, ctx) + .await?, + predicate_data: 
Empty::default(), + }) + } +} +impl<'a> DecompressibleBy, anyhow::Error> + for Message> +{ + async fn decompress_with( + c: &CompressedMessage< + message::specifications::MessageData, + >, + ctx: &DecompressCtx<'a>, + ) -> Result< + Message>, + anyhow::Error, + > { + let msg = ctx.lookup.message(&c.nonce).await?; + Ok(Message { + sender: msg.sender, + recipient: msg.recipient, + amount: msg.amount, + nonce: c.nonce, + witness_index: Empty::default(), + predicate_gas_used: c.predicate_gas_used, + data: msg.data.clone(), + predicate: as message::MessageSpecification>::Predicate::decompress_with( + &c.predicate, + ctx, + ) + .await?, + predicate_data: c.predicate_data.clone(), + }) + } +} +impl<'a> DecompressibleBy, anyhow::Error> + for Message> +{ + async fn decompress_with( + c: &CompressedMessage< + message::specifications::MessageCoin, + >, + ctx: &DecompressCtx<'a>, + ) -> Result< + Message>, + anyhow::Error, + > { + let msg = ctx.lookup.message(&c.nonce).await?; + Ok(Message { + sender: msg.sender, + recipient: msg.recipient, + amount: msg.amount, + nonce: c.nonce, + witness_index: c.witness_index, + predicate_gas_used: Empty::default(), + data: Empty::default(), + predicate: < as message::MessageSpecification>::Predicate as DecompressibleBy< + _, + anyhow::Error, + >>::decompress_with(&c.predicate, ctx) + .await?, + predicate_data: Empty::default(), + }) + } +} +impl<'a> DecompressibleBy, anyhow::Error> + for Message> +{ + async fn decompress_with( + c: &CompressedMessage< + message::specifications::MessageCoin, + >, + ctx: &DecompressCtx<'a>, + ) -> Result< + Message>, + anyhow::Error, + > { + let msg = ctx.lookup.message(&c.nonce).await?; + Ok(Message { + sender: msg.sender, + recipient: msg.recipient, + amount: msg.amount, + nonce: c.nonce, + witness_index: Empty::default(), + predicate_gas_used: c.predicate_gas_used, + data: Empty::default(), + predicate: as message::MessageSpecification>::Predicate::decompress_with( + &c.predicate, + ctx, + ) + .await?, + 
predicate_data: c.predicate_data.clone(), + }) } } diff --git a/crates/compression/src/context/prepare.rs b/crates/compression/src/context/prepare.rs index 30a437e6379..50bd16486f5 100644 --- a/crates/compression/src/context/prepare.rs +++ b/crates/compression/src/context/prepare.rs @@ -7,6 +7,7 @@ use fuel_core_types::{ }, fuel_tx::*, }; +use input::PredicateCode; use crate::{ db::RocksDb, @@ -40,34 +41,58 @@ fn registry_prepare( } impl<'a> CompressibleBy, anyhow::Error> for Address { - async fn compress(&self, ctx: &mut PrepareCtx<'a>) -> anyhow::Result { + async fn compress_with( + &self, + ctx: &mut PrepareCtx<'a>, + ) -> anyhow::Result { registry_prepare(RegistryKeyspace::address, self, ctx) } } impl<'a> CompressibleBy, anyhow::Error> for AssetId { - async fn compress(&self, ctx: &mut PrepareCtx<'a>) -> anyhow::Result { + async fn compress_with( + &self, + ctx: &mut PrepareCtx<'a>, + ) -> anyhow::Result { registry_prepare(RegistryKeyspace::asset_id, self, ctx) } } impl<'a> CompressibleBy, anyhow::Error> for ContractId { - async fn compress(&self, ctx: &mut PrepareCtx<'a>) -> anyhow::Result { + async fn compress_with( + &self, + ctx: &mut PrepareCtx<'a>, + ) -> anyhow::Result { registry_prepare(RegistryKeyspace::contract_id, self, ctx) } } impl<'a> CompressibleBy, anyhow::Error> for ScriptCode { - async fn compress(&self, ctx: &mut PrepareCtx<'a>) -> anyhow::Result { + async fn compress_with( + &self, + ctx: &mut PrepareCtx<'a>, + ) -> anyhow::Result { + registry_prepare(RegistryKeyspace::script_code, self, ctx) + } +} + +impl<'a> CompressibleBy, anyhow::Error> for PredicateCode { + async fn compress_with( + &self, + ctx: &mut PrepareCtx<'a>, + ) -> anyhow::Result { registry_prepare(RegistryKeyspace::script_code, self, ctx) } } impl<'a> CompressibleBy, anyhow::Error> for UtxoId { - async fn compress( + async fn compress_with( &self, _ctx: &mut PrepareCtx<'a>, - ) -> anyhow::Result<(TxPointer, u16)> { - Ok((TxPointer::default(), 0)) + ) -> anyhow::Result { + 
Ok(CompressedUtxoId { + tx_pointer: TxPointer::default(), + output_index: 0, + }) } } diff --git a/crates/compression/src/db.rs b/crates/compression/src/db.rs index db673c523c0..384d9b7be51 100644 --- a/crates/compression/src/db.rs +++ b/crates/compression/src/db.rs @@ -60,8 +60,6 @@ impl RocksDb { keyspace.name().bytes().chain(core::iter::once(0)).collect(); let db_key = postcard::to_extend(&key, db_key).expect("Never fails"); - println!("read_registry {:?}", &db_key); - let cf = self.db.cf_handle("temporal").unwrap(); let Some(bytes) = self.db.get_cf(&cf, &db_key)? else { bail!("Key {keyspace:?}:{key:?} not found"); diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index d57b74fa8ac..127b0233c14 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -1,18 +1,41 @@ //! Ports this service requires to function. -use fuel_core_types::fuel_tx::{ - TxPointer, - UtxoId, +use fuel_core_types::{ + fuel_tx::{ + Address, + AssetId, + CompressedUtxoId, + UtxoId, + Word, + }, + fuel_types::Nonce, }; #[async_trait::async_trait] pub trait UtxoIdToPointer { - async fn lookup(&self, utxo_id: UtxoId) -> anyhow::Result<(TxPointer, u16)>; + async fn lookup(&self, utxo_id: UtxoId) -> anyhow::Result; } #[async_trait::async_trait] -pub trait TxPointerToUtxoId { - async fn lookup(&self, tx_pointer: TxPointer, index: u16) -> anyhow::Result; +pub trait HistoryLookup { + async fn utxo_id(&self, c: &CompressedUtxoId) -> anyhow::Result; + async fn coin(&self, utxo_id: &UtxoId) -> anyhow::Result; + async fn message(&self, nonce: &Nonce) -> anyhow::Result; +} + +#[derive(Debug, Clone)] +pub struct CoinInfo { + pub owner: Address, + pub amount: u64, + pub asset_id: AssetId, +} + +#[derive(Debug, Clone)] +pub struct MessageInfo { + pub sender: Address, + pub recipient: Address, + pub amount: Word, + pub data: Vec, } // Exposed interfaces: where should these live? 
diff --git a/crates/compression/src/services/compress.rs b/crates/compression/src/services/compress.rs index 15d3fdcc465..24489f90ef7 100644 --- a/crates/compression/src/services/compress.rs +++ b/crates/compression/src/services/compress.rs @@ -76,9 +76,11 @@ pub async fn compress( db, accessed_keys: PerRegistryKeyspace::default(), }; - let _ = - as CompressibleBy<_, _>>::compress(&target, &mut prepare_ctx) - .await?; + let _ = as CompressibleBy<_, _>>::compress_with( + &target, + &mut prepare_ctx, + ) + .await?; let mut ctx = CompressCtx { db: prepare_ctx.db, @@ -88,7 +90,7 @@ pub async fn compress( }, changes: Default::default(), }; - let transactions = target.compress(&mut ctx).await?; + let transactions = target.compress_with(&mut ctx).await?; let registrations = ctx.changes; let registrations = RegistrationsPerTable::try_from(registrations)?; diff --git a/crates/compression/src/services/decompress.rs b/crates/compression/src/services/decompress.rs index 08ab593463f..141fd01c8ee 100644 --- a/crates/compression/src/services/decompress.rs +++ b/crates/compression/src/services/decompress.rs @@ -17,7 +17,7 @@ use fuel_core_types::{ use crate::{ context::decompress::DecompressCtx, db::RocksDb, - ports::TxPointerToUtxoId, + ports::HistoryLookup, CompressedBlockPayload, }; @@ -57,14 +57,14 @@ impl From for DecompressError { pub async fn run( mut db: RocksDb, - tx_lookup: Box, + lookup: Box, mut request_receiver: mpsc::Receiver, ) { while let Some(req) = request_receiver.recv().await { match req { TaskRequest::Decompress { block, response } => { - let reply = decompress(&mut db, &*tx_lookup, block).await; + let reply = decompress(&mut db, &*lookup, block).await; response.send(reply).await.expect("Failed to respond"); } } @@ -73,7 +73,7 @@ pub async fn run( pub async fn decompress( db: &mut RocksDb, - tx_lookup: &dyn TxPointerToUtxoId, + lookup: &dyn HistoryLookup, block: Vec, ) -> Result { if block.is_empty() || block[0] != 0 { @@ -89,9 +89,9 @@ pub async fn 
decompress( compressed.registrations.write_to_db(db)?; - let ctx = DecompressCtx { db, tx_lookup }; + let ctx = DecompressCtx { db, lookup }; - let transactions = as DecompressibleBy<_, _>>::decompress( + let transactions = as DecompressibleBy<_, _>>::decompress_with( &compressed.transactions, &ctx, ) diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index ca583205809..ae972ac2ff0 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -121,8 +121,6 @@ macro_rules! tables { let db_key: Vec = key_table_prefix.iter().copied().chain(raw_key.clone()).collect(); let db_value = postcard::to_stdvec(&value).expect("Never fails"); - println!("write_to_db {:?}", &db_key); - batch.put_cf(&cf_registry, db_key.clone(), db_value.clone()); // Remove the overwritten value from index, if any diff --git a/crates/fuel-core/src/schema/tx/input.rs b/crates/fuel-core/src/schema/tx/input.rs index 226526b888b..5245a346212 100644 --- a/crates/fuel-core/src/schema/tx/input.rs +++ b/crates/fuel-core/src/schema/tx/input.rs @@ -196,7 +196,7 @@ impl From<&fuel_tx::Input> for Input { tx_pointer: TxPointer(*tx_pointer), witness_index: Default::default(), predicate_gas_used: (*predicate_gas_used).into(), - predicate: HexString(predicate.clone()), + predicate: HexString(predicate.to_vec()), predicate_data: HexString(predicate_data.clone()), }), fuel_tx::Input::Contract(contract) => Input::Contract(contract.into()), @@ -239,7 +239,7 @@ impl From<&fuel_tx::Input> for Input { witness_index: Default::default(), predicate_gas_used: (*predicate_gas_used).into(), data: HexString(Default::default()), - predicate: HexString(predicate.clone()), + predicate: HexString(predicate.to_vec()), predicate_data: HexString(predicate_data.clone()), }), fuel_tx::Input::MessageDataSigned( @@ -283,7 +283,7 @@ impl From<&fuel_tx::Input> for Input { witness_index: Default::default(), predicate_gas_used: (*predicate_gas_used).into(), data: 
HexString(data.clone()), - predicate: HexString(predicate.clone()), + predicate: HexString(predicate.to_vec()), predicate_data: HexString(predicate_data.clone()), }), } From 902619da0e47109cc6bb6b7d00bd4e88d5e397f2 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 9 Sep 2024 12:14:02 +0300 Subject: [PATCH 032/112] Update to match fuel-vm changes --- crates/compression/src/compression_tests.rs | 7 +- crates/compression/src/context/compress.rs | 17 +- crates/compression/src/context/decompress.rs | 312 +++++------------- crates/compression/src/context/prepare.rs | 17 +- crates/compression/src/services/compress.rs | 18 +- crates/compression/src/services/decompress.rs | 2 +- 6 files changed, 107 insertions(+), 266 deletions(-) diff --git a/crates/compression/src/compression_tests.rs b/crates/compression/src/compression_tests.rs index 1d07b25ec93..57b9832d6be 100644 --- a/crates/compression/src/compression_tests.rs +++ b/crates/compression/src/compression_tests.rs @@ -100,7 +100,7 @@ impl HistoryLookup for MockTxDb { .ok_or_else(|| anyhow::anyhow!("Coin not found in mock db: {:?}", utxo_id)) } - async fn message(&self, nonce: &Nonce) -> anyhow::Result { + async fn message(&self, _nonce: &Nonce) -> anyhow::Result { todo!(); } } @@ -159,10 +159,7 @@ async fn same_compact_tx_is_smaller_in_next_block() { #[tokio::test] async fn compress_decompress_roundtrip() { - use rand::{ - Rng, - SeedableRng, - }; + use rand::SeedableRng; let mut rng = rand::rngs::StdRng::seed_from_u64(2322u64); let tmpdir = TempDir::new().unwrap(); diff --git a/crates/compression/src/context/compress.rs b/crates/compression/src/context/compress.rs index b23ee07360f..d2d3c6c75d3 100644 --- a/crates/compression/src/context/compress.rs +++ b/crates/compression/src/context/compress.rs @@ -3,6 +3,7 @@ use std::collections::HashMap; use fuel_core_types::{ fuel_compression::{ CompressibleBy, + ContextError, RegistryKey, }, fuel_tx::{ @@ -35,6 +36,10 @@ pub struct CompressCtx<'a> { pub changes: 
PerRegistryKeyspace>, } +impl ContextError for CompressCtx<'_> { + type Error = anyhow::Error; +} + fn registry_substitute( keyspace: RegistryKeyspace, value: &T, @@ -54,7 +59,7 @@ fn registry_substitute( Ok(key) } -impl<'a> CompressibleBy, anyhow::Error> for Address { +impl<'a> CompressibleBy> for Address { async fn compress_with( &self, ctx: &mut CompressCtx<'a>, @@ -63,7 +68,7 @@ impl<'a> CompressibleBy, anyhow::Error> for Address { } } -impl<'a> CompressibleBy, anyhow::Error> for AssetId { +impl<'a> CompressibleBy> for AssetId { async fn compress_with( &self, ctx: &mut CompressCtx<'a>, @@ -72,7 +77,7 @@ impl<'a> CompressibleBy, anyhow::Error> for AssetId { } } -impl<'a> CompressibleBy, anyhow::Error> for ContractId { +impl<'a> CompressibleBy> for ContractId { async fn compress_with( &self, ctx: &mut CompressCtx<'a>, @@ -81,7 +86,7 @@ impl<'a> CompressibleBy, anyhow::Error> for ContractId { } } -impl<'a> CompressibleBy, anyhow::Error> for ScriptCode { +impl<'a> CompressibleBy> for ScriptCode { async fn compress_with( &self, ctx: &mut CompressCtx<'a>, @@ -90,7 +95,7 @@ impl<'a> CompressibleBy, anyhow::Error> for ScriptCode { } } -impl<'a> CompressibleBy, anyhow::Error> for PredicateCode { +impl<'a> CompressibleBy> for PredicateCode { async fn compress_with( &self, ctx: &mut CompressCtx<'a>, @@ -99,7 +104,7 @@ impl<'a> CompressibleBy, anyhow::Error> for PredicateCode { } } -impl<'a> CompressibleBy, anyhow::Error> for UtxoId { +impl<'a> CompressibleBy> for UtxoId { async fn compress_with( &self, ctx: &mut CompressCtx<'a>, diff --git a/crates/compression/src/context/decompress.rs b/crates/compression/src/context/decompress.rs index 89e4108fe8b..634de30b3ab 100644 --- a/crates/compression/src/context/decompress.rs +++ b/crates/compression/src/context/decompress.rs @@ -1,26 +1,24 @@ use fuel_core_types::{ - fuel_asm::Word, fuel_compression::{ + Compressible, + ContextError, + Decompress, DecompressibleBy, RegistryKey, }, fuel_tx::{ input::{ - self, coin::{ - self, 
Coin, - CompressedCoin, + CoinSpecification, }, message::{ - self, - CompressedMessage, Message, + MessageSpecification, }, - Empty, + AsField, PredicateCode, }, - output, Address, AssetId, CompressedUtxoId, @@ -43,7 +41,11 @@ pub struct DecompressCtx<'a> { pub lookup: &'a dyn HistoryLookup, } -impl<'a> DecompressibleBy, anyhow::Error> for Address { +impl<'a> ContextError for DecompressCtx<'a> { + type Error = anyhow::Error; +} + +impl<'a> DecompressibleBy> for Address { async fn decompress_with( c: &RegistryKey, ctx: &DecompressCtx<'a>, @@ -52,7 +54,7 @@ impl<'a> DecompressibleBy, anyhow::Error> for Address { } } -impl<'a> DecompressibleBy, anyhow::Error> for AssetId { +impl<'a> DecompressibleBy> for AssetId { async fn decompress_with( c: &RegistryKey, ctx: &DecompressCtx<'a>, @@ -61,7 +63,7 @@ impl<'a> DecompressibleBy, anyhow::Error> for AssetId { } } -impl<'a> DecompressibleBy, anyhow::Error> for ContractId { +impl<'a> DecompressibleBy> for ContractId { async fn decompress_with( c: &RegistryKey, ctx: &DecompressCtx<'a>, @@ -70,7 +72,7 @@ impl<'a> DecompressibleBy, anyhow::Error> for ContractId { } } -impl<'a> DecompressibleBy, anyhow::Error> for ScriptCode { +impl<'a> DecompressibleBy> for ScriptCode { async fn decompress_with( c: &RegistryKey, ctx: &DecompressCtx<'a>, @@ -79,7 +81,7 @@ impl<'a> DecompressibleBy, anyhow::Error> for ScriptCode { } } -impl<'a> DecompressibleBy, anyhow::Error> for PredicateCode { +impl<'a> DecompressibleBy> for PredicateCode { async fn decompress_with( c: &RegistryKey, ctx: &DecompressCtx<'a>, @@ -88,7 +90,7 @@ impl<'a> DecompressibleBy, anyhow::Error> for PredicateCode { } } -impl<'a> DecompressibleBy, anyhow::Error> for UtxoId { +impl<'a> DecompressibleBy> for UtxoId { async fn decompress_with( c: &CompressedUtxoId, ctx: &DecompressCtx<'a>, @@ -97,254 +99,88 @@ impl<'a> DecompressibleBy, anyhow::Error> for UtxoId { } } -impl<'a> DecompressibleBy, anyhow::Error> for Mint { - async fn decompress_with( - c: &Self::Compressed, - 
ctx: &DecompressCtx<'a>, - ) -> Result { - Ok(Transaction::mint( - Default::default(), // TODO: what should this we do with this? - >::decompress_with( - &c.input_contract, - ctx, - ) - .await?, - >::decompress_with( - &c.output_contract, - ctx, - ) - .await?, - >::decompress_with(&c.mint_amount, ctx).await?, - >::decompress_with(&c.mint_asset_id, ctx).await?, - >::decompress_with(&c.gas_price, ctx).await?, - )) - } -} - -impl<'a> DecompressibleBy, anyhow::Error> for Coin { - async fn decompress_with( - c: &CompressedCoin, - ctx: &DecompressCtx<'a>, - ) -> Result, anyhow::Error> { - let utxo_id = UtxoId::decompress_with(&c.utxo_id, ctx).await?; - let coin_info = ctx.lookup.coin(&utxo_id).await?; - Ok(Coin { - utxo_id, - owner: coin_info.owner, - amount: coin_info.amount, - asset_id: coin_info.asset_id, - tx_pointer: Default::default(), - witness_index: c.witness_index, - predicate_gas_used: c.predicate_gas_used, - predicate: - ::Predicate::decompress_with( - &c.predicate, - ctx, - ) - .await?, - predicate_data: c.predicate_data.clone(), - }) - } -} - -impl<'a> DecompressibleBy, anyhow::Error> for Coin { - async fn decompress_with( - c: &CompressedCoin, - ctx: &DecompressCtx<'a>, - ) -> Result, anyhow::Error> { - let utxo_id = UtxoId::decompress_with(&c.utxo_id, ctx).await?; - let coin_info = ctx.lookup.coin(&utxo_id).await?; - Ok(Coin { - utxo_id, - owner: coin_info.owner, - amount: coin_info.amount, - asset_id: coin_info.asset_id, - tx_pointer: Default::default(), - witness_index: c.witness_index, - predicate_gas_used: Empty::default(), - predicate: Empty::default(), - predicate_data: Empty::default(), - }) - } -} - -impl<'a> DecompressibleBy, anyhow::Error> for Coin { +impl<'a, Specification> DecompressibleBy> for Coin +where + Specification: CoinSpecification, + Specification::Predicate: DecompressibleBy>, + Specification::PredicateData: DecompressibleBy>, + Specification::PredicateGasUsed: DecompressibleBy>, + Specification::Witness: DecompressibleBy>, +{ async 
fn decompress_with( - c: &CompressedCoin, + c: & as Compressible>::Compressed, ctx: &DecompressCtx<'a>, - ) -> Result, anyhow::Error> { + ) -> anyhow::Result> { let utxo_id = UtxoId::decompress_with(&c.utxo_id, ctx).await?; let coin_info = ctx.lookup.coin(&utxo_id).await?; - Ok(Coin { + let witness_index = c.witness_index.decompress(ctx).await?; + let predicate_gas_used = c.predicate_gas_used.decompress(ctx).await?; + let predicate = c.predicate.decompress(ctx).await?; + let predicate_data = c.predicate_data.decompress(ctx).await?; + Ok(Self { utxo_id, owner: coin_info.owner, amount: coin_info.amount, asset_id: coin_info.asset_id, tx_pointer: Default::default(), - witness_index: Empty::default(), - predicate_gas_used: c.predicate_gas_used, - predicate: - ::Predicate::decompress_with( - &c.predicate, - ctx, - ) - .await?, - predicate_data: c.predicate_data.clone(), + witness_index, + predicate_gas_used, + predicate, + predicate_data, }) } } -impl<'a> DecompressibleBy, anyhow::Error> - for Message +impl<'a, Specification> DecompressibleBy> for Message +where + Specification: MessageSpecification, + Specification::Data: DecompressibleBy> + Default, + Specification::Predicate: DecompressibleBy>, + Specification::PredicateData: DecompressibleBy>, + Specification::PredicateGasUsed: DecompressibleBy>, + Specification::Witness: DecompressibleBy>, { async fn decompress_with( - c: &CompressedMessage, + c: & as Compressible>::Compressed, ctx: &DecompressCtx<'a>, - ) -> Result, anyhow::Error> { + ) -> anyhow::Result> { let msg = ctx.lookup.message(&c.nonce).await?; - Ok(Message { + let witness_index = c.witness_index.decompress(ctx).await?; + let predicate_gas_used = c.predicate_gas_used.decompress(ctx).await?; + let predicate = c.predicate.decompress(ctx).await?; + let predicate_data = c.predicate_data.decompress(ctx).await?; + let mut message: Message = Message { sender: msg.sender, recipient: msg.recipient, amount: msg.amount, nonce: c.nonce, - witness_index: 
c.witness_index, - predicate_gas_used: c.predicate_gas_used, - data: msg.data.clone(), - predicate: - ::Predicate::decompress_with( - &c.predicate, - ctx, - ) - .await?, - predicate_data: c.predicate_data.clone(), - }) - } -} -impl<'a> DecompressibleBy, anyhow::Error> - for Message> -{ - async fn decompress_with( - c: &CompressedMessage< - message::specifications::MessageData, - >, - ctx: &DecompressCtx<'a>, - ) -> Result< - Message>, - anyhow::Error, - > { - let msg = ctx.lookup.message(&c.nonce).await?; - Ok(Message { - sender: msg.sender, - recipient: msg.recipient, - amount: msg.amount, - nonce: c.nonce, - witness_index: c.witness_index, - predicate_gas_used: Empty::default(), - data: msg.data.clone(), - predicate: < as message::MessageSpecification>::Predicate as DecompressibleBy< - _, - anyhow::Error, - >>::decompress_with(&c.predicate, ctx) - .await?, - predicate_data: Empty::default(), - }) - } -} -impl<'a> DecompressibleBy, anyhow::Error> - for Message> -{ - async fn decompress_with( - c: &CompressedMessage< - message::specifications::MessageData, - >, - ctx: &DecompressCtx<'a>, - ) -> Result< - Message>, - anyhow::Error, - > { - let msg = ctx.lookup.message(&c.nonce).await?; - Ok(Message { - sender: msg.sender, - recipient: msg.recipient, - amount: msg.amount, - nonce: c.nonce, - witness_index: Empty::default(), - predicate_gas_used: c.predicate_gas_used, - data: msg.data.clone(), - predicate: as message::MessageSpecification>::Predicate::decompress_with( - &c.predicate, - ctx, - ) - .await?, - predicate_data: c.predicate_data.clone(), - }) - } -} -impl<'a> DecompressibleBy, anyhow::Error> - for Message> -{ - async fn decompress_with( - c: &CompressedMessage< - message::specifications::MessageCoin, - >, - ctx: &DecompressCtx<'a>, - ) -> Result< - Message>, - anyhow::Error, - > { - let msg = ctx.lookup.message(&c.nonce).await?; - Ok(Message { - sender: msg.sender, - recipient: msg.recipient, - amount: msg.amount, - nonce: c.nonce, - witness_index: 
c.witness_index, - predicate_gas_used: Empty::default(), - data: Empty::default(), - predicate: < as message::MessageSpecification>::Predicate as DecompressibleBy< - _, - anyhow::Error, - >>::decompress_with(&c.predicate, ctx) - .await?, - predicate_data: Empty::default(), - }) + witness_index, + predicate_gas_used, + data: Default::default(), + predicate, + predicate_data, + }; + + if let Some(data) = message.data.as_mut_field() { + data.clone_from(&msg.data) + } + + Ok(message) } } -impl<'a> DecompressibleBy, anyhow::Error> - for Message> -{ + +impl<'a> DecompressibleBy> for Mint { async fn decompress_with( - c: &CompressedMessage< - message::specifications::MessageCoin, - >, + c: &Self::Compressed, ctx: &DecompressCtx<'a>, - ) -> Result< - Message>, - anyhow::Error, - > { - let msg = ctx.lookup.message(&c.nonce).await?; - Ok(Message { - sender: msg.sender, - recipient: msg.recipient, - amount: msg.amount, - nonce: c.nonce, - witness_index: Empty::default(), - predicate_gas_used: c.predicate_gas_used, - data: Empty::default(), - predicate: as message::MessageSpecification>::Predicate::decompress_with( - &c.predicate, - ctx, - ) - .await?, - predicate_data: c.predicate_data.clone(), - }) + ) -> anyhow::Result { + Ok(Transaction::mint( + Default::default(), // TODO: what should this we do with this? 
+ c.input_contract.decompress(ctx).await?, + c.output_contract.decompress(ctx).await?, + c.mint_amount.decompress(ctx).await?, + c.mint_asset_id.decompress(ctx).await?, + c.gas_price.decompress(ctx).await?, + )) } } diff --git a/crates/compression/src/context/prepare.rs b/crates/compression/src/context/prepare.rs index 50bd16486f5..04bea6a2524 100644 --- a/crates/compression/src/context/prepare.rs +++ b/crates/compression/src/context/prepare.rs @@ -3,6 +3,7 @@ use std::collections::HashSet; use fuel_core_types::{ fuel_compression::{ CompressibleBy, + ContextError, RegistryKey, }, fuel_tx::*, @@ -26,6 +27,10 @@ pub struct PrepareCtx<'a> { pub accessed_keys: PerRegistryKeyspace>, } +impl ContextError for PrepareCtx<'_> { + type Error = anyhow::Error; +} + fn registry_prepare( keyspace: RegistryKeyspace, value: &T, @@ -40,7 +45,7 @@ fn registry_prepare( Ok(RegistryKey::ZERO) } -impl<'a> CompressibleBy, anyhow::Error> for Address { +impl<'a> CompressibleBy> for Address { async fn compress_with( &self, ctx: &mut PrepareCtx<'a>, @@ -49,7 +54,7 @@ impl<'a> CompressibleBy, anyhow::Error> for Address { } } -impl<'a> CompressibleBy, anyhow::Error> for AssetId { +impl<'a> CompressibleBy> for AssetId { async fn compress_with( &self, ctx: &mut PrepareCtx<'a>, @@ -58,7 +63,7 @@ impl<'a> CompressibleBy, anyhow::Error> for AssetId { } } -impl<'a> CompressibleBy, anyhow::Error> for ContractId { +impl<'a> CompressibleBy> for ContractId { async fn compress_with( &self, ctx: &mut PrepareCtx<'a>, @@ -67,7 +72,7 @@ impl<'a> CompressibleBy, anyhow::Error> for ContractId { } } -impl<'a> CompressibleBy, anyhow::Error> for ScriptCode { +impl<'a> CompressibleBy> for ScriptCode { async fn compress_with( &self, ctx: &mut PrepareCtx<'a>, @@ -76,7 +81,7 @@ impl<'a> CompressibleBy, anyhow::Error> for ScriptCode { } } -impl<'a> CompressibleBy, anyhow::Error> for PredicateCode { +impl<'a> CompressibleBy> for PredicateCode { async fn compress_with( &self, ctx: &mut PrepareCtx<'a>, @@ -85,7 +90,7 @@ 
impl<'a> CompressibleBy, anyhow::Error> for PredicateCode { } } -impl<'a> CompressibleBy, anyhow::Error> for UtxoId { +impl<'a> CompressibleBy> for UtxoId { async fn compress_with( &self, _ctx: &mut PrepareCtx<'a>, diff --git a/crates/compression/src/services/compress.rs b/crates/compression/src/services/compress.rs index 24489f90ef7..ece929b6b9c 100644 --- a/crates/compression/src/services/compress.rs +++ b/crates/compression/src/services/compress.rs @@ -30,17 +30,17 @@ pub struct Task { pub enum TaskRequest { Compress { block: Block, - response: mpsc::Sender, CompressError>>, + response: mpsc::Sender, Error>>, }, } #[derive(Debug)] -pub enum CompressError { +pub enum Error { /// Only the next sequential block can be compressed NotLatest, Other(anyhow::Error), } -impl From for CompressError { +impl From for Error { fn from(err: anyhow::Error) -> Self { Self::Other(err) } @@ -65,9 +65,9 @@ pub async fn compress( db: &mut RocksDb, tx_lookup: &dyn UtxoIdToPointer, block: Block, -) -> Result, CompressError> { +) -> Result, Error> { if *block.header().height() != db.next_block_height()? 
{ - return Err(CompressError::NotLatest); + return Err(Error::NotLatest); } let target = block.transactions().to_vec(); @@ -76,11 +76,9 @@ pub async fn compress( db, accessed_keys: PerRegistryKeyspace::default(), }; - let _ = as CompressibleBy<_, _>>::compress_with( - &target, - &mut prepare_ctx, - ) - .await?; + let _ = + as CompressibleBy<_>>::compress_with(&target, &mut prepare_ctx) + .await?; let mut ctx = CompressCtx { db: prepare_ctx.db, diff --git a/crates/compression/src/services/decompress.rs b/crates/compression/src/services/decompress.rs index 141fd01c8ee..2a289555e09 100644 --- a/crates/compression/src/services/decompress.rs +++ b/crates/compression/src/services/decompress.rs @@ -91,7 +91,7 @@ pub async fn decompress( let ctx = DecompressCtx { db, lookup }; - let transactions = as DecompressibleBy<_, _>>::decompress_with( + let transactions = as DecompressibleBy<_>>::decompress_with( &compressed.transactions, &ctx, ) From 3749902310f829c02323edf323acf582af5b3957 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 10 Sep 2024 13:02:13 +0300 Subject: [PATCH 033/112] Swap to the github version of fuel-vm --- Cargo.lock | 9 +++++++++ Cargo.toml | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 4497999155e..4a09c3f6739 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3149,6 +3149,7 @@ dependencies = [ [[package]] name = "fuel-asm" version = "0.56.0" +source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" dependencies = [ "bitflags 2.6.0", "fuel-types", @@ -3159,6 +3160,7 @@ dependencies = [ [[package]] name = "fuel-compression" version = "0.56.0" +source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" dependencies = [ "fuel-derive", "fuel-types", @@ -3812,6 +3814,7 @@ dependencies = [ [[package]] name = "fuel-crypto" version = "0.56.0" +source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" 
dependencies = [ "coins-bip32", "coins-bip39", @@ -3831,6 +3834,7 @@ dependencies = [ [[package]] name = "fuel-derive" version = "0.56.0" +source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" dependencies = [ "proc-macro2", "quote", @@ -3850,6 +3854,7 @@ dependencies = [ [[package]] name = "fuel-merkle" version = "0.56.0" +source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" dependencies = [ "derive_more", "digest 0.10.7", @@ -3863,10 +3868,12 @@ dependencies = [ [[package]] name = "fuel-storage" version = "0.56.0" +source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" [[package]] name = "fuel-tx" version = "0.56.0" +source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" dependencies = [ "bitflags 2.6.0", "derivative", @@ -3889,6 +3896,7 @@ dependencies = [ [[package]] name = "fuel-types" version = "0.56.0" +source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" dependencies = [ "fuel-derive", "hex", @@ -3899,6 +3907,7 @@ dependencies = [ [[package]] name = "fuel-vm" version = "0.56.0" +source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" dependencies = [ "anyhow", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index 47f8770103d..7fc344d2377 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -88,7 +88,7 @@ fuel-core-xtask = { version = "0.0.0", path = "./xtask" } fuel-gas-price-algorithm = { version = "0.35.0", path = "crates/fuel-gas-price-algorithm" } # Fuel dependencies -fuel-vm-private = { path = "../fuel-vm/fuel-vm", package = "fuel-vm", default-features = false } +fuel-vm-private = { git = "https://github.com/FuelLabs/fuel-vm", package = "fuel-vm", default-features = false } # fuel-vm-private = { version = "0.56.0", package = "fuel-vm", default-features = false } # Common dependencies From f79f6b3bbee31c6ed4f050a8c5ad517df692deb3 Mon 
Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 11 Sep 2024 03:21:45 +0300 Subject: [PATCH 034/112] Remove macOS .DS_Store files --- crates/.DS_Store | Bin 6148 -> 0 bytes crates/compression/.DS_Store | Bin 6148 -> 0 bytes crates/compression/src/.DS_Store | Bin 6148 -> 0 bytes 3 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 crates/.DS_Store delete mode 100644 crates/compression/.DS_Store delete mode 100644 crates/compression/src/.DS_Store diff --git a/crates/.DS_Store b/crates/.DS_Store deleted file mode 100644 index 3c7ea968b1301b2beaf2b2d2c841a790cbffb6e8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKJ8Hu~5S>X}2;8`IxmU;y7NMNL7ckf%7#!KiB(*A^D@V(l4*?;kaN#Dr5i@Uh zG;f7oq0xwlwx9bKk(G!va6`FTn48@e~t9E@g-M4>?cx z{nuvbkEi}UPQOiOmI_b-DnJFO02R1X0qec6W*Nvx1*iZO_*KBZ4+U;m6URXRbRhT$ z09+vLhPBTUz+wqtO&kM}foV{ILDd{FH0a2etgDG*V9-T#_|SZ^=7ggDbevzjT(kx< zQUNM3Rp2?cE9?KS@IU7NDTzBOKn4Dl0=nonU5i)B-a30Z>$L^GgWNyd0`h(h%tVH*;V z(1UO;njL?U0h+rtXn+TO=;F*x=kGL>_kJjSY^TL>hU$DOK?@)YF=pIZQFU(~2T;5t^aZg2vxXqYiSyZtDXQ5QXv=t&eKGr$Zm1IxjH*}bgda{Mm; zB{RSbERX@(A8b@Y$6%^aZ5>#sCjcT{BbA^{*G0;)4LSx>jo5-BbSk1w73PT{bUONN z6XzIAHR^N_=J6rSmxXzu2=#UB-&Wxu9F1Hu1I)lI16f@y(fNP!`}==3iEGRNGw`n% z5V@w?tmBZ(**Z5kI%{pzJ5&;iOEpeY&`?J)#?nz-Mpc4-n+!z9V5$*4D0~r6G;qNT HER=ySPTOGu diff --git a/crates/compression/src/.DS_Store b/crates/compression/src/.DS_Store deleted file mode 100644 index 598f12c8b7116d6d6dbdc62982b93bf94a4a78f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKy-EZ@4F000EZAIWIZq(?2E%a{7FJevvUe83!pf~$?>qVcK8k)xMsfGX!bU_A zn0%R;B$NGM=LdkSp0?+}5?pcg$f3O zfnXpQ2nK%40MBfd=EN~{Fc1s`1D_1&{*c%dbHUNkwhk(L0uc2YZ9-cw&Ki>n=7OUm zXDH&KL=Tnn#1IeXcq)Fm;OOY#kn;GDvhv5{MQnBEPv#EEj-i8rU|`C?p?8;h{$KEy zsXp?jA<+v4f`Na=fGpP6>lHt$oUMDGr)O WM@P+~;}%Ygi+~XlDj4_$2HpV#e?a&E From e3f8f615ae1f315937d6459a425ab0b1e0aece95 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 11 Sep 2024 04:26:04 
+0300 Subject: [PATCH 035/112] Merge da compression db into the offchain db --- Cargo.lock | 3 +- crates/compression/Cargo.toml | 10 +- crates/compression/src/compression_tests.rs | 72 ++++++---- crates/compression/src/context/compress.rs | 43 +++--- crates/compression/src/context/decompress.rs | 119 ++++++++++------- crates/compression/src/context/prepare.rs | 37 +++--- crates/compression/src/db.rs | 118 ----------------- crates/compression/src/lib.rs | 3 +- crates/compression/src/ports.rs | 49 ++++++- crates/compression/src/services/compress.rs | 61 ++------- crates/compression/src/services/decompress.rs | 69 +++------- crates/compression/src/tables.rs | 53 +++----- crates/fuel-core/Cargo.toml | 2 + crates/fuel-core/src/graphql_api/database.rs | 16 +++ crates/fuel-core/src/graphql_api/ports.rs | 18 +++ crates/fuel-core/src/graphql_api/storage.rs | 12 ++ .../src/graphql_api/storage/da_compression.rs | 123 +++++++++++++++++ .../src/graphql_api/worker_service.rs | 124 +++++++++++++++++- crates/fuel-core/src/lib.rs | 2 +- crates/fuel-core/src/query.rs | 2 + crates/fuel-core/src/query/da_compressed.rs | 16 +++ crates/fuel-core/src/schema.rs | 2 + crates/fuel-core/src/schema/da_compressed.rs | 48 +++++++ .../service/adapters/graphql_api/off_chain.rs | 18 ++- crates/types/src/blockchain/block.rs | 1 + 25 files changed, 628 insertions(+), 393 deletions(-) delete mode 100644 crates/compression/src/db.rs create mode 100644 crates/fuel-core/src/graphql_api/storage/da_compression.rs create mode 100644 crates/fuel-core/src/query/da_compressed.rs create mode 100644 crates/fuel-core/src/schema/da_compressed.rs diff --git a/Cargo.lock b/Cargo.lock index 4a09c3f6739..bf9c057b0b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3181,6 +3181,7 @@ dependencies = [ "enum-iterator", "fuel-core", "fuel-core-chain-config", + "fuel-core-compression", "fuel-core-consensus-module", "fuel-core-database", "fuel-core-executor", @@ -3368,9 +3369,9 @@ dependencies = [ "fuel-core-types", "postcard", 
"rand", - "rocksdb", "serde", "tempfile", + "thiserror", "tokio", ] diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 1783651c02d..149e2dbbb79 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -15,18 +15,16 @@ anyhow = { workspace = true } async-trait = { workspace = true } fuel-core-types = { workspace = true, features = ["serde", "da-compression"] } postcard = { version = "1.0", features = ["use-std"] } -rocksdb = { version = "0.21", default-features = false, optional = true } serde = { version = "1.0", features = ["derive"] } -tokio = { workspace = true, features = ["sync"] } +thiserror = { workspace = true } +rand = { workspace = true, optional = true} [dev-dependencies] bimap = { version = "0.6" } bincode = { version = "1.3" } fuel-core-compression = { workspace = true, features = ["test-helpers"] } -rand = { workspace = true } tempfile = "3" +tokio = { workspace = true, features = ["sync"] } [features] -default = ["rocksdb"] -rocksdb = ["dep:rocksdb"] -test-helpers = ["fuel-core-types/test-helpers", "fuel-core-types/random", "fuel-core-types/std"] +test-helpers = ["dep:rand", "fuel-core-types/test-helpers", "fuel-core-types/random", "fuel-core-types/std"] diff --git a/crates/compression/src/compression_tests.rs b/crates/compression/src/compression_tests.rs index 57b9832d6be..720899e52d0 100644 --- a/crates/compression/src/compression_tests.rs +++ b/crates/compression/src/compression_tests.rs @@ -38,26 +38,57 @@ use fuel_core_types::{ tai64::Tai64, }; use rand::Rng; -use tempfile::TempDir; use crate::{ - db::RocksDb, ports::{ CoinInfo, HistoryLookup, MessageInfo, + TemporalRegistry, UtxoIdToPointer, }, services, }; -/// Just stores the looked-up tx pointers in a map, instead of actually looking them up. 
#[derive(Default)] pub struct MockTxDb { utxo_id_mapping: Arc>>, coins: HashMap, } +impl TemporalRegistry for &mut MockTxDb { + fn read_registry( + &self, + keyspace: crate::RegistryKeyspace, + key: fuel_core_types::fuel_compression::RegistryKey, + ) -> anyhow::Result> { + todo!() + } + + fn write_registry( + &mut self, + keyspace: crate::RegistryKeyspace, + key: fuel_core_types::fuel_compression::RegistryKey, + value: Vec, + ) -> anyhow::Result<()> { + todo!() + } + + fn registry_index_lookup( + &self, + keyspace: crate::RegistryKeyspace, + value: Vec, + ) -> anyhow::Result> { + todo!() + } + + fn next_block_height( + &self, + ) -> anyhow::Result { + todo!() + } +} + impl MockTxDb { fn create_coin(&mut self, rng: &mut R, info: CoinInfo) -> UtxoId { let utxo_id: UtxoId = rng.gen(); @@ -66,9 +97,8 @@ impl MockTxDb { } } -#[async_trait::async_trait] -impl UtxoIdToPointer for MockTxDb { - async fn lookup(&self, utxo_id: UtxoId) -> anyhow::Result { +impl UtxoIdToPointer for &mut MockTxDb { + fn lookup(&self, utxo_id: UtxoId) -> anyhow::Result { let mut g = self.utxo_id_mapping.lock().unwrap(); if !g.contains_left(&utxo_id) { let key = g.len() as u32; // Just obtain an unique key @@ -84,23 +114,22 @@ impl UtxoIdToPointer for MockTxDb { } } -#[async_trait::async_trait] -impl HistoryLookup for MockTxDb { - async fn utxo_id(&self, c: &CompressedUtxoId) -> anyhow::Result { +impl HistoryLookup for &mut MockTxDb { + fn utxo_id(&self, c: &CompressedUtxoId) -> anyhow::Result { let g = self.utxo_id_mapping.lock().unwrap(); g.get_by_right(&c).cloned().ok_or_else(|| { anyhow::anyhow!("CompressedUtxoId not found in mock db: {:?}", c) }) } - async fn coin(&self, utxo_id: &UtxoId) -> anyhow::Result { + fn coin(&self, utxo_id: &UtxoId) -> anyhow::Result { self.coins .get(&utxo_id) .cloned() .ok_or_else(|| anyhow::anyhow!("Coin not found in mock db: {:?}", utxo_id)) } - async fn message(&self, _nonce: &Nonce) -> anyhow::Result { + fn message(&self, _nonce: &Nonce) -> anyhow::Result { 
todo!(); } } @@ -114,17 +143,13 @@ async fn same_compact_tx_is_smaller_in_next_block() { .finalize() .into(); - let tmpdir = TempDir::new().unwrap(); - - let mut db = RocksDb::open(tmpdir.path()).unwrap(); - let tx_db = MockTxDb::default(); + let mut tx_db = MockTxDb::default(); let mut sizes = Vec::new(); for i in 0..3 { let compressed = services::compress::compress( - &mut db, - &tx_db, - Block::new( + &mut tx_db, + &Block::new( PartialBlockHeader { application: ApplicationHeader { da_height: DaBlockHeight::default(), @@ -162,8 +187,6 @@ async fn compress_decompress_roundtrip() { use rand::SeedableRng; let mut rng = rand::rngs::StdRng::seed_from_u64(2322u64); - let tmpdir = TempDir::new().unwrap(); - let mut db = RocksDb::open(tmpdir.path()).unwrap(); let mut tx_db = MockTxDb::default(); let mut original_blocks = Vec::new(); @@ -216,22 +239,17 @@ async fn compress_decompress_roundtrip() { .expect("Invalid block header"); original_blocks.push(block.clone()); compressed_blocks.push( - services::compress::compress(&mut db, &tx_db, block) + services::compress::compress(&mut tx_db, &block) .await .expect("Failed to compress"), ); } - db.db.flush().unwrap(); - drop(tmpdir); - let tmpdir2 = TempDir::new().unwrap(); - let mut db = RocksDb::open(tmpdir2.path()).unwrap(); - for (original, compressed) in original_blocks .into_iter() .zip(compressed_blocks.into_iter()) { - let decompressed = services::decompress::decompress(&mut db, &tx_db, compressed) + let decompressed = services::decompress::decompress(&mut tx_db, compressed) .await .expect("Decompression failed"); assert_eq!(PartialFuelBlock::from(original), decompressed); diff --git a/crates/compression/src/context/compress.rs b/crates/compression/src/context/compress.rs index d2d3c6c75d3..988f6f47367 100644 --- a/crates/compression/src/context/compress.rs +++ b/crates/compression/src/context/compress.rs @@ -18,9 +18,8 @@ use fuel_core_types::{ }; use crate::{ - db::RocksDb, eviction_policy::CacheEvictor, - 
ports::UtxoIdToPointer, + services::compress::CompressDb, tables::{ PerRegistryKeyspace, PostcardSerialized, @@ -28,28 +27,28 @@ use crate::{ }, }; -pub struct CompressCtx<'a> { - pub db: &'a mut RocksDb, - pub tx_lookup: &'a dyn UtxoIdToPointer, +pub struct CompressCtx { + pub db: D, pub cache_evictor: CacheEvictor, /// Changes to the temporary registry, to be included in the compressed block header pub changes: PerRegistryKeyspace>, } -impl ContextError for CompressCtx<'_> { +impl ContextError for CompressCtx { type Error = anyhow::Error; } -fn registry_substitute( +fn registry_substitute( keyspace: RegistryKeyspace, value: &T, - ctx: &mut CompressCtx<'_>, + ctx: &mut CompressCtx, ) -> anyhow::Result { if *value == T::default() { return Ok(RegistryKey::DEFAULT_VALUE); } - if let Some(found) = ctx.db.registry_index_lookup(keyspace, value)? { + let ser_value = postcard::to_stdvec(value)?; + if let Some(found) = ctx.db.registry_index_lookup(keyspace, ser_value)? { return Ok(found); } @@ -59,56 +58,56 @@ fn registry_substitute( Ok(key) } -impl<'a> CompressibleBy> for Address { +impl CompressibleBy> for Address { async fn compress_with( &self, - ctx: &mut CompressCtx<'a>, + ctx: &mut CompressCtx, ) -> anyhow::Result { registry_substitute(RegistryKeyspace::address, self, ctx) } } -impl<'a> CompressibleBy> for AssetId { +impl CompressibleBy> for AssetId { async fn compress_with( &self, - ctx: &mut CompressCtx<'a>, + ctx: &mut CompressCtx, ) -> anyhow::Result { registry_substitute(RegistryKeyspace::asset_id, self, ctx) } } -impl<'a> CompressibleBy> for ContractId { +impl CompressibleBy> for ContractId { async fn compress_with( &self, - ctx: &mut CompressCtx<'a>, + ctx: &mut CompressCtx, ) -> anyhow::Result { registry_substitute(RegistryKeyspace::contract_id, self, ctx) } } -impl<'a> CompressibleBy> for ScriptCode { +impl CompressibleBy> for ScriptCode { async fn compress_with( &self, - ctx: &mut CompressCtx<'a>, + ctx: &mut CompressCtx, ) -> anyhow::Result { 
registry_substitute(RegistryKeyspace::script_code, self, ctx) } } -impl<'a> CompressibleBy> for PredicateCode { +impl CompressibleBy> for PredicateCode { async fn compress_with( &self, - ctx: &mut CompressCtx<'a>, + ctx: &mut CompressCtx, ) -> anyhow::Result { registry_substitute(RegistryKeyspace::script_code, self, ctx) } } -impl<'a> CompressibleBy> for UtxoId { +impl CompressibleBy> for UtxoId { async fn compress_with( &self, - ctx: &mut CompressCtx<'a>, + ctx: &mut CompressCtx, ) -> anyhow::Result { - ctx.tx_lookup.lookup(*self).await + ctx.db.lookup(*self) } } diff --git a/crates/compression/src/context/decompress.rs b/crates/compression/src/context/decompress.rs index 634de30b3ab..9833e654e81 100644 --- a/crates/compression/src/context/decompress.rs +++ b/crates/compression/src/context/decompress.rs @@ -31,88 +31,104 @@ use fuel_core_types::{ }; use crate::{ - db::RocksDb, - ports::HistoryLookup, + services::decompress::{ + DecompressDb, + DecompressError, + }, tables::RegistryKeyspace, }; -pub struct DecompressCtx<'a> { - pub db: &'a RocksDb, - pub lookup: &'a dyn HistoryLookup, +pub struct DecompressCtx { + pub db: D, +} + +impl ContextError for DecompressCtx { + type Error = DecompressError; } -impl<'a> ContextError for DecompressCtx<'a> { - type Error = anyhow::Error; +fn registry_desubstitute< + D: DecompressDb, + T: serde::de::DeserializeOwned + Default + PartialEq, +>( + keyspace: RegistryKeyspace, + key: RegistryKey, + ctx: &DecompressCtx, +) -> Result { + if key == RegistryKey::DEFAULT_VALUE { + return Ok(T::default()); + } + Ok(postcard::from_bytes(&ctx.db.read_registry(keyspace, key)?)?) 
} -impl<'a> DecompressibleBy> for Address { +impl DecompressibleBy> for Address { async fn decompress_with( c: &RegistryKey, - ctx: &DecompressCtx<'a>, - ) -> anyhow::Result { - ctx.db.read_registry(RegistryKeyspace::address, *c) + ctx: &DecompressCtx, + ) -> Result { + registry_desubstitute(RegistryKeyspace::address, *c, ctx) } } -impl<'a> DecompressibleBy> for AssetId { +impl DecompressibleBy> for AssetId { async fn decompress_with( c: &RegistryKey, - ctx: &DecompressCtx<'a>, - ) -> anyhow::Result { - ctx.db.read_registry(RegistryKeyspace::asset_id, *c) + ctx: &DecompressCtx, + ) -> Result { + registry_desubstitute(RegistryKeyspace::asset_id, *c, ctx) } } -impl<'a> DecompressibleBy> for ContractId { +impl DecompressibleBy> for ContractId { async fn decompress_with( c: &RegistryKey, - ctx: &DecompressCtx<'a>, - ) -> anyhow::Result { - ctx.db.read_registry(RegistryKeyspace::contract_id, *c) + ctx: &DecompressCtx, + ) -> Result { + registry_desubstitute(RegistryKeyspace::contract_id, *c, ctx) } } -impl<'a> DecompressibleBy> for ScriptCode { +impl DecompressibleBy> for ScriptCode { async fn decompress_with( c: &RegistryKey, - ctx: &DecompressCtx<'a>, - ) -> anyhow::Result { - ctx.db.read_registry(RegistryKeyspace::script_code, *c) + ctx: &DecompressCtx, + ) -> Result { + registry_desubstitute(RegistryKeyspace::script_code, *c, ctx) } } -impl<'a> DecompressibleBy> for PredicateCode { +impl DecompressibleBy> for PredicateCode { async fn decompress_with( c: &RegistryKey, - ctx: &DecompressCtx<'a>, - ) -> anyhow::Result { - ctx.db.read_registry(RegistryKeyspace::script_code, *c) + ctx: &DecompressCtx, + ) -> Result { + registry_desubstitute(RegistryKeyspace::predicate_code, *c, ctx) } } -impl<'a> DecompressibleBy> for UtxoId { +impl DecompressibleBy> for UtxoId { async fn decompress_with( c: &CompressedUtxoId, - ctx: &DecompressCtx<'a>, - ) -> anyhow::Result { - ctx.lookup.utxo_id(c).await + ctx: &DecompressCtx, + ) -> Result { + Ok(ctx.db.utxo_id(c)?) 
} } -impl<'a, Specification> DecompressibleBy> for Coin +impl DecompressibleBy> for Coin where + D: DecompressDb, Specification: CoinSpecification, - Specification::Predicate: DecompressibleBy>, - Specification::PredicateData: DecompressibleBy>, - Specification::PredicateGasUsed: DecompressibleBy>, - Specification::Witness: DecompressibleBy>, + Specification::Predicate: DecompressibleBy>, + Specification::PredicateData: DecompressibleBy>, + Specification::PredicateGasUsed: DecompressibleBy>, + Specification::Witness: DecompressibleBy>, { async fn decompress_with( c: & as Compressible>::Compressed, - ctx: &DecompressCtx<'a>, - ) -> anyhow::Result> { + ctx: &DecompressCtx, + ) -> Result, DecompressError> { let utxo_id = UtxoId::decompress_with(&c.utxo_id, ctx).await?; - let coin_info = ctx.lookup.coin(&utxo_id).await?; + let coin_info = ctx.db.coin(&utxo_id)?; let witness_index = c.witness_index.decompress(ctx).await?; let predicate_gas_used = c.predicate_gas_used.decompress(ctx).await?; let predicate = c.predicate.decompress(ctx).await?; @@ -131,20 +147,21 @@ where } } -impl<'a, Specification> DecompressibleBy> for Message +impl DecompressibleBy> for Message where + D: DecompressDb, Specification: MessageSpecification, - Specification::Data: DecompressibleBy> + Default, - Specification::Predicate: DecompressibleBy>, - Specification::PredicateData: DecompressibleBy>, - Specification::PredicateGasUsed: DecompressibleBy>, - Specification::Witness: DecompressibleBy>, + Specification::Data: DecompressibleBy> + Default, + Specification::Predicate: DecompressibleBy>, + Specification::PredicateData: DecompressibleBy>, + Specification::PredicateGasUsed: DecompressibleBy>, + Specification::Witness: DecompressibleBy>, { async fn decompress_with( c: & as Compressible>::Compressed, - ctx: &DecompressCtx<'a>, - ) -> anyhow::Result> { - let msg = ctx.lookup.message(&c.nonce).await?; + ctx: &DecompressCtx, + ) -> Result, DecompressError> { + let msg = ctx.db.message(&c.nonce)?; let 
witness_index = c.witness_index.decompress(ctx).await?; let predicate_gas_used = c.predicate_gas_used.decompress(ctx).await?; let predicate = c.predicate.decompress(ctx).await?; @@ -169,11 +186,11 @@ where } } -impl<'a> DecompressibleBy> for Mint { +impl DecompressibleBy> for Mint { async fn decompress_with( c: &Self::Compressed, - ctx: &DecompressCtx<'a>, - ) -> anyhow::Result { + ctx: &DecompressCtx, + ) -> Result { Ok(Transaction::mint( Default::default(), // TODO: what should this we do with this? c.input_contract.decompress(ctx).await?, diff --git a/crates/compression/src/context/prepare.rs b/crates/compression/src/context/prepare.rs index 04bea6a2524..e97f8defb21 100644 --- a/crates/compression/src/context/prepare.rs +++ b/crates/compression/src/context/prepare.rs @@ -11,7 +11,7 @@ use fuel_core_types::{ use input::PredicateCode; use crate::{ - db::RocksDb, + services::compress::CompressDb, tables::{ PerRegistryKeyspace, RegistryKeyspace, @@ -20,80 +20,81 @@ use crate::{ /// Preparation pass through the block to collect all keys accessed during compression. /// Returns dummy values. The resulting "compressed block" should be discarded. -pub struct PrepareCtx<'a> { +pub struct PrepareCtx { /// Database handle - pub db: &'a mut RocksDb, + pub db: D, /// Keys accessed during compression. Will not be overwritten. pub accessed_keys: PerRegistryKeyspace>, } -impl ContextError for PrepareCtx<'_> { +impl ContextError for PrepareCtx { type Error = anyhow::Error; } -fn registry_prepare( +fn registry_prepare( keyspace: RegistryKeyspace, value: &T, - ctx: &mut PrepareCtx<'_>, + ctx: &mut PrepareCtx, ) -> anyhow::Result { if *value == T::default() { return Ok(RegistryKey::ZERO); } + let value = postcard::to_stdvec(value)?; if let Some(found) = ctx.db.registry_index_lookup(keyspace, value)? 
{ ctx.accessed_keys[keyspace].insert(found); } Ok(RegistryKey::ZERO) } -impl<'a> CompressibleBy> for Address { +impl CompressibleBy> for Address { async fn compress_with( &self, - ctx: &mut PrepareCtx<'a>, + ctx: &mut PrepareCtx, ) -> anyhow::Result { registry_prepare(RegistryKeyspace::address, self, ctx) } } -impl<'a> CompressibleBy> for AssetId { +impl CompressibleBy> for AssetId { async fn compress_with( &self, - ctx: &mut PrepareCtx<'a>, + ctx: &mut PrepareCtx, ) -> anyhow::Result { registry_prepare(RegistryKeyspace::asset_id, self, ctx) } } -impl<'a> CompressibleBy> for ContractId { +impl CompressibleBy> for ContractId { async fn compress_with( &self, - ctx: &mut PrepareCtx<'a>, + ctx: &mut PrepareCtx, ) -> anyhow::Result { registry_prepare(RegistryKeyspace::contract_id, self, ctx) } } -impl<'a> CompressibleBy> for ScriptCode { +impl CompressibleBy> for ScriptCode { async fn compress_with( &self, - ctx: &mut PrepareCtx<'a>, + ctx: &mut PrepareCtx, ) -> anyhow::Result { registry_prepare(RegistryKeyspace::script_code, self, ctx) } } -impl<'a> CompressibleBy> for PredicateCode { +impl CompressibleBy> for PredicateCode { async fn compress_with( &self, - ctx: &mut PrepareCtx<'a>, + ctx: &mut PrepareCtx, ) -> anyhow::Result { registry_prepare(RegistryKeyspace::script_code, self, ctx) } } -impl<'a> CompressibleBy> for UtxoId { +impl CompressibleBy> for UtxoId { async fn compress_with( &self, - _ctx: &mut PrepareCtx<'a>, + _ctx: &mut PrepareCtx, ) -> anyhow::Result { Ok(CompressedUtxoId { tx_pointer: TxPointer::default(), diff --git a/crates/compression/src/db.rs b/crates/compression/src/db.rs deleted file mode 100644 index 384d9b7be51..00000000000 --- a/crates/compression/src/db.rs +++ /dev/null @@ -1,118 +0,0 @@ -use std::path::Path; - -use anyhow::bail; -use fuel_core_types::{ - fuel_compression::RegistryKey, - fuel_types::BlockHeight, -}; - -use crate::tables::RegistryKeyspace; - -/// Database that holds data needed by the block compression only -pub struct 
RocksDb { - pub(crate) db: rocksdb::DB, -} - -impl RocksDb { - pub fn open>(path: P) -> anyhow::Result { - use rocksdb::{ - ColumnFamilyDescriptor, - Options, - DB, - }; - - let mut db_opts = Options::default(); - db_opts.create_missing_column_families(true); - db_opts.create_if_missing(true); - Ok(Self { - db: DB::open_cf_descriptors( - &db_opts, - path, - vec![ - // Meta table holding misc data - ColumnFamilyDescriptor::new("meta", Options::default()), - // Temporal registry key:value pairs, with key as - // null-separated (table, key) pair - ColumnFamilyDescriptor::new("temporal", Options::default()), - // Reverse index into temporal registry values, with key as - // null-separated (table, indexed_value) pair - ColumnFamilyDescriptor::new("temporal_index", Options::default()), - ], - )?, - }) - } -} - -impl RocksDb { - pub fn read_registry( - &self, - keyspace: RegistryKeyspace, - key: RegistryKey, - ) -> anyhow::Result - where - T: serde::de::DeserializeOwned + Default, - { - if key == RegistryKey::DEFAULT_VALUE { - return Ok(T::default()); - } - - let db_key: Vec = - keyspace.name().bytes().chain(core::iter::once(0)).collect(); - let db_key = postcard::to_extend(&key, db_key).expect("Never fails"); - - let cf = self.db.cf_handle("temporal").unwrap(); - let Some(bytes) = self.db.get_cf(&cf, &db_key)? else { - bail!("Key {keyspace:?}:{key:?} not found"); - }; - Ok(postcard::from_bytes(&bytes)?) - } - - pub fn registry_index_lookup( - &self, - keyspace: RegistryKeyspace, - value: V, - ) -> anyhow::Result> { - let db_key: Vec = - keyspace.name().bytes().chain(core::iter::once(0)).collect(); - let db_key = postcard::to_extend(&value, db_key).expect("Never fails"); - - let cf_index = self.db.cf_handle("temporal_index").unwrap(); - let Some(k) = self.db.get_cf(&cf_index, db_key)? 
else { - return Ok(None); - }; - Ok(Some(postcard::from_bytes(&k)?)) - } -} - -impl RocksDb { - pub fn next_block_height(&self) -> anyhow::Result { - let cf_meta = self.db.cf_handle("meta").unwrap(); - let Some(bytes) = self.db.get_cf(&cf_meta, b"current_block")? else { - return Ok(BlockHeight::default()); - }; - debug_assert!(bytes.len() == 4); - let mut buffer = [0u8; 4]; - buffer.copy_from_slice(&bytes[..]); - Ok(BlockHeight::from(buffer)) - } - - pub fn increment_block_height(&self) -> anyhow::Result<()> { - // TODO: potential TOCTOU bug here - let cf_meta = self.db.cf_handle("meta").unwrap(); - let old_bh = match self.db.get_cf(&cf_meta, b"current_block")? { - Some(bytes) => { - debug_assert!(bytes.len() == 4); - let mut buffer = [0u8; 4]; - buffer.copy_from_slice(&bytes[..]); - BlockHeight::from(buffer) - } - None => BlockHeight::default(), - }; - let new_bh = old_bh - .succ() - .ok_or_else(|| anyhow::anyhow!("Block height overflow"))?; - self.db - .put_cf(&cf_meta, b"current_block", new_bh.to_bytes())?; - Ok(()) - } -} diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 5990d531a53..74d2d80c291 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -1,4 +1,3 @@ -pub mod db; mod eviction_policy; pub mod ports; mod tables; @@ -12,6 +11,8 @@ mod context { pub mod prepare; } +pub use tables::RegistryKeyspace; + #[cfg(test)] mod compression_tests; diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index 127b0233c14..08c6481cc79 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -1,6 +1,7 @@ //! Ports this service requires to function. use fuel_core_types::{ + fuel_compression::RegistryKey, fuel_tx::{ Address, AssetId, @@ -8,19 +9,53 @@ use fuel_core_types::{ UtxoId, Word, }, - fuel_types::Nonce, + fuel_types::{ + BlockHeight, + Nonce, + }, }; -#[async_trait::async_trait] +use crate::tables::RegistryKeyspace; + +/// Rolling cache for compression. 
+/// Holds the latest state which can be event sourced from the compressed blocks. +/// The changes done using this trait in a single call to `compress` or `decompress` +/// must be committed atomically, after which block height must be incremented. +pub trait TemporalRegistry { + /// Reads a value from the registry at its current height. + fn read_registry( + &self, + keyspace: RegistryKeyspace, + key: RegistryKey, + ) -> anyhow::Result>; + + /// Writes a value to the registry at its current height. + fn write_registry( + &mut self, + keyspace: RegistryKeyspace, + key: RegistryKey, + value: Vec, + ) -> anyhow::Result<()>; + + /// Lookup registry key by the value. + fn registry_index_lookup( + &self, + keyspace: RegistryKeyspace, + value: Vec, + ) -> anyhow::Result>; + + /// Get the block height for the next block, i.e. the block currently being processed. + fn next_block_height(&self) -> anyhow::Result; +} + pub trait UtxoIdToPointer { - async fn lookup(&self, utxo_id: UtxoId) -> anyhow::Result; + fn lookup(&self, utxo_id: UtxoId) -> anyhow::Result; } -#[async_trait::async_trait] pub trait HistoryLookup { - async fn utxo_id(&self, c: &CompressedUtxoId) -> anyhow::Result; - async fn coin(&self, utxo_id: &UtxoId) -> anyhow::Result; - async fn message(&self, nonce: &Nonce) -> anyhow::Result; + fn utxo_id(&self, c: &CompressedUtxoId) -> anyhow::Result; + fn coin(&self, utxo_id: &UtxoId) -> anyhow::Result; + fn message(&self, nonce: &Nonce) -> anyhow::Result; } #[derive(Debug, Clone)] diff --git a/crates/compression/src/services/compress.rs b/crates/compression/src/services/compress.rs index ece929b6b9c..9252d11d76d 100644 --- a/crates/compression/src/services/compress.rs +++ b/crates/compression/src/services/compress.rs @@ -1,19 +1,19 @@ use fuel_core_types::{ + blockchain::block::Block, fuel_compression::CompressibleBy, fuel_tx::Transaction, }; -use tokio::sync::mpsc; - -use fuel_core_types::blockchain::block::Block; use crate::{ context::{ compress::CompressCtx, 
prepare::PrepareCtx, }, - db::RocksDb, eviction_policy::CacheEvictor, - ports::UtxoIdToPointer, + ports::{ + TemporalRegistry, + UtxoIdToPointer, + }, tables::{ PerRegistryKeyspace, RegistrationsPerTable, @@ -22,50 +22,18 @@ use crate::{ Header, }; -/// Task handle -pub struct Task { - request_receiver: mpsc::Receiver, -} - -pub enum TaskRequest { - Compress { - block: Block, - response: mpsc::Sender, Error>>, - }, -} - -#[derive(Debug)] +#[derive(Debug, thiserror::Error)] pub enum Error { - /// Only the next sequential block can be compressed + #[error("Only the next sequential block can be compressed")] NotLatest, - Other(anyhow::Error), -} -impl From for Error { - fn from(err: anyhow::Error) -> Self { - Self::Other(err) - } + #[error("Unknown compression error")] + Other(#[from] anyhow::Error), } -pub async fn run( - mut db: RocksDb, - tx_lookup: Box, - mut request_receiver: mpsc::Receiver, -) { - while let Some(req) = request_receiver.recv().await { - match req { - TaskRequest::Compress { block, response } => { - let reply = compress(&mut db, &*tx_lookup, block).await; - response.send(reply).await.expect("Failed to respond"); - } - } - } -} +pub trait CompressDb: TemporalRegistry + UtxoIdToPointer {} +impl CompressDb for T where T: TemporalRegistry + UtxoIdToPointer {} -pub async fn compress( - db: &mut RocksDb, - tx_lookup: &dyn UtxoIdToPointer, - block: Block, -) -> Result, Error> { +pub async fn compress(db: D, block: &Block) -> Result, Error> { if *block.header().height() != db.next_block_height()? 
{ return Err(Error::NotLatest); } @@ -82,7 +50,6 @@ pub async fn compress( let mut ctx = CompressCtx { db: prepare_ctx.db, - tx_lookup, cache_evictor: CacheEvictor { keep_keys: prepare_ctx.accessed_keys, }, @@ -93,9 +60,7 @@ pub async fn compress( let registrations = RegistrationsPerTable::try_from(registrations)?; // Apply changes to the db - // TODO: these two operations should be atomic together - registrations.write_to_db(db)?; - db.increment_block_height()?; + registrations.write_to_registry(&mut ctx.db)?; // Construct the actual compacted block let compact = CompressedBlockPayload { diff --git a/crates/compression/src/services/decompress.rs b/crates/compression/src/services/decompress.rs index 2a289555e09..50c9245dc78 100644 --- a/crates/compression/src/services/decompress.rs +++ b/crates/compression/src/services/decompress.rs @@ -1,5 +1,3 @@ -use tokio::sync::mpsc; - use fuel_core_types::{ blockchain::{ block::PartialFuelBlock, @@ -16,64 +14,31 @@ use fuel_core_types::{ use crate::{ context::decompress::DecompressCtx, - db::RocksDb, - ports::HistoryLookup, + ports::{ + HistoryLookup, + TemporalRegistry, + }, CompressedBlockPayload, }; -/// Task handle -pub struct Task { - request_receiver: mpsc::Receiver, -} - -pub enum TaskRequest { - Decompress { - block: Vec, - response: mpsc::Sender>, - }, -} - -#[derive(Debug)] +#[derive(Debug, thiserror::Error)] pub enum DecompressError { - /// Only the next sequential block can be decompressed + #[error("Only the next sequential block can be decompressed")] NotLatest, - /// Unknown compression version + #[error("Unknown compression version")] UnknownVersion, - /// Deserialization error - Postcard(postcard::Error), + #[error("Deserialization error: {0}")] + Postcard(#[from] postcard::Error), /// Other errors - Other(anyhow::Error), -} -impl From for DecompressError { - fn from(err: postcard::Error) -> Self { - Self::Postcard(err) - } -} -impl From for DecompressError { - fn from(err: anyhow::Error) -> Self { - 
Self::Other(err) - } + #[error("Unknown error: {0}")] + Other(#[from] anyhow::Error), } -pub async fn run( - mut db: RocksDb, - lookup: Box, - - mut request_receiver: mpsc::Receiver, -) { - while let Some(req) = request_receiver.recv().await { - match req { - TaskRequest::Decompress { block, response } => { - let reply = decompress(&mut db, &*lookup, block).await; - response.send(reply).await.expect("Failed to respond"); - } - } - } -} +pub trait DecompressDb: TemporalRegistry + HistoryLookup {} +impl DecompressDb for T where T: TemporalRegistry + HistoryLookup {} -pub async fn decompress( - db: &mut RocksDb, - lookup: &dyn HistoryLookup, +pub async fn decompress( + mut db: D, block: Vec, ) -> Result { if block.is_empty() || block[0] != 0 { @@ -87,9 +52,9 @@ pub async fn decompress( // return Err(DecompressError::NotLatest); // } - compressed.registrations.write_to_db(db)?; + compressed.registrations.write_to_registry(&mut db)?; - let ctx = DecompressCtx { db, lookup }; + let ctx = DecompressCtx { db }; let transactions = as DecompressibleBy<_>>::decompress_with( &compressed.transactions, diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index ae972ac2ff0..57ee6d65028 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -1,4 +1,3 @@ -use super::db::RocksDb; use fuel_core_types::{ fuel_compression::RegistryKey, fuel_tx::{ @@ -9,7 +8,7 @@ use fuel_core_types::{ }; use std::collections::HashMap; -use rocksdb::WriteBatchWithTransaction; +use crate::ports::TemporalRegistry; /// Type-erased (serialized) data #[derive(Debug, Clone)] @@ -22,7 +21,8 @@ impl PostcardSerialized { macro_rules! tables { ($($name:ident: $type:ty),*$(,)?) 
=> { - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] + #[doc = "RegistryKey namespaces"] + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] #[allow(non_camel_case_types)] // Match names in structs exactly pub enum RegistryKeyspace { $( @@ -47,7 +47,6 @@ macro_rules! tables { } } - #[derive(Debug, Clone, Default)] pub struct PerRegistryKeyspace { $(pub $name: T,)* @@ -103,40 +102,13 @@ macro_rules! tables { true } - pub(crate) fn write_to_db(&self, db: &mut RocksDb) -> anyhow::Result<()> { - let mut batch = WriteBatchWithTransaction::::default(); - let cf_registry = db.db.cf_handle("temporal").unwrap(); - let cf_index = db.db.cf_handle("temporal_index").unwrap(); - + pub(crate) fn write_to_registry(&self, registry: &mut R) -> anyhow::Result<()> { $( - let mut key_table_prefix: Vec = stringify!($name).bytes().collect(); - key_table_prefix.reserve(4); - key_table_prefix.push(0); - for (key, value) in self.$name.iter() { - // Get key bytes - let raw_key = postcard::to_stdvec(&key).expect("Never fails"); - - // Write new value - let db_key: Vec = key_table_prefix.iter().copied().chain(raw_key.clone()).collect(); - let db_value = postcard::to_stdvec(&value).expect("Never fails"); - - batch.put_cf(&cf_registry, db_key.clone(), db_value.clone()); - - // Remove the overwritten value from index, if any - if let Some(old_value) = db.db.get_cf(&cf_registry, db_key.clone())? 
{ - let index_value: Vec = key_table_prefix.iter().copied().chain(old_value).collect(); - batch.delete_cf(&cf_index, index_value); - } - - // Add the new value to the index - let index_key: Vec = key_table_prefix.iter().copied().chain(db_value).collect(); - batch.put_cf(&cf_index, index_key, raw_key); + registry.write_registry(RegistryKeyspace::$name, *key, postcard::to_stdvec(value)?)?; } - )* - db.db.write(batch)?; Ok(()) } } @@ -148,4 +120,19 @@ tables!( asset_id: AssetId, contract_id: ContractId, script_code: Vec, + predicate_code: Vec, ); + +// TODO: move inside the macro when this stabilizes: https://github.com/rust-lang/rust/pull/122808 +#[cfg(any(test, feature = "test-helpers"))] +impl rand::prelude::Distribution for rand::distributions::Standard { + fn sample(&self, rng: &mut R) -> RegistryKeyspace { + match rng.gen_range(0..5) { + 0 => RegistryKeyspace::address, + 1 => RegistryKeyspace::asset_id, + 2 => RegistryKeyspace::contract_id, + 3 => RegistryKeyspace::script_code, + _ => RegistryKeyspace::predicate_code, + } + } +} diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index f1bfab1c31b..56de99ba502 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -23,6 +23,7 @@ derive_more = { version = "0.99" } enum-iterator = { workspace = true } fuel-core-chain-config = { workspace = true, features = ["std"] } fuel-core-consensus-module = { workspace = true } +fuel-core-compression = { workspace = true } fuel-core-database = { workspace = true } fuel-core-executor = { workspace = true, features = ["std"] } fuel-core-gas-price-service = { workspace = true } @@ -95,6 +96,7 @@ test-helpers = [ "fuel-core-p2p?/test-helpers", "fuel-core-storage/test-helpers", "fuel-core-chain-config/test-helpers", + "fuel-core-compression/test-helpers", "fuel-core-txpool/test-helpers", "fuel-core-services/test-helpers", "fuel-core-importer/test-helpers", diff --git a/crates/fuel-core/src/graphql_api/database.rs 
b/crates/fuel-core/src/graphql_api/database.rs index ca1dd6ca972..53d2dbb39bf 100644 --- a/crates/fuel-core/src/graphql_api/database.rs +++ b/crates/fuel-core/src/graphql_api/database.rs @@ -69,6 +69,8 @@ use std::{ sync::Arc, }; +use super::ports::DatabaseDaCompressedBlocks; + mod arc_wrapper; /// The on-chain view of the database used by the [`ReadView`] to fetch on-chain data. @@ -210,6 +212,16 @@ impl DatabaseBlocks for ReadView { } } +impl DatabaseDaCompressedBlocks for ReadView { + fn da_compressed_block(&self, id: &BlockHeight) -> StorageResult> { + self.off_chain.da_compressed_block(id) + } + + fn latest_height(&self) -> StorageResult { + self.on_chain.latest_height() + } +} + impl StorageInspect for ReadView where M: Mappable, @@ -286,6 +298,10 @@ impl OffChainDatabase for ReadView { self.off_chain.block_height(block_id) } + fn da_compressed_block(&self, height: &BlockHeight) -> StorageResult> { + self.off_chain.da_compressed_block(height) + } + fn tx_status(&self, tx_id: &TxId) -> StorageResult { self.off_chain.tx_status(tx_id) } diff --git a/crates/fuel-core/src/graphql_api/ports.rs b/crates/fuel-core/src/graphql_api/ports.rs index aae69a95d02..22fdca96164 100644 --- a/crates/fuel-core/src/graphql_api/ports.rs +++ b/crates/fuel-core/src/graphql_api/ports.rs @@ -69,6 +69,8 @@ use std::sync::Arc; pub trait OffChainDatabase: Send + Sync { fn block_height(&self, block_id: &BlockId) -> StorageResult; + fn da_compressed_block(&self, height: &BlockHeight) -> StorageResult>; + fn tx_status(&self, tx_id: &TxId) -> StorageResult; fn owned_coins_ids( @@ -150,6 +152,14 @@ pub trait DatabaseBlocks { fn consensus(&self, id: &BlockHeight) -> StorageResult; } +/// Trait that specifies all the getters required for DA compressed blocks. +pub trait DatabaseDaCompressedBlocks { + /// Get a DA compressed block by its height. 
+ fn da_compressed_block(&self, height: &BlockHeight) -> StorageResult>; + + fn latest_height(&self) -> StorageResult; +} + /// Trait that specifies all the getters required for messages. pub trait DatabaseMessages: StorageInspect { fn all_messages( @@ -267,6 +277,11 @@ pub mod worker { }, }, graphql_api::storage::{ + da_compression::{ + DaCompressedBlocks, + DaCompressionTemporalRegistry, + DaCompressionTemporalRegistryIndex, + }, old::{ OldFuelBlockConsensus, OldFuelBlocks, @@ -320,6 +335,9 @@ pub mod worker { + StorageMutate + StorageMutate + StorageMutate + + StorageMutate + + StorageMutate + + StorageMutate { fn record_tx_id_owner( &mut self, diff --git a/crates/fuel-core/src/graphql_api/storage.rs b/crates/fuel-core/src/graphql_api/storage.rs index 1b77c07cbdc..585cf917017 100644 --- a/crates/fuel-core/src/graphql_api/storage.rs +++ b/crates/fuel-core/src/graphql_api/storage.rs @@ -39,6 +39,7 @@ use statistic::StatisticTable; pub mod blocks; pub mod coins; pub mod contracts; +pub mod da_compression; pub mod messages; pub mod old; pub mod statistic; @@ -93,6 +94,17 @@ pub enum Column { /// Existence of a key in this column means that the message has been spent. /// See [`SpentMessages`](messages::SpentMessages) SpentMessages = 13, + /// DA compression and postcard serialized blocks. + /// See [`DaCompressedBlocks`](da_compression::DaCompressedBlocks) + DaCompressedBlocks = 14, + /// DA compression metadata. + DaCompressionMetadata = 15, + /// Temporal registry for DA compression. + /// See [`DaCompressionTemporalRegistry`](da_compression::DaCompressionTemporalRegistry) + DaCompressionTemporalRegistry = 16, + /// Temporal registry lookup index for DA compression. 
+ /// See [`DaCompressionTemporalRegistryIndex`](da_compression::DaCompressionTemporalRegistryIndex) + DaCompressionTemporalRegistryIndex = 17, } impl Column { diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression.rs b/crates/fuel-core/src/graphql_api/storage/da_compression.rs new file mode 100644 index 00000000000..30f4c636704 --- /dev/null +++ b/crates/fuel-core/src/graphql_api/storage/da_compression.rs @@ -0,0 +1,123 @@ +use fuel_core_compression::RegistryKeyspace; +use fuel_core_storage::{ + blueprint::plain::Plain, + codec::{ + postcard::Postcard, + primitive::Primitive, + raw::Raw, + }, + structured_storage::TableWithBlueprint, + Mappable, +}; +use fuel_core_types::{ + fuel_compression::RegistryKey, + fuel_types::BlockHeight, +}; + +pub struct DaCompressedBlocks; + +impl Mappable for DaCompressedBlocks { + type Key = Self::OwnedKey; + type OwnedKey = BlockHeight; + type Value = Self::OwnedValue; + type OwnedValue = Vec; +} + +impl TableWithBlueprint for DaCompressedBlocks { + type Blueprint = Plain, Raw>; + type Column = super::Column; + + fn column() -> Self::Column { + Self::Column::DaCompressedBlocks + } +} + +pub struct DaCompressionTemporalRegistry; + +impl Mappable for DaCompressionTemporalRegistry { + type Key = Self::OwnedKey; + type OwnedKey = (RegistryKeyspace, RegistryKey); + type Value = Self::OwnedValue; + // This is a postcard-encoded value, where the original type depends on the keyspace. + type OwnedValue = Vec; +} + +impl TableWithBlueprint for DaCompressionTemporalRegistry { + type Blueprint = Plain; + type Column = super::Column; + + fn column() -> Self::Column { + Self::Column::DaCompressionTemporalRegistry + } +} + +pub struct DaCompressionTemporalRegistryIndex; + +impl Mappable for DaCompressionTemporalRegistryIndex { + type Key = Self::OwnedKey; + // The second value is a postcard-encoded value, where the original type depends on the keyspace. + // TODO: should we hash the second part of this key? 
+ type OwnedKey = (RegistryKeyspace, Vec); + type Value = Self::OwnedValue; + type OwnedValue = RegistryKey; +} + +impl TableWithBlueprint for DaCompressionTemporalRegistryIndex { + type Blueprint = Plain; + type Column = super::Column; + + fn column() -> Self::Column { + Self::Column::DaCompressionTemporalRegistryIndex + } +} + +#[cfg(test)] +mod tests { + use fuel_core_types::fuel_crypto::coins_bip32::ecdsa::signature::digest::typenum::Pow; + + use super::*; + + fn generate_registry_key( + rng: &mut impl rand::Rng, + ) -> (RegistryKeyspace, RegistryKey) { + let keyspace: RegistryKeyspace = rng.gen(); + + let raw_key: u32 = rng.gen_range(0..2u32.pow(32) - 2); + let key = RegistryKey::try_from(raw_key).unwrap(); + + (keyspace, key) + } + + fn generate_registry_index_key( + rng: &mut impl rand::Rng, + ) -> (RegistryKeyspace, Vec) { + let keyspace: RegistryKeyspace = rng.gen(); + + let mut bytes: Vec = vec![0u8; rng.gen_range(0..1234)]; + rng.fill(bytes.as_mut_slice()); + + (keyspace, bytes) + } + + fuel_core_storage::basic_storage_tests!( + DaCompressedBlocks, + ::Key::default(), + ::Value::default() + ); + + fuel_core_storage::basic_storage_tests!( + DaCompressionTemporalRegistry, + (RegistryKeyspace::address, RegistryKey::ZERO), + ::Value::default(), + ::Value::default(), + generate_registry_key + ); + + fuel_core_storage::basic_storage_tests!( + DaCompressionTemporalRegistryIndex, + (RegistryKeyspace::address, Vec::default()), + RegistryKey::ZERO, + RegistryKey::ZERO, + generate_registry_index_key + ); +} diff --git a/crates/fuel-core/src/graphql_api/worker_service.rs b/crates/fuel-core/src/graphql_api/worker_service.rs index 1c19788d194..0a05c7c16c0 100644 --- a/crates/fuel-core/src/graphql_api/worker_service.rs +++ b/crates/fuel-core/src/graphql_api/worker_service.rs @@ -5,8 +5,10 @@ use super::storage::old::{ }; use crate::{ fuel_core_graphql_api::{ - ports, - ports::worker::OffChainDatabaseTransaction, + ports::{ + self, + worker::OffChainDatabaseTransaction, 
+ }, storage::{ blocks::FuelBlockIdsToHeights, coins::{ @@ -21,7 +23,22 @@ use crate::{ }, }, }, - graphql_api::storage::relayed_transactions::RelayedTransactionStatuses, + graphql_api::storage::{ + da_compression::{ + DaCompressedBlocks, + DaCompressionTemporalRegistry, + DaCompressionTemporalRegistryIndex, + }, + relayed_transactions::RelayedTransactionStatuses, + }, +}; +use fuel_core_compression::{ + ports::{ + TemporalRegistry, + UtxoIdToPointer, + }, + services::compress::compress, + RegistryKeyspace, }; use fuel_core_metrics::graphql_metrics::graphql_metrics; use fuel_core_services::{ @@ -33,9 +50,12 @@ use fuel_core_services::{ StateWatcher, }; use fuel_core_storage::{ + not_found, Error as StorageError, Result as StorageResult, StorageAsMut, + StorageAsRef, + StorageInspect, }; use fuel_core_txpool::types::TxId; use fuel_core_types::{ @@ -134,7 +154,7 @@ where let height = block.header().height(); let block_id = block.id(); transaction - .storage::() + .storage_as_mut::() .insert(&block_id, height)?; let total_tx_count = transaction @@ -146,6 +166,8 @@ where &mut transaction, )?; + da_compress_block(&block, &result.events, &mut transaction)?; + transaction.commit()?; for status in result.tx_status.iter() { @@ -161,6 +183,100 @@ where } } +fn da_compress_block( + block: &Block, + events: &[Event], + transaction: &mut T, +) -> anyhow::Result<()> +where + T: OffChainDatabaseTransaction, + T: StorageInspect, +{ + struct DbTx<'a, Tx>(&'a mut Tx, &'a [Event]); + + impl<'a, Tx> TemporalRegistry for DbTx<'a, Tx> + where + Tx: OffChainDatabaseTransaction, + Tx: StorageInspect, + { + fn read_registry( + &self, + keyspace: RegistryKeyspace, + key: fuel_core_types::fuel_compression::RegistryKey, + ) -> anyhow::Result> { + Ok(self + .0 + .storage_as_ref::() + .get(&(keyspace, key))? + .ok_or(not_found!(DaCompressionTemporalRegistry))? 
+ .into_owned()) + } + + fn write_registry( + &mut self, + keyspace: RegistryKeyspace, + key: fuel_core_types::fuel_compression::RegistryKey, + value: Vec, + ) -> anyhow::Result<()> { + // Write the actual value + self.0 + .storage_as_mut::() + .insert(&(keyspace, key), &value)?; + + // Remove the overwritten value from index, if any + self.0 + .storage_as_mut::() + .remove(&(keyspace, value.clone()))?; + + // Add the new value to the index + self.0 + .storage_as_mut::() + .insert(&(keyspace, value), &key)?; + + Ok(()) + } + + fn registry_index_lookup( + &self, + keyspace: RegistryKeyspace, + value: Vec, + ) -> anyhow::Result> + { + Ok(self + .0 + .storage_as_ref::() + .get(&(keyspace, value))? + .map(|v| v.into_owned())) + } + + fn next_block_height(&self) -> anyhow::Result { + todo!() + } + } + + impl<'a, Tx> UtxoIdToPointer for DbTx<'a, Tx> + where + Tx: OffChainDatabaseTransaction, + { + fn lookup( + &self, + _utxo_id: fuel_core_types::fuel_tx::UtxoId, + ) -> anyhow::Result { + todo!(); + } + } + + let compressed = compress(DbTx(transaction, events), block) + .now_or_never() + .expect("The current implementation resolved all futures instantly")?; + + transaction + .storage_as_mut::() + .insert(&block.header().consensus().height, &compressed)?; + + Ok(()) +} + /// Process the executor events and update the indexes for the messages and coins. 
pub fn process_executor_events<'a, Iter, T>( events: Iter, diff --git a/crates/fuel-core/src/lib.rs b/crates/fuel-core/src/lib.rs index f78b69ff45e..31f2319a886 100644 --- a/crates/fuel-core/src/lib.rs +++ b/crates/fuel-core/src/lib.rs @@ -1,7 +1,7 @@ #![deny(clippy::arithmetic_side_effects)] #![deny(clippy::cast_possible_truncation)] #![deny(unused_crate_dependencies)] -#![deny(warnings)] +// #![deny(warnings)] use crate::service::genesis::NotifyCancel; use tokio_util::sync::CancellationToken; diff --git a/crates/fuel-core/src/query.rs b/crates/fuel-core/src/query.rs index c5d3d0f6988..fc2dc79ea9b 100644 --- a/crates/fuel-core/src/query.rs +++ b/crates/fuel-core/src/query.rs @@ -9,6 +9,8 @@ mod subscriptions; mod tx; mod upgrades; +pub mod da_compressed; + // TODO: Remove reexporting of everything pub use balance::*; pub use blob::*; diff --git a/crates/fuel-core/src/query/da_compressed.rs b/crates/fuel-core/src/query/da_compressed.rs new file mode 100644 index 00000000000..669e55d584e --- /dev/null +++ b/crates/fuel-core/src/query/da_compressed.rs @@ -0,0 +1,16 @@ +use crate::graphql_api::ports::DatabaseDaCompressedBlocks; +use fuel_core_storage::Result as StorageResult; +use fuel_core_types::fuel_types::BlockHeight; + +pub trait DaCompressedBlockData: Send + Sync { + fn da_compressed_block(&self, id: &BlockHeight) -> StorageResult>; +} + +impl DaCompressedBlockData for D +where + D: DatabaseDaCompressedBlocks + ?Sized + Send + Sync, +{ + fn da_compressed_block(&self, height: &BlockHeight) -> StorageResult> { + self.da_compressed_block(height) + } +} diff --git a/crates/fuel-core/src/schema.rs b/crates/fuel-core/src/schema.rs index 747f2740151..bd9e550d448 100644 --- a/crates/fuel-core/src/schema.rs +++ b/crates/fuel-core/src/schema.rs @@ -32,6 +32,7 @@ pub mod block; pub mod chain; pub mod coins; pub mod contract; +pub mod da_compressed; pub mod dap; pub mod health; pub mod message; @@ -54,6 +55,7 @@ pub struct Query( tx::TxQuery, health::HealthQuery, 
coins::CoinQuery, + da_compressed::DaCompressedBlockQuery, contract::ContractQuery, contract::ContractBalanceQuery, node_info::NodeQuery, diff --git a/crates/fuel-core/src/schema/da_compressed.rs b/crates/fuel-core/src/schema/da_compressed.rs new file mode 100644 index 00000000000..faa95e8dcb3 --- /dev/null +++ b/crates/fuel-core/src/schema/da_compressed.rs @@ -0,0 +1,48 @@ +use super::ReadViewProvider; +use crate::{ + fuel_core_graphql_api::{ + IntoApiResult, + QUERY_COSTS, + }, + query::da_compressed::DaCompressedBlockData, + schema::scalars::U32, +}; +use async_graphql::{ + Context, + Object, +}; + +pub struct DaCompressedBlock { + bytes: Vec, +} + +impl From> for DaCompressedBlock { + fn from(bytes: Vec) -> Self { + Self { bytes } + } +} + +#[Object] +impl DaCompressedBlock { + async fn bytes(&self) -> Vec { + self.bytes.clone() + } +} + +#[derive(Default)] +pub struct DaCompressedBlockQuery; + +#[Object] +impl DaCompressedBlockQuery { + #[graphql(complexity = "2 * QUERY_COSTS.storage_read + child_complexity")] + async fn block( + &self, + ctx: &Context<'_>, + #[graphql(desc = "Height of the block")] height: U32, + ) -> async_graphql::Result> { + let query = ctx.read_view()?; + query + .da_compressed_block(&height.0.into()) + .into_api_result() + } +} diff --git a/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs b/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs index fdd1d3183bc..9006a2a0cd3 100644 --- a/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs @@ -15,10 +15,13 @@ use crate::{ transactions::OwnedTransactionIndexCursor, }, }, - graphql_api::storage::old::{ - OldFuelBlockConsensus, - OldFuelBlocks, - OldTransactions, + graphql_api::storage::{ + da_compression::DaCompressedBlocks, + old::{ + OldFuelBlockConsensus, + OldFuelBlocks, + OldTransactions, + }, }, }; use fuel_core_storage::{ @@ -69,6 +72,13 @@ impl OffChainDatabase for 
OffChainIterableKeyValueView { .and_then(|height| height.ok_or(not_found!("BlockHeight"))) } + fn da_compressed_block(&self, height: &BlockHeight) -> StorageResult> { + self.storage_as_ref::() + .get(height)? + .ok_or_else(|| not_found!("DaCompressedBlock")) + .map(std::borrow::Cow::into_owned) + } + fn tx_status(&self, tx_id: &TxId) -> StorageResult { self.get_tx_status(tx_id) .transpose() diff --git a/crates/types/src/blockchain/block.rs b/crates/types/src/blockchain/block.rs index a58d3abf8ca..8380b977006 100644 --- a/crates/types/src/blockchain/block.rs +++ b/crates/types/src/blockchain/block.rs @@ -60,6 +60,7 @@ pub struct BlockV1 { } /// Fuel `Block` with transactions represented by their id only. +/// Note that this is different from the DA compressed blocks. pub type CompressedBlock = Block; /// Fuel block with all transaction data included From 2d5ea210fcb666f6c6967469dd7af9e9d71c89f7 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 11 Sep 2024 15:27:04 +0300 Subject: [PATCH 036/112] Remove tests based on mock registry, these are already tested in fuel-vm --- crates/compression/src/compression_tests.rs | 257 ------------------ crates/compression/src/lib.rs | 3 - .../src/graphql_api/storage/da_compression.rs | 4 +- 3 files changed, 1 insertion(+), 263 deletions(-) delete mode 100644 crates/compression/src/compression_tests.rs diff --git a/crates/compression/src/compression_tests.rs b/crates/compression/src/compression_tests.rs deleted file mode 100644 index 720899e52d0..00000000000 --- a/crates/compression/src/compression_tests.rs +++ /dev/null @@ -1,257 +0,0 @@ -use std::{ - collections::HashMap, - sync::{ - Arc, - Mutex, - }, -}; - -use bimap::BiMap; -use fuel_core_types::{ - blockchain::{ - block::{ - Block, - PartialFuelBlock, - }, - header::{ - ApplicationHeader, - ConsensusHeader, - PartialBlockHeader, - }, - primitives::{ - DaBlockHeight, - Empty, - }, - }, - fuel_tx::{ - Bytes32, - CompressedUtxoId, - Finalizable, - Input, - Transaction, - 
TransactionBuilder, - TxPointer, - UtxoId, - }, - fuel_types::Nonce, - fuel_vm::SecretKey, - tai64::Tai64, -}; -use rand::Rng; - -use crate::{ - ports::{ - CoinInfo, - HistoryLookup, - MessageInfo, - TemporalRegistry, - UtxoIdToPointer, - }, - services, -}; - -#[derive(Default)] -pub struct MockTxDb { - utxo_id_mapping: Arc>>, - coins: HashMap, -} - -impl TemporalRegistry for &mut MockTxDb { - fn read_registry( - &self, - keyspace: crate::RegistryKeyspace, - key: fuel_core_types::fuel_compression::RegistryKey, - ) -> anyhow::Result> { - todo!() - } - - fn write_registry( - &mut self, - keyspace: crate::RegistryKeyspace, - key: fuel_core_types::fuel_compression::RegistryKey, - value: Vec, - ) -> anyhow::Result<()> { - todo!() - } - - fn registry_index_lookup( - &self, - keyspace: crate::RegistryKeyspace, - value: Vec, - ) -> anyhow::Result> { - todo!() - } - - fn next_block_height( - &self, - ) -> anyhow::Result { - todo!() - } -} - -impl MockTxDb { - fn create_coin(&mut self, rng: &mut R, info: CoinInfo) -> UtxoId { - let utxo_id: UtxoId = rng.gen(); - self.coins.insert(utxo_id, info); - utxo_id - } -} - -impl UtxoIdToPointer for &mut MockTxDb { - fn lookup(&self, utxo_id: UtxoId) -> anyhow::Result { - let mut g = self.utxo_id_mapping.lock().unwrap(); - if !g.contains_left(&utxo_id) { - let key = g.len() as u32; // Just obtain an unique key - g.insert( - utxo_id, - CompressedUtxoId { - tx_pointer: TxPointer::new(key.into(), 0), - output_index: 0, - }, - ); - } - Ok(g.get_by_left(&utxo_id).cloned().unwrap()) - } -} - -impl HistoryLookup for &mut MockTxDb { - fn utxo_id(&self, c: &CompressedUtxoId) -> anyhow::Result { - let g = self.utxo_id_mapping.lock().unwrap(); - g.get_by_right(&c).cloned().ok_or_else(|| { - anyhow::anyhow!("CompressedUtxoId not found in mock db: {:?}", c) - }) - } - - fn coin(&self, utxo_id: &UtxoId) -> anyhow::Result { - self.coins - .get(&utxo_id) - .cloned() - .ok_or_else(|| anyhow::anyhow!("Coin not found in mock db: {:?}", utxo_id)) - } - - 
fn message(&self, _nonce: &Nonce) -> anyhow::Result { - todo!(); - } -} - -#[tokio::test] -async fn same_compact_tx_is_smaller_in_next_block() { - let tx: Transaction = - TransactionBuilder::script(vec![1, 2, 3, 4, 5, 6, 7, 8], vec![]) - .max_fee_limit(0) - .add_random_fee_input() - .finalize() - .into(); - - let mut tx_db = MockTxDb::default(); - - let mut sizes = Vec::new(); - for i in 0..3 { - let compressed = services::compress::compress( - &mut tx_db, - &Block::new( - PartialBlockHeader { - application: ApplicationHeader { - da_height: DaBlockHeight::default(), - consensus_parameters_version: 4, - state_transition_bytecode_version: 5, - generated: Empty, - }, - consensus: ConsensusHeader { - prev_root: Bytes32::default(), - height: i.into(), - time: Tai64::UNIX_EPOCH, - generated: Empty, - }, - }, - vec![tx.clone()], - &[], - Bytes32::default(), - ) - .expect("Invalid block header"), - ) - .await - .unwrap(); - sizes.push(compressed.len()); - } - - assert!(sizes[0] > sizes[1], "Size must decrease after first block"); - assert!( - sizes[1] == sizes[2], - "Size must be constant after first block" - ); -} - -#[tokio::test] -async fn compress_decompress_roundtrip() { - use rand::SeedableRng; - let mut rng = rand::rngs::StdRng::seed_from_u64(2322u64); - - let mut tx_db = MockTxDb::default(); - - let mut original_blocks = Vec::new(); - let mut compressed_blocks = Vec::new(); - - for i in 0..3 { - let secret_key = SecretKey::random(&mut rng); - - let coin_utxo_id = tx_db.create_coin( - &mut rng, - CoinInfo { - owner: Input::owner(&secret_key.public_key()), - amount: (i as u64) * 1000, - asset_id: Default::default(), - }, - ); - - let tx: Transaction = - TransactionBuilder::script(vec![1, 2, 3, 4, 5, 6, 7, 8], vec![]) - .max_fee_limit(0) - .add_unsigned_coin_input( - secret_key, - coin_utxo_id, - (i as u64) * 1000, - Default::default(), - Default::default(), - ) - .finalize() - .into(); - - let block = Block::new( - PartialBlockHeader { - application: 
ApplicationHeader { - da_height: DaBlockHeight::default(), - consensus_parameters_version: 4, - state_transition_bytecode_version: 5, - generated: Empty, - }, - consensus: ConsensusHeader { - prev_root: Bytes32::default(), - height: i.into(), - time: Tai64::UNIX_EPOCH, - generated: Empty, - }, - }, - vec![tx], - &[], - Bytes32::default(), - ) - .expect("Invalid block header"); - original_blocks.push(block.clone()); - compressed_blocks.push( - services::compress::compress(&mut tx_db, &block) - .await - .expect("Failed to compress"), - ); - } - - for (original, compressed) in original_blocks - .into_iter() - .zip(compressed_blocks.into_iter()) - { - let decompressed = services::decompress::decompress(&mut tx_db, compressed) - .await - .expect("Decompression failed"); - assert_eq!(PartialFuelBlock::from(original), decompressed); - } -} diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 74d2d80c291..efa906f4a22 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -13,9 +13,6 @@ mod context { pub use tables::RegistryKeyspace; -#[cfg(test)] -mod compression_tests; - use serde::{ Deserialize, Serialize, diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression.rs b/crates/fuel-core/src/graphql_api/storage/da_compression.rs index 30f4c636704..410103dc153 100644 --- a/crates/fuel-core/src/graphql_api/storage/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/storage/da_compression.rs @@ -73,8 +73,6 @@ impl TableWithBlueprint for DaCompressionTemporalRegistryIndex { #[cfg(test)] mod tests { - use fuel_core_types::fuel_crypto::coins_bip32::ecdsa::signature::digest::typenum::Pow; - use super::*; fn generate_registry_key( @@ -82,7 +80,7 @@ mod tests { ) -> (RegistryKeyspace, RegistryKey) { let keyspace: RegistryKeyspace = rng.gen(); - let raw_key: u32 = rng.gen_range(0..2u32.pow(32) - 2); + let raw_key: u32 = rng.gen_range(0..2u32.pow(24) - 2); let key = RegistryKey::try_from(raw_key).unwrap(); 
(keyspace, key) From cb858128c94a6817d4f7f09dd75f73017a5f1784 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 16 Sep 2024 17:44:13 +0200 Subject: [PATCH 037/112] Fix the GraphQL endpoint; it now actually returns the compressed blocks --- crates/client/assets/schema.sdl | 10 + crates/client/src/client.rs | 18 ++ crates/client/src/client/schema.rs | 1 + .../client/src/client/schema/da_compressed.rs | 237 ++++++++++++++++++ crates/compression/src/ports.rs | 3 - crates/compression/src/services/compress.rs | 6 +- .../src/graphql_api/worker_service.rs | 35 ++- crates/fuel-core/src/schema/da_compressed.rs | 2 +- tests/tests/da_compression.rs | 58 +++++ tests/tests/lib.rs | 1 + 10 files changed, 352 insertions(+), 19 deletions(-) create mode 100644 crates/client/src/client/schema/da_compressed.rs create mode 100644 tests/tests/da_compression.rs diff --git a/crates/client/assets/schema.sdl b/crates/client/assets/schema.sdl index 8929f76b0ef..f659b0f1aed 100644 --- a/crates/client/assets/schema.sdl +++ b/crates/client/assets/schema.sdl @@ -283,6 +283,10 @@ enum ContractParametersVersion { V1 } +type DaCompressedBlock { + bytes: [Int!]! +} + union DependentCost = LightOperation | HeavyOperation type DryRunFailureStatus { @@ -941,6 +945,12 @@ type Query { """ excludedIds: ExcludeInput ): [[CoinType!]!]! + daCompressedBlock( + """ + Height of the block + """ + height: U32! + ): DaCompressedBlock contract( """ ID of the Contract diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index 512bac9e5ce..56819dd7c1d 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -862,6 +862,24 @@ impl FuelClient { Ok(block) } + pub async fn da_compressed_block( + &self, + height: BlockHeight, + ) -> io::Result>> { + let query = schema::block::BlockByIdQuery::build(BlockByIdArgs { + id: Some((*id).into()), + }); + + let block = self + .query(query) + .await? 
+ .block + .map(TryInto::try_into) + .transpose()?; + + Ok(block) + } + /// Retrieve a blob by its ID pub async fn blob(&self, id: BlobId) -> io::Result> { let query = schema::blob::BlobByIdQuery::build(BlobByIdArgs { id: id.into() }); diff --git a/crates/client/src/client/schema.rs b/crates/client/src/client/schema.rs index 648304425aa..7930d66c1ab 100644 --- a/crates/client/src/client/schema.rs +++ b/crates/client/src/client/schema.rs @@ -32,6 +32,7 @@ pub mod block; pub mod chain; pub mod coins; pub mod contract; +pub mod da_compressed; pub mod message; pub mod node_info; pub mod upgrades; diff --git a/crates/client/src/client/schema/da_compressed.rs b/crates/client/src/client/schema/da_compressed.rs new file mode 100644 index 00000000000..e159d44bc9a --- /dev/null +++ b/crates/client/src/client/schema/da_compressed.rs @@ -0,0 +1,237 @@ +use super::Bytes32; +use crate::client::schema::{ + schema, + BlockId, + ConnectionArgs, + PageInfo, + Signature, + Tai64Timestamp, + TransactionId, + U16, + U32, + U64, +}; +use fuel_core_types::{ + fuel_crypto, + fuel_types::BlockHeight, +}; + +#[derive(cynic::QueryVariables, Debug)] +pub struct BlockByIdArgs { + pub id: Option, +} + +#[derive(cynic::QueryFragment, Clone, Debug)] +#[cynic( + schema_path = "./assets/schema.sdl", + graphql_type = "Query", + variables = "BlockByIdArgs" +)] +pub struct BlockByIdQuery { + #[arguments(id: $id)] + pub block: Option, +} + +#[derive(cynic::QueryVariables, Debug)] +pub struct BlockByHeightArgs { + pub height: Option, +} + +#[derive(cynic::QueryFragment, Clone, Debug)] +#[cynic( + schema_path = "./assets/schema.sdl", + graphql_type = "Query", + variables = "BlockByHeightArgs" +)] +pub struct BlockByHeightQuery { + #[arguments(height: $height)] + pub block: Option, +} + +#[derive(cynic::QueryFragment, Clone, Debug)] +#[cynic( + schema_path = "./assets/schema.sdl", + graphql_type = "Query", + variables = "ConnectionArgs" +)] +pub struct BlocksQuery { + #[arguments(after: $after, before: 
$before, first: $first, last: $last)] + pub blocks: BlockConnection, +} + +#[derive(cynic::QueryFragment, Clone, Debug)] +#[cynic(schema_path = "./assets/schema.sdl")] +pub struct BlockConnection { + pub edges: Vec, + pub page_info: PageInfo, +} + +#[derive(cynic::QueryFragment, Clone, Debug)] +#[cynic(schema_path = "./assets/schema.sdl")] +pub struct BlockEdge { + pub cursor: String, + pub node: Block, +} + +#[derive(cynic::Enum, Clone, Debug)] +#[cynic(schema_path = "./assets/schema.sdl")] +pub enum BlockVersion { + V1, +} + +/// Block with transaction ids +#[derive(cynic::QueryFragment, Clone, Debug)] +#[cynic(schema_path = "./assets/schema.sdl")] +pub struct Block { + pub version: BlockVersion, + pub id: BlockId, + pub header: Header, + pub consensus: Consensus, + pub transaction_ids: Vec, +} + +#[derive(cynic::QueryFragment, Clone, Debug)] +#[cynic(schema_path = "./assets/schema.sdl", graphql_type = "Block")] +pub struct BlockIdFragment { + pub id: BlockId, +} + +#[derive(cynic::QueryFragment, Clone, Debug)] +#[cynic(schema_path = "./assets/schema.sdl", graphql_type = "Block")] +pub struct BlockHeightFragment { + pub height: U32, +} + +#[derive(cynic::QueryVariables, Debug)] +pub struct ProduceBlockArgs { + pub start_timestamp: Option, + pub blocks_to_produce: U32, +} + +#[derive(cynic::QueryFragment, Clone, Debug)] +#[cynic( + schema_path = "./assets/schema.sdl", + variables = "ProduceBlockArgs", + graphql_type = "Mutation" +)] +pub struct BlockMutation { + #[arguments(blocksToProduce: $blocks_to_produce, startTimestamp: $start_timestamp)] + pub produce_blocks: U32, +} + +#[derive(cynic::Enum, Clone, Debug)] +#[cynic(schema_path = "./assets/schema.sdl")] +pub enum HeaderVersion { + V1, +} + +#[derive(cynic::QueryFragment, Clone, Debug)] +#[cynic(schema_path = "./assets/schema.sdl")] +pub struct Header { + pub version: HeaderVersion, + pub id: BlockId, + pub da_height: U64, + pub consensus_parameters_version: U32, + pub state_transition_bytecode_version: U32, 
+ pub transactions_count: U16, + pub message_receipt_count: U32, + pub transactions_root: Bytes32, + pub message_outbox_root: Bytes32, + pub event_inbox_root: Bytes32, + pub height: U32, + pub prev_root: Bytes32, + pub time: Tai64Timestamp, + pub application_hash: Bytes32, +} + +#[derive(cynic::InlineFragments, Clone, Debug)] +#[cynic(schema_path = "./assets/schema.sdl")] +pub enum Consensus { + Genesis(Genesis), + PoAConsensus(PoAConsensus), + #[cynic(fallback)] + Unknown, +} + +#[derive(cynic::QueryFragment, Clone, Debug)] +#[cynic(schema_path = "./assets/schema.sdl")] +pub struct Genesis { + pub chain_config_hash: Bytes32, + pub coins_root: Bytes32, + pub contracts_root: Bytes32, + pub messages_root: Bytes32, + pub transactions_root: Bytes32, +} + +#[derive(cynic::QueryFragment, Clone, Debug)] +#[cynic(schema_path = "./assets/schema.sdl")] +pub struct PoAConsensus { + pub signature: Signature, +} + +impl Block { + /// Returns the block producer public key, if any. + pub fn block_producer(&self) -> Option { + let message = self.header.id.clone().into_message(); + match &self.consensus { + Consensus::Genesis(_) => Some(Default::default()), + Consensus::PoAConsensus(poa) => { + let signature = poa.signature.clone().into_signature(); + let producer_pub_key = signature.recover(&message); + producer_pub_key.ok() + } + Consensus::Unknown => None, + } + } +} + +impl From for BlockHeight { + fn from(fragment: BlockHeightFragment) -> Self { + BlockHeight::new(fragment.height.into()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn block_by_id_query_gql_output() { + use cynic::QueryBuilder; + let operation = BlockByIdQuery::build(BlockByIdArgs { + id: Some(BlockId::default()), + }); + insta::assert_snapshot!(operation.query) + } + + #[test] + fn block_by_height_query_gql_output() { + use cynic::QueryBuilder; + let operation = BlockByHeightQuery::build(BlockByHeightArgs { + height: Some(U32(0)), + }); + insta::assert_snapshot!(operation.query) + } + + 
#[test] + fn block_mutation_query_gql_output() { + use cynic::MutationBuilder; + let operation = BlockMutation::build(ProduceBlockArgs { + blocks_to_produce: U32(0), + start_timestamp: None, + }); + insta::assert_snapshot!(operation.query) + } + + #[test] + fn blocks_connection_query_gql_output() { + use cynic::QueryBuilder; + let operation = BlocksQuery::build(ConnectionArgs { + after: None, + before: None, + first: None, + last: None, + }); + insta::assert_snapshot!(operation.query) + } +} diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index 08c6481cc79..0a55adfc195 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -43,9 +43,6 @@ pub trait TemporalRegistry { keyspace: RegistryKeyspace, value: Vec, ) -> anyhow::Result>; - - /// Get the block height for the next block, i.e. the block currently being processed. - fn next_block_height(&self) -> anyhow::Result; } pub trait UtxoIdToPointer { diff --git a/crates/compression/src/services/compress.rs b/crates/compression/src/services/compress.rs index 9252d11d76d..a86dd6f6097 100644 --- a/crates/compression/src/services/compress.rs +++ b/crates/compression/src/services/compress.rs @@ -34,9 +34,9 @@ pub trait CompressDb: TemporalRegistry + UtxoIdToPointer {} impl CompressDb for T where T: TemporalRegistry + UtxoIdToPointer {} pub async fn compress(db: D, block: &Block) -> Result, Error> { - if *block.header().height() != db.next_block_height()? { - return Err(Error::NotLatest); - } + // if *block.header().height() != db.next_block_height()? 
{ + // return Err(Error::NotLatest); + // } let target = block.transactions().to_vec(); diff --git a/crates/fuel-core/src/graphql_api/worker_service.rs b/crates/fuel-core/src/graphql_api/worker_service.rs index 0a05c7c16c0..39829a69e02 100644 --- a/crates/fuel-core/src/graphql_api/worker_service.rs +++ b/crates/fuel-core/src/graphql_api/worker_service.rs @@ -1,9 +1,21 @@ -use super::storage::old::{ - OldFuelBlockConsensus, - OldFuelBlocks, - OldTransactions, +use super::{ + ports::worker, + storage::old::{ + OldFuelBlockConsensus, + OldFuelBlocks, + OldTransactions, + }, }; use crate::{ + database::{ + database_description::{ + off_chain::OffChain, + DatabaseDescription, + DatabaseHeight, + }, + metadata::MetadataTable, + Database, + }, fuel_core_graphql_api::{ ports::{ self, @@ -32,6 +44,7 @@ use crate::{ relayed_transactions::RelayedTransactionStatuses, }, }; +use async_graphql::Description; use fuel_core_compression::{ ports::{ TemporalRegistry, @@ -51,6 +64,7 @@ use fuel_core_services::{ }; use fuel_core_storage::{ not_found, + transactional::StorageTransaction, Error as StorageError, Result as StorageResult, StorageAsMut, @@ -107,6 +121,7 @@ use futures::{ }; use std::{ borrow::Cow, + fmt::Debug, ops::Deref, }; @@ -134,10 +149,10 @@ pub struct Task { continue_on_error: bool, } -impl Task +impl<'a, TxPool, D> Task where TxPool: ports::worker::TxPool, - D: ports::worker::OffChainDatabase, + D: ports::worker::OffChainDatabase + 'a, { fn process_block(&mut self, result: SharedImportResult) -> anyhow::Result<()> { let block = &result.sealed_block.entity; @@ -248,10 +263,6 @@ where .get(&(keyspace, value))? 
.map(|v| v.into_owned())) } - - fn next_block_height(&self) -> anyhow::Result { - todo!() - } } impl<'a, Tx> UtxoIdToPointer for DbTx<'a, Tx> @@ -541,7 +552,7 @@ where } #[async_trait::async_trait] -impl RunnableService +impl<'a, TxPool, BlockImporter, OnChain, OffChain> RunnableService for InitializeTask where TxPool: ports::worker::TxPool, @@ -689,7 +700,7 @@ where } } -pub fn new_service( +pub fn new_service<'a, TxPool, BlockImporter, OnChain, OffChain>( tx_pool: TxPool, block_importer: BlockImporter, on_chain_database: OnChain, diff --git a/crates/fuel-core/src/schema/da_compressed.rs b/crates/fuel-core/src/schema/da_compressed.rs index faa95e8dcb3..d915a164c75 100644 --- a/crates/fuel-core/src/schema/da_compressed.rs +++ b/crates/fuel-core/src/schema/da_compressed.rs @@ -35,7 +35,7 @@ pub struct DaCompressedBlockQuery; #[Object] impl DaCompressedBlockQuery { #[graphql(complexity = "2 * QUERY_COSTS.storage_read + child_complexity")] - async fn block( + async fn da_compressed_block( &self, ctx: &Context<'_>, #[graphql(desc = "Height of the block")] height: U32, diff --git a/tests/tests/da_compression.rs b/tests/tests/da_compression.rs new file mode 100644 index 00000000000..755a52ba3a4 --- /dev/null +++ b/tests/tests/da_compression.rs @@ -0,0 +1,58 @@ +use fuel_core::{ + combined_database::CombinedDatabase, + service::{ + adapters::consensus_module::poa::block_path, + Config, + FuelService, + }, +}; +use fuel_core_client::client::{ + types::TransactionStatus, + FuelClient, +}; +use fuel_core_poa::signer::SignMode; +use fuel_core_storage::transactional::AtomicView; +use fuel_core_types::{ + blockchain::consensus::Consensus, + fuel_crypto::SecretKey, + fuel_tx::Transaction, + secrecy::Secret, +}; +use rand::{ + rngs::StdRng, + SeedableRng, +}; +use tempfile::tempdir; +use test_helpers::{ + fuel_core_driver::FuelCoreDriver, + produce_block_with_tx, +}; + +#[tokio::test] +async fn can_get_da_compressed_blocks() { + let mut rng = StdRng::seed_from_u64(10); + let 
poa_secret = SecretKey::random(&mut rng); + let poa_public = poa_secret.public_key(); + + let db = CombinedDatabase::default(); + let mut config = Config::local_node(); + config.consensus_signer = SignMode::Key(Secret::new(poa_secret.into())); + let srv = FuelService::from_combined_database(db.clone(), config) + .await + .unwrap(); + let client = FuelClient::from(srv.bound_address); + + let status = client + .submit_and_await_commit(&Transaction::default_test_tx()) + .await + .unwrap(); + + let block_height = match status { + TransactionStatus::Success { block_height, .. } => block_height, + _ => { + panic!("unexpected result") + } + }; + + let block = client.get_da_compressed_block(block_height).await.unwrap(); +} diff --git a/tests/tests/lib.rs b/tests/tests/lib.rs index e7a3db5bce9..5337e134358 100644 --- a/tests/tests/lib.rs +++ b/tests/tests/lib.rs @@ -8,6 +8,7 @@ mod chain; mod coin; mod coins; mod contract; +mod da_compression; mod dap; mod debugger; mod dos; From 0b1c2e25b3760d14031f1f48d6fcb7270995ee74 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 17 Sep 2024 11:40:44 +0200 Subject: [PATCH 038/112] Fetch UtxoIds when compacting; fix GraphQL types --- crates/client/assets/schema.sdl | 2 +- crates/client/src/client.rs | 18 +- .../client/src/client/schema/da_compressed.rs | 271 ++++-------------- .../src/graphql_api/worker_service.rs | 18 +- crates/fuel-core/src/schema/da_compressed.rs | 9 +- tests/tests/da_compression.rs | 12 +- 6 files changed, 88 insertions(+), 242 deletions(-) diff --git a/crates/client/assets/schema.sdl b/crates/client/assets/schema.sdl index f659b0f1aed..5d52bd23e87 100644 --- a/crates/client/assets/schema.sdl +++ b/crates/client/assets/schema.sdl @@ -284,7 +284,7 @@ enum ContractParametersVersion { } type DaCompressedBlock { - bytes: [Int!]! + bytes: HexString! 
} union DependentCost = LightOperation | HeavyOperation diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index 56819dd7c1d..211c2df7bc0 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -76,6 +76,7 @@ use schema::{ block::BlockByIdArgs, coins::CoinByIdArgs, contract::ContractByIdArgs, + da_compressed::DaCompressedBlockByHeightArgs, tx::{ TxArg, TxIdArgs, @@ -866,18 +867,17 @@ impl FuelClient { &self, height: BlockHeight, ) -> io::Result>> { - let query = schema::block::BlockByIdQuery::build(BlockByIdArgs { - id: Some((*id).into()), - }); + let query = schema::da_compressed::DaCompressedBlockByHeightQuery::build( + DaCompressedBlockByHeightArgs { + height: U32(height.into()), + }, + ); - let block = self + Ok(self .query(query) .await? - .block - .map(TryInto::try_into) - .transpose()?; - - Ok(block) + .da_compressed_block + .map(|b| b.bytes.into())) } /// Retrieve a blob by its ID diff --git a/crates/client/src/client/schema/da_compressed.rs b/crates/client/src/client/schema/da_compressed.rs index e159d44bc9a..d8fb7171423 100644 --- a/crates/client/src/client/schema/da_compressed.rs +++ b/crates/client/src/client/schema/da_compressed.rs @@ -1,237 +1,74 @@ -use super::Bytes32; use crate::client::schema::{ schema, - BlockId, - ConnectionArgs, - PageInfo, - Signature, - Tai64Timestamp, - TransactionId, - U16, U32, - U64, -}; -use fuel_core_types::{ - fuel_crypto, - fuel_types::BlockHeight, }; -#[derive(cynic::QueryVariables, Debug)] -pub struct BlockByIdArgs { - pub id: Option, -} - -#[derive(cynic::QueryFragment, Clone, Debug)] -#[cynic( - schema_path = "./assets/schema.sdl", - graphql_type = "Query", - variables = "BlockByIdArgs" -)] -pub struct BlockByIdQuery { - #[arguments(id: $id)] - pub block: Option, -} +use super::HexString; #[derive(cynic::QueryVariables, Debug)] -pub struct BlockByHeightArgs { - pub height: Option, +pub struct DaCompressedBlockByHeightArgs { + pub height: U32, } 
#[derive(cynic::QueryFragment, Clone, Debug)] #[cynic( schema_path = "./assets/schema.sdl", graphql_type = "Query", - variables = "BlockByHeightArgs" + variables = "DaCompressedBlockByHeightArgs" )] -pub struct BlockByHeightQuery { +pub struct DaCompressedBlockByHeightQuery { #[arguments(height: $height)] - pub block: Option, -} - -#[derive(cynic::QueryFragment, Clone, Debug)] -#[cynic( - schema_path = "./assets/schema.sdl", - graphql_type = "Query", - variables = "ConnectionArgs" -)] -pub struct BlocksQuery { - #[arguments(after: $after, before: $before, first: $first, last: $last)] - pub blocks: BlockConnection, -} - -#[derive(cynic::QueryFragment, Clone, Debug)] -#[cynic(schema_path = "./assets/schema.sdl")] -pub struct BlockConnection { - pub edges: Vec, - pub page_info: PageInfo, -} - -#[derive(cynic::QueryFragment, Clone, Debug)] -#[cynic(schema_path = "./assets/schema.sdl")] -pub struct BlockEdge { - pub cursor: String, - pub node: Block, -} - -#[derive(cynic::Enum, Clone, Debug)] -#[cynic(schema_path = "./assets/schema.sdl")] -pub enum BlockVersion { - V1, + pub da_compressed_block: Option, } /// Block with transaction ids #[derive(cynic::QueryFragment, Clone, Debug)] #[cynic(schema_path = "./assets/schema.sdl")] -pub struct Block { - pub version: BlockVersion, - pub id: BlockId, - pub header: Header, - pub consensus: Consensus, - pub transaction_ids: Vec, -} - -#[derive(cynic::QueryFragment, Clone, Debug)] -#[cynic(schema_path = "./assets/schema.sdl", graphql_type = "Block")] -pub struct BlockIdFragment { - pub id: BlockId, -} - -#[derive(cynic::QueryFragment, Clone, Debug)] -#[cynic(schema_path = "./assets/schema.sdl", graphql_type = "Block")] -pub struct BlockHeightFragment { - pub height: U32, -} - -#[derive(cynic::QueryVariables, Debug)] -pub struct ProduceBlockArgs { - pub start_timestamp: Option, - pub blocks_to_produce: U32, -} - -#[derive(cynic::QueryFragment, Clone, Debug)] -#[cynic( - schema_path = "./assets/schema.sdl", - variables = 
"ProduceBlockArgs", - graphql_type = "Mutation" -)] -pub struct BlockMutation { - #[arguments(blocksToProduce: $blocks_to_produce, startTimestamp: $start_timestamp)] - pub produce_blocks: U32, -} - -#[derive(cynic::Enum, Clone, Debug)] -#[cynic(schema_path = "./assets/schema.sdl")] -pub enum HeaderVersion { - V1, -} - -#[derive(cynic::QueryFragment, Clone, Debug)] -#[cynic(schema_path = "./assets/schema.sdl")] -pub struct Header { - pub version: HeaderVersion, - pub id: BlockId, - pub da_height: U64, - pub consensus_parameters_version: U32, - pub state_transition_bytecode_version: U32, - pub transactions_count: U16, - pub message_receipt_count: U32, - pub transactions_root: Bytes32, - pub message_outbox_root: Bytes32, - pub event_inbox_root: Bytes32, - pub height: U32, - pub prev_root: Bytes32, - pub time: Tai64Timestamp, - pub application_hash: Bytes32, -} - -#[derive(cynic::InlineFragments, Clone, Debug)] -#[cynic(schema_path = "./assets/schema.sdl")] -pub enum Consensus { - Genesis(Genesis), - PoAConsensus(PoAConsensus), - #[cynic(fallback)] - Unknown, -} - -#[derive(cynic::QueryFragment, Clone, Debug)] -#[cynic(schema_path = "./assets/schema.sdl")] -pub struct Genesis { - pub chain_config_hash: Bytes32, - pub coins_root: Bytes32, - pub contracts_root: Bytes32, - pub messages_root: Bytes32, - pub transactions_root: Bytes32, -} - -#[derive(cynic::QueryFragment, Clone, Debug)] -#[cynic(schema_path = "./assets/schema.sdl")] -pub struct PoAConsensus { - pub signature: Signature, -} - -impl Block { - /// Returns the block producer public key, if any. 
- pub fn block_producer(&self) -> Option { - let message = self.header.id.clone().into_message(); - match &self.consensus { - Consensus::Genesis(_) => Some(Default::default()), - Consensus::PoAConsensus(poa) => { - let signature = poa.signature.clone().into_signature(); - let producer_pub_key = signature.recover(&message); - producer_pub_key.ok() - } - Consensus::Unknown => None, - } - } -} - -impl From for BlockHeight { - fn from(fragment: BlockHeightFragment) -> Self { - BlockHeight::new(fragment.height.into()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn block_by_id_query_gql_output() { - use cynic::QueryBuilder; - let operation = BlockByIdQuery::build(BlockByIdArgs { - id: Some(BlockId::default()), - }); - insta::assert_snapshot!(operation.query) - } - - #[test] - fn block_by_height_query_gql_output() { - use cynic::QueryBuilder; - let operation = BlockByHeightQuery::build(BlockByHeightArgs { - height: Some(U32(0)), - }); - insta::assert_snapshot!(operation.query) - } - - #[test] - fn block_mutation_query_gql_output() { - use cynic::MutationBuilder; - let operation = BlockMutation::build(ProduceBlockArgs { - blocks_to_produce: U32(0), - start_timestamp: None, - }); - insta::assert_snapshot!(operation.query) - } - - #[test] - fn blocks_connection_query_gql_output() { - use cynic::QueryBuilder; - let operation = BlocksQuery::build(ConnectionArgs { - after: None, - before: None, - first: None, - last: None, - }); - insta::assert_snapshot!(operation.query) - } -} +pub struct DaCompressedBlock { + pub bytes: HexString, +} + +// #[cfg(test)] +// mod tests { +// use super::*; + +// #[test] +// fn block_by_id_query_gql_output() { +// use cynic::QueryBuilder; +// let operation = BlockByIdQuery::build(BlockByIdArgs { +// id: Some(BlockId::default()), +// }); +// insta::assert_snapshot!(operation.query) +// } + +// #[test] +// fn block_by_height_query_gql_output() { +// use cynic::QueryBuilder; +// let operation = 
BlockByHeightQuery::build(BlockByHeightArgs { +// height: Some(U32(0)), +// }); +// insta::assert_snapshot!(operation.query) +// } + +// #[test] +// fn block_mutation_query_gql_output() { +// use cynic::MutationBuilder; +// let operation = BlockMutation::build(ProduceBlockArgs { +// blocks_to_produce: U32(0), +// start_timestamp: None, +// }); +// insta::assert_snapshot!(operation.query) +// } + +// #[test] +// fn blocks_connection_query_gql_output() { +// use cynic::QueryBuilder; +// let operation = BlocksQuery::build(ConnectionArgs { +// after: None, +// before: None, +// first: None, +// last: None, +// }); +// insta::assert_snapshot!(operation.query) +// } +// } diff --git a/crates/fuel-core/src/graphql_api/worker_service.rs b/crates/fuel-core/src/graphql_api/worker_service.rs index 39829a69e02..97614c31b20 100644 --- a/crates/fuel-core/src/graphql_api/worker_service.rs +++ b/crates/fuel-core/src/graphql_api/worker_service.rs @@ -271,9 +271,23 @@ where { fn lookup( &self, - _utxo_id: fuel_core_types::fuel_tx::UtxoId, + utxo_id: fuel_core_types::fuel_tx::UtxoId, ) -> anyhow::Result { - todo!(); + for event in self.1 { + match event { + Event::CoinCreated(coin) | Event::CoinConsumed(coin) + if coin.utxo_id == utxo_id => + { + let output_index = coin.utxo_id.output_index(); + return Ok(fuel_core_types::fuel_tx::CompressedUtxoId { + tx_pointer: coin.tx_pointer, + output_index, + }); + } + _ => {} + } + } + panic!("UtxoId not found in the block events"); } } diff --git a/crates/fuel-core/src/schema/da_compressed.rs b/crates/fuel-core/src/schema/da_compressed.rs index d915a164c75..09e8d2b1c88 100644 --- a/crates/fuel-core/src/schema/da_compressed.rs +++ b/crates/fuel-core/src/schema/da_compressed.rs @@ -1,4 +1,7 @@ -use super::ReadViewProvider; +use super::{ + scalars::HexString, + ReadViewProvider, +}; use crate::{ fuel_core_graphql_api::{ IntoApiResult, @@ -24,8 +27,8 @@ impl From> for DaCompressedBlock { #[Object] impl DaCompressedBlock { - async fn bytes(&self) 
-> Vec { - self.bytes.clone() + async fn bytes(&self) -> HexString { + HexString(self.bytes.clone()) } } diff --git a/tests/tests/da_compression.rs b/tests/tests/da_compression.rs index 755a52ba3a4..6cf54170148 100644 --- a/tests/tests/da_compression.rs +++ b/tests/tests/da_compression.rs @@ -1,7 +1,6 @@ use fuel_core::{ combined_database::CombinedDatabase, service::{ - adapters::consensus_module::poa::block_path, Config, FuelService, }, @@ -11,9 +10,7 @@ use fuel_core_client::client::{ FuelClient, }; use fuel_core_poa::signer::SignMode; -use fuel_core_storage::transactional::AtomicView; use fuel_core_types::{ - blockchain::consensus::Consensus, fuel_crypto::SecretKey, fuel_tx::Transaction, secrecy::Secret, @@ -22,17 +19,11 @@ use rand::{ rngs::StdRng, SeedableRng, }; -use tempfile::tempdir; -use test_helpers::{ - fuel_core_driver::FuelCoreDriver, - produce_block_with_tx, -}; #[tokio::test] async fn can_get_da_compressed_blocks() { let mut rng = StdRng::seed_from_u64(10); let poa_secret = SecretKey::random(&mut rng); - let poa_public = poa_secret.public_key(); let db = CombinedDatabase::default(); let mut config = Config::local_node(); @@ -54,5 +45,6 @@ async fn can_get_da_compressed_blocks() { } }; - let block = client.get_da_compressed_block(block_height).await.unwrap(); + let block = client.da_compressed_block(block_height).await.unwrap(); + assert!(block.is_some()); } From e3246741deec3079f37f2b646b8fbfd78ad8722e Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 17 Sep 2024 12:13:38 +0200 Subject: [PATCH 039/112] Update snapshot tests --- .../client/src/client/schema/da_compressed.rs | 58 +++++-------------- crates/compression/src/ports.rs | 5 +- .../src/graphql_api/worker_service.rs | 23 ++------ 3 files changed, 19 insertions(+), 67 deletions(-) diff --git a/crates/client/src/client/schema/da_compressed.rs b/crates/client/src/client/schema/da_compressed.rs index d8fb7171423..73a131f1d32 100644 --- a/crates/client/src/client/schema/da_compressed.rs +++ 
b/crates/client/src/client/schema/da_compressed.rs @@ -28,47 +28,17 @@ pub struct DaCompressedBlock { pub bytes: HexString, } -// #[cfg(test)] -// mod tests { -// use super::*; - -// #[test] -// fn block_by_id_query_gql_output() { -// use cynic::QueryBuilder; -// let operation = BlockByIdQuery::build(BlockByIdArgs { -// id: Some(BlockId::default()), -// }); -// insta::assert_snapshot!(operation.query) -// } - -// #[test] -// fn block_by_height_query_gql_output() { -// use cynic::QueryBuilder; -// let operation = BlockByHeightQuery::build(BlockByHeightArgs { -// height: Some(U32(0)), -// }); -// insta::assert_snapshot!(operation.query) -// } - -// #[test] -// fn block_mutation_query_gql_output() { -// use cynic::MutationBuilder; -// let operation = BlockMutation::build(ProduceBlockArgs { -// blocks_to_produce: U32(0), -// start_timestamp: None, -// }); -// insta::assert_snapshot!(operation.query) -// } - -// #[test] -// fn blocks_connection_query_gql_output() { -// use cynic::QueryBuilder; -// let operation = BlocksQuery::build(ConnectionArgs { -// after: None, -// before: None, -// first: None, -// last: None, -// }); -// insta::assert_snapshot!(operation.query) -// } -// } +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn block_by_height_query_gql_output() { + use cynic::QueryBuilder; + let operation = + DaCompressedBlockByHeightQuery::build(DaCompressedBlockByHeightArgs { + height: U32(0), + }); + insta::assert_snapshot!(operation.query) + } +} diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index 0a55adfc195..2a372ae1c1c 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -9,10 +9,7 @@ use fuel_core_types::{ UtxoId, Word, }, - fuel_types::{ - BlockHeight, - Nonce, - }, + fuel_types::Nonce, }; use crate::tables::RegistryKeyspace; diff --git a/crates/fuel-core/src/graphql_api/worker_service.rs b/crates/fuel-core/src/graphql_api/worker_service.rs index 97614c31b20..f9ee0ea308c 100644 --- 
a/crates/fuel-core/src/graphql_api/worker_service.rs +++ b/crates/fuel-core/src/graphql_api/worker_service.rs @@ -1,21 +1,9 @@ -use super::{ - ports::worker, - storage::old::{ - OldFuelBlockConsensus, - OldFuelBlocks, - OldTransactions, - }, +use super::storage::old::{ + OldFuelBlockConsensus, + OldFuelBlocks, + OldTransactions, }; use crate::{ - database::{ - database_description::{ - off_chain::OffChain, - DatabaseDescription, - DatabaseHeight, - }, - metadata::MetadataTable, - Database, - }, fuel_core_graphql_api::{ ports::{ self, @@ -44,7 +32,6 @@ use crate::{ relayed_transactions::RelayedTransactionStatuses, }, }; -use async_graphql::Description; use fuel_core_compression::{ ports::{ TemporalRegistry, @@ -64,7 +51,6 @@ use fuel_core_services::{ }; use fuel_core_storage::{ not_found, - transactional::StorageTransaction, Error as StorageError, Result as StorageResult, StorageAsMut, @@ -121,7 +107,6 @@ use futures::{ }; use std::{ borrow::Cow, - fmt::Debug, ops::Deref, }; From 7e8424ecd23573ce38288f00b323d3c367a3ad60 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 17 Sep 2024 12:14:09 +0200 Subject: [PATCH 040/112] Remove some commented-out code --- crates/compression/src/ports.rs | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index 2a372ae1c1c..c663397f024 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -66,19 +66,3 @@ pub struct MessageInfo { pub amount: Word, pub data: Vec, } - -// Exposed interfaces: where should these live? - -// use fuel_core_types::blockchain::block::Block; - -// #[async_trait::async_trait] -// pub trait CompressPort { -// /// Compress the next block. -// async fn compress_next(&mut self, block: Block) -> anyhow::Result>; -// } - -// #[async_trait::async_trait] -// pub trait DecompressPort { -// /// Decompress the next block. 
-// async fn decompress_next(&mut self, block: Vec) -> anyhow::Result; -// } From 7f238918d13ba4ae730660fcc458a7396eecb618 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 17 Sep 2024 12:15:44 +0200 Subject: [PATCH 041/112] Update Cargo.lock --- Cargo.lock | 355 ++++++++++++++++++++++++++--------------------------- 1 file changed, 176 insertions(+), 179 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bf9c057b0b8..708fe548185 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14,19 +14,13 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ - "gimli 0.29.0", + "gimli 0.31.0", ] -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "adler2" version = "2.0.0" @@ -175,9 +169,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "4e1496f8fb1fbf272686b8d37f523dab3e4a7443300055e74cdaa449f3114356" [[package]] name = "arbitrary" @@ -187,9 +181,9 @@ checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" @@ -304,9 +298,9 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.13.0" +version = 
"1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7ebdfa2ebdab6b1760375fa7d6f382b9f486eac35fc994625a00e89280bdbb7" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ "async-task", "concurrent-queue", @@ -449,7 +443,7 @@ dependencies = [ "futures-lite 2.3.0", "parking", "polling 3.7.3", - "rustix 0.38.35", + "rustix 0.38.37", "slab", "tracing", "windows-sys 0.59.0", @@ -499,7 +493,7 @@ dependencies = [ "cfg-if", "event-listener 3.1.0", "futures-lite 1.13.0", - "rustix 0.38.35", + "rustix 0.38.37", "windows-sys 0.48.0", ] @@ -515,7 +509,7 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.35", + "rustix 0.38.37", "signal-hook-registry", "slab", "windows-sys 0.59.0", @@ -523,20 +517,20 @@ dependencies = [ [[package]] name = "async-std" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" dependencies = [ "async-channel 1.9.0", "async-global-executor", - "async-io 1.13.0", - "async-lock 2.8.0", + "async-io 2.3.4", + "async-lock 3.4.0", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite 1.13.0", - "gloo-timers", + "futures-lite 2.3.0", + "gloo-timers 0.3.0", "kv-log-macro", "log", "memchr", @@ -679,9 +673,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "aws-config" -version = "1.5.5" +version = "1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e95816a168520d72c0e7680c405a5a8c1fb6a035b4bc4b9d7b0de8e1a941697" +checksum = "848d7b9b605720989929279fa644ce8f244d0ce3146fcca5b70e4eb7b3c020fc" dependencies = [ "aws-credential-types", "aws-runtime", @@ -721,9 +715,9 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.4.2" +version = 
"1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2424565416eef55906f9f8cece2072b6b6a76075e3ff81483ebe938a89a4c05f" +checksum = "a10d5c055aa540164d9561a0e2e74ad30f0dcf7393c3a92f6733ddf9c5762468" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -746,9 +740,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "1.41.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "178910fefe72743b62b9c4670c14a038ebfdb265ff7feccf43827af6a8899e14" +checksum = "d9f7cb482caa5444d445c94417b9c74e49a849beb09ede4f2f4c3c15f8157387" dependencies = [ "aws-credential-types", "aws-runtime", @@ -768,9 +762,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.40.0" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5879bec6e74b648ce12f6085e7245417bc5f6d672781028384d2e494be3eb6d" +checksum = "27bf24cd0d389daa923e974b0e7c38daf308fc21e963c049f57980235017175e" dependencies = [ "aws-credential-types", "aws-runtime", @@ -790,9 +784,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.41.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ef4cd9362f638c22a3b959fd8df292e7e47fdf170270f86246b97109b5f2f7d" +checksum = "3b43b3220f1c46ac0e9dcc0a97d94b93305dacb36d1dd393996300c6b9b74364" dependencies = [ "aws-credential-types", "aws-runtime", @@ -812,9 +806,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.40.0" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b1e2735d2ab28b35ecbb5496c9d41857f52a0d6a0075bbf6a8af306045ea6f6" +checksum = "d1c46924fb1add65bba55636e12812cae2febf68c0f37361766f627ddcca91ce" dependencies = [ "aws-credential-types", "aws-runtime", @@ -835,9 +829,9 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "1.2.3" +version = "1.2.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5df1b0fa6be58efe9d4ccc257df0a53b89cd8909e86591a13ca54817c87517be" +checksum = "cc8db6904450bafe7473c6ca9123f88cc11089e41a025408f992db4e22d3be68" dependencies = [ "aws-credential-types", "aws-smithy-http", @@ -869,9 +863,9 @@ dependencies = [ [[package]] name = "aws-smithy-http" -version = "0.60.10" +version = "0.60.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01dbcb6e2588fd64cfb6d7529661b06466419e4c54ed1c62d6510d2d0350a728" +checksum = "5c8bc3e8fdc6b8d07d976e301c02fe553f72a39b7a9fea820e023268467d7ab6" dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", @@ -952,9 +946,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.2.4" +version = "1.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "273dcdfd762fae3e1650b8024624e7cd50e484e37abdab73a7a706188ad34543" +checksum = "03701449087215b5369c7ea17fef0dd5d24cb93439ec5af0c7615f58c3f22605" dependencies = [ "base64-simd", "bytes", @@ -978,9 +972,9 @@ dependencies = [ [[package]] name = "aws-smithy-xml" -version = "0.60.8" +version = "0.60.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d123fbc2a4adc3c301652ba8e149bf4bc1d1725affb9784eb20c953ace06bf55" +checksum = "ab0b0166827aa700d3dc519f72f8b3a91c35d0b8d042dc5d643a91e6f80648fc" dependencies = [ "xmlparser", ] @@ -1048,18 +1042,18 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", - "miniz_oxide 0.7.4", + "miniz_oxide", "object", "rustc-demangle", "serde", + "windows-targets 0.52.6", ] [[package]] @@ -1351,9 +1345,9 @@ checksum = 
"37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.15" +version = "1.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57b6a275aa2903740dc87da01c62040406b8812552e97129a63ea8850a17c6e6" +checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476" dependencies = [ "jobserver", "libc", @@ -1486,9 +1480,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.16" +version = "4.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" +checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" dependencies = [ "clap_builder", "clap_derive 4.5.13", @@ -1496,9 +1490,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.15" +version = "4.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" +checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" dependencies = [ "anstream", "anstyle", @@ -1686,18 +1680,18 @@ dependencies = [ [[package]] name = "const_format" -version = "0.2.32" +version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3a214c7af3d04997541b18d432afaff4c455e79e2029079647e72fc2bd27673" +checksum = "50c655d81ff1114fb0dcdea9225ea9f0cc712a6f8d189378e82bdf62a473a64b" dependencies = [ "const_format_proc_macros", ] [[package]] name = "const_format_proc_macros" -version = "0.2.32" +version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" +checksum = "eff1a44b93f47b1bac19a27932f5c591e43d1ba357ee4f61526c8a25603f0eb1" dependencies = [ "proc-macro2", "quote", @@ -1789,9 +1783,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.13" +version = "0.2.14" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -1936,7 +1930,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.16", + "clap 4.5.17", "criterion-plot", "futures", "is-terminal", @@ -2594,11 +2588,11 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.77", @@ -3103,7 +3097,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", - "miniz_oxide 0.8.0", + "miniz_oxide", ] [[package]] @@ -3148,8 +3142,8 @@ dependencies = [ [[package]] name = "fuel-asm" -version = "0.56.0" -source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" +version = "0.57.0" +source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" dependencies = [ "bitflags 2.6.0", "fuel-types", @@ -3159,8 +3153,8 @@ dependencies = [ [[package]] name = "fuel-compression" -version = "0.56.0" -source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" +version = "0.57.0" +source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" dependencies = [ "fuel-derive", "fuel-types", @@ -3176,7 +3170,7 @@ dependencies = [ "async-graphql", "async-trait", "axum", - "clap 4.5.16", + "clap 4.5.17", "derive_more", "enum-iterator", "fuel-core", @@ -3233,7 +3227,7 @@ version = "0.0.0" dependencies = [ "anyhow", 
"async-trait", - "clap 4.5.16", + "clap 4.5.17", "criterion", "ctrlc", "ed25519-dalek", @@ -3270,7 +3264,7 @@ dependencies = [ "anyhow", "aws-config", "aws-sdk-kms", - "clap 4.5.16", + "clap 4.5.17", "const_format", "dirs 4.0.0", "dotenvy", @@ -3350,7 +3344,7 @@ dependencies = [ name = "fuel-core-client-bin" version = "0.35.0" dependencies = [ - "clap 4.5.16", + "clap 4.5.17", "fuel-core-client", "fuel-core-types", "serde_json", @@ -3483,7 +3477,7 @@ name = "fuel-core-keygen" version = "0.35.0" dependencies = [ "anyhow", - "clap 4.5.16", + "clap 4.5.17", "fuel-core-types", "libp2p-identity", "serde", @@ -3495,7 +3489,7 @@ version = "0.35.0" dependencies = [ "anyhow", "atty", - "clap 4.5.16", + "clap 4.5.17", "crossterm", "fuel-core-keygen", "serde_json", @@ -3692,7 +3686,7 @@ dependencies = [ "async-trait", "aws-config", "aws-sdk-kms", - "clap 4.5.16", + "clap 4.5.17", "cynic", "ethers", "fuel-core", @@ -3814,8 +3808,8 @@ dependencies = [ [[package]] name = "fuel-crypto" -version = "0.56.0" -source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" +version = "0.57.0" +source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" dependencies = [ "coins-bip32", "coins-bip39", @@ -3834,8 +3828,8 @@ dependencies = [ [[package]] name = "fuel-derive" -version = "0.56.0" -source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" +version = "0.57.0" +source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" dependencies = [ "proc-macro2", "quote", @@ -3854,8 +3848,8 @@ dependencies = [ [[package]] name = "fuel-merkle" -version = "0.56.0" -source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" +version = "0.57.0" +source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" dependencies = [ "derive_more", "digest 0.10.7", @@ -3868,13 +3862,13 @@ dependencies = [ [[package]] 
name = "fuel-storage" -version = "0.56.0" -source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" +version = "0.57.0" +source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" [[package]] name = "fuel-tx" -version = "0.56.0" -source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" +version = "0.57.0" +source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" dependencies = [ "bitflags 2.6.0", "derivative", @@ -3896,8 +3890,8 @@ dependencies = [ [[package]] name = "fuel-types" -version = "0.56.0" -source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" +version = "0.57.0" +source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" dependencies = [ "fuel-derive", "hex", @@ -3907,8 +3901,8 @@ dependencies = [ [[package]] name = "fuel-vm" -version = "0.56.0" -source = "git+https://github.com/FuelLabs/fuel-vm#d28a1431030b5980c73cdf1b96838fb672b81800" +version = "0.57.0" +source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" dependencies = [ "anyhow", "async-trait", @@ -4060,7 +4054,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", ] @@ -4093,7 +4087,7 @@ version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" dependencies = [ - "gloo-timers", + "gloo-timers 0.2.6", "send_wrapper 0.4.0", ] @@ -4169,9 +4163,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" 
+checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "glob" @@ -4191,6 +4185,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "gloo-timers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "graphql-parser" version = "0.4.0" @@ -4823,9 +4829,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.39.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "810ae6042d48e2c9e9215043563a58a80b877bc863228a74cf10c49d4620a6f5" +checksum = "6593a41c7a73841868772495db7dc1e8ecab43bb5c0b6da2059246c4b506ab60" dependencies = [ "console", "lazy_static", @@ -4879,9 +4885,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" [[package]] name = "is-terminal" @@ -5417,7 +5423,7 @@ dependencies = [ "quinn", "rand", "ring 0.17.8", - "rustls 0.23.12", + "rustls 0.23.13", "socket2 0.5.7", "thiserror", "tokio", @@ -5530,7 +5536,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-webpki 0.101.7", "thiserror", "x509-parser", @@ -5680,7 +5686,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d8de370f98a6cb8a4606618e53e802f93b094ddec0f96988eaec2c27e6e9ce7" dependencies = [ - "clap 4.5.16", + "clap 4.5.17", "termcolor", "threadpool", ] @@ -5753,9 +5759,9 @@ dependencies = [ [[package]] name = "lz4-sys" -version = "1.10.0" +version = "1.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "109de74d5d2353660401699a4174a4ff23fcc649caf553df71933c7fb45ad868" +checksum = "fcb44a01837a858d47e5a630d2ccf304c8efcc4b83b8f9f75b7a9ee4fcc6e57d" dependencies = [ "cc", "libc", @@ -5813,14 +5819,14 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2cffa4ad52c6f791f4f8b15f0c05f9824b2ced1160e88cc393d64fff9a8ac64" dependencies = [ - "rustix 0.38.35", + "rustix 0.38.37", ] [[package]] name = "memmap2" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322" +checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" dependencies = [ "libc", ] @@ -5837,15 +5843,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.8.0" @@ -6304,9 +6301,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "33ea5043e58958ee56f3e15a90aee535795cd7dfd319846288d93c5b57d85cbe" [[package]] name = "oorandom" @@ -6424,9 +6421,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" 
@@ -6446,7 +6443,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.3", + "redox_syscall 0.5.4", "smallvec", "windows-targets 0.52.6", ] @@ -6548,9 +6545,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.11" +version = "2.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" +checksum = "9c73c26c01b8c87956cea613c907c9d6ecffd8d18a2a5908e5de0adfaa185cea" dependencies = [ "memchr", "thiserror", @@ -6689,9 +6686,9 @@ checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "plotters" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ "num-traits", "plotters-backend", @@ -6702,15 +6699,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] @@ -6741,7 +6738,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.35", + "rustix 0.38.37", "tracing", "windows-sys 0.59.0", ] @@ -7029,9 +7026,9 @@ checksum = 
"33cb294fe86a74cbcf50d4445b37da762029549ebeea341421c7c70370f86cac" [[package]] name = "psm" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b1f9bf148c15500d44581654fb9260bc9d82970f3ef777a79a40534f6aa784f" +checksum = "aa37f80ca58604976033fae9515a8a2989fc13797d953f7c04fb8fa36a11f205" dependencies = [ "cc", ] @@ -7134,9 +7131,9 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.4" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2d2fb862b7ba45e615c1429def928f2e15f815bdf933b27a2d3824e224c1f46" +checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" dependencies = [ "bytes", "futures-io", @@ -7144,7 +7141,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.13", "socket2 0.5.7", "thiserror", "tokio", @@ -7153,15 +7150,15 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.7" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0a9b3a42929fad8a7c3de7f86ce0814cfa893328157672680e9fb1145549c5" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", "rand", "ring 0.17.8", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.13", "slab", "thiserror", "tinyvec", @@ -7170,15 +7167,15 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285" +checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ "libc", "once_cell", "socket2 0.5.7", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -7287,9 +7284,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = [ "bitflags 2.6.0", ] @@ -7613,9 +7610,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.35" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a85d50532239da68e9addb745ba38ff4612a242c1c7ceea689c4bc7c2f43c36f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -7638,14 +7635,14 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "once_cell", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -7689,9 +7686,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.7" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -7777,11 +7774,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -7854,9 +7851,9 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.26.0" 
+version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4124a35fe33ae14259c490fd70fa199a32b9ce9502f2ee6bc4f81ec06fa65894" +checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" dependencies = [ "rand", "secp256k1-sys", @@ -7864,9 +7861,9 @@ dependencies = [ [[package]] name = "secp256k1-sys" -version = "0.8.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" dependencies = [ "cc", ] @@ -7932,18 +7929,18 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -7952,9 +7949,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", @@ -8448,9 +8445,9 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "12.10.1" +version = "12.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b1944ea8afd197111bca0c0edea1e1f56abb3edd030e240c1035cc0e3ff51fec" +checksum = "9c1db5ac243c7d7f8439eb3b8f0357888b37cf3732957e91383b0ad61756374e" dependencies = [ "debugid", "memmap2", @@ -8460,9 +8457,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.10.1" +version = "12.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddaccaf1bf8e73c4f64f78dbb30aadd6965c71faa4ff3fba33f8d7296cf94a87" +checksum = "ea26e430c27d4a8a5dea4c4b81440606c7c1a415bd611451ef6af8c81416afc3" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -8559,7 +8556,7 @@ dependencies = [ "cfg-if", "fastrand 2.1.1", "once_cell", - "rustix 0.38.35", + "rustix 0.38.37", "windows-sys 0.59.0", ] @@ -8639,7 +8636,7 @@ name = "test-helpers" version = "0.0.0" dependencies = [ "anyhow", - "clap 4.5.16", + "clap 4.5.17", "fuel-core", "fuel-core-bin", "fuel-core-client", @@ -8872,9 +8869,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -8899,9 +8896,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -9175,9 +9172,9 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = 
"e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" @@ -9502,7 +9499,7 @@ dependencies = [ "postcard", "psm", "rayon", - "rustix 0.38.35", + "rustix 0.38.37", "serde", "serde_derive", "smallvec", @@ -9540,7 +9537,7 @@ dependencies = [ "directories-next", "log", "postcard", - "rustix 0.38.35", + "rustix 0.38.37", "serde", "serde_derive", "sha2 0.10.8", @@ -10007,9 +10004,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.21" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a77ee7c0de333dcc6da69b177380a0b81e0dacfa4f7344c465a36871ee601" +checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" [[package]] name = "xmlparser" @@ -10030,7 +10027,7 @@ dependencies = [ name = "xtask" version = "0.0.0" dependencies = [ - "clap 4.5.16", + "clap 4.5.17", "fuel-core", ] From 070454ecf2ccf76a9caef090ae2dad07707420b6 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 17 Sep 2024 12:26:21 +0200 Subject: [PATCH 042/112] cargo sort --- crates/compression/Cargo.toml | 2 +- crates/fuel-core/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 149e2dbbb79..509f878c2be 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -15,9 +15,9 @@ anyhow = { workspace = true } async-trait = { workspace = true } fuel-core-types = { workspace = true, features = ["serde", "da-compression"] } postcard = { version = "1.0", features = ["use-std"] } +rand = { workspace = true, optional = true } serde = { version = "1.0", features = ["derive"] } thiserror = { workspace = true } -rand = { workspace = true, optional = true} [dev-dependencies] bimap = { version = "0.6" } diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index 56de99ba502..6147cede730 100644 --- a/crates/fuel-core/Cargo.toml +++ 
b/crates/fuel-core/Cargo.toml @@ -22,8 +22,8 @@ clap = { workspace = true, features = ["derive"] } derive_more = { version = "0.99" } enum-iterator = { workspace = true } fuel-core-chain-config = { workspace = true, features = ["std"] } -fuel-core-consensus-module = { workspace = true } fuel-core-compression = { workspace = true } +fuel-core-consensus-module = { workspace = true } fuel-core-database = { workspace = true } fuel-core-executor = { workspace = true, features = ["std"] } fuel-core-gas-price-service = { workspace = true } From 350d9dfe7155fdefec94d097bf8a73d6e3f0006a Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 18 Sep 2024 19:27:25 +0300 Subject: [PATCH 043/112] Prettify Cargo.toml --- crates/compression/Cargo.toml | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 509f878c2be..6186060d433 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -5,7 +5,13 @@ authors = { workspace = true } categories = ["cryptography::cryptocurrencies"] edition = { workspace = true } homepage = { workspace = true } -keywords = ["blockchain", "cryptocurrencies", "fuel-core", "fuel-client", "fuel-compression"] +keywords = [ + "blockchain", + "cryptocurrencies", + "fuel-core", + "fuel-client", + "fuel-compression", +] license = { workspace = true } repository = { workspace = true } description = "Compression and decompression of Fuel blocks for DA storage." 
@@ -27,4 +33,9 @@ tempfile = "3" tokio = { workspace = true, features = ["sync"] } [features] -test-helpers = ["dep:rand", "fuel-core-types/test-helpers", "fuel-core-types/random", "fuel-core-types/std"] +test-helpers = [ + "dep:rand", + "fuel-core-types/test-helpers", + "fuel-core-types/random", + "fuel-core-types/std", +] From 020656a7eed3095c103a951bdc0661bc42786454 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 18 Sep 2024 19:36:17 +0300 Subject: [PATCH 044/112] Fix types after merge --- Cargo.lock | 314 +++++++++++++----- ...ts__snapshot_local_testnet_config.snap.new | 312 +++++++++++++++++ crates/compression/src/context/decompress.rs | 34 +- crates/compression/src/ports.rs | 6 +- crates/compression/src/services/decompress.rs | 2 +- crates/fuel-core/src/query/message/test.rs | 2 - tests/tests/da_compression.rs | 2 +- 7 files changed, 567 insertions(+), 105 deletions(-) create mode 100644 crates/chain-config/src/config/snapshots/fuel_core_chain_config__config__chain__tests__snapshot_local_testnet_config.snap.new diff --git a/Cargo.lock b/Cargo.lock index b7d87a23677..3f5e28030a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3140,13 +3140,26 @@ dependencies = [ "winapi", ] +[[package]] +name = "fuel-asm" +version = "0.56.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "122c27ab46707017063bf1c6e0b4f3de881e22e81b4059750a0dc95033d9cc26" +dependencies = [ + "bitflags 2.6.0", + "fuel-types 0.56.0", + "serde", + "strum 0.24.1", +] + [[package]] name = "fuel-asm" version = "0.57.0" -source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b29ea55a794c00d0dfaad06f11720a05fa928603f812dca1c38163f2b240860a" dependencies = [ "bitflags 2.6.0", - "fuel-types", + "fuel-types 0.57.0", "serde", "strum 0.24.1", ] @@ -3154,16 +3167,17 @@ dependencies = [ [[package]] name = "fuel-compression" version = "0.57.0" -source = 
"git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ddebdc0c7440995bd89ae62bc5bbc7196ffd8dcff8cad78ffc55d5b2744e4d8" dependencies = [ - "fuel-derive", - "fuel-types", + "fuel-derive 0.57.0", + "fuel-types 0.57.0", "serde", ] [[package]] name = "fuel-core" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "assert_matches", @@ -3191,7 +3205,7 @@ dependencies = [ "fuel-core-sync", "fuel-core-trace", "fuel-core-txpool", - "fuel-core-types", + "fuel-core-types 0.36.0", "fuel-core-upgradable-executor", "futures", "hex", @@ -3239,7 +3253,7 @@ dependencies = [ "fuel-core-services", "fuel-core-storage", "fuel-core-sync", - "fuel-core-types", + "fuel-core-types 0.36.0", "futures", "itertools 0.12.1", "num_enum", @@ -3260,11 +3274,11 @@ dependencies = [ [[package]] name = "fuel-core-bft" -version = "0.35.0" +version = "0.36.0" [[package]] name = "fuel-core-bin" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "aws-config", @@ -3277,7 +3291,7 @@ dependencies = [ "fuel-core-chain-config", "fuel-core-poa", "fuel-core-storage", - "fuel-core-types", + "fuel-core-types 0.36.0", "hex", "humantime", "itertools 0.12.1", @@ -3298,7 +3312,7 @@ dependencies = [ [[package]] name = "fuel-core-chain-config" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "bech32", @@ -3306,7 +3320,7 @@ dependencies = [ "derivative", "fuel-core-chain-config", "fuel-core-storage", - "fuel-core-types", + "fuel-core-types 0.36.0", "insta", "itertools 0.12.1", "parquet", @@ -3324,13 +3338,13 @@ dependencies = [ [[package]] name = "fuel-core-client" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "cynic", "derive_more", "eventsource-client", - "fuel-core-types", + "fuel-core-types 0.36.0", "futures", "hex", "hyper-rustls", @@ -3347,25 +3361,25 @@ dependencies = [ [[package]] name = "fuel-core-client-bin" -version = "0.35.0" +version 
= "0.36.0" dependencies = [ "clap 4.5.17", "fuel-core-client", - "fuel-core-types", + "fuel-core-types 0.36.0", "serde_json", "tokio", ] [[package]] name = "fuel-core-compression" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "async-trait", "bimap", "bincode", "fuel-core-compression", - "fuel-core-types", + "fuel-core-types 0.36.0", "postcard", "rand", "serde", @@ -3376,39 +3390,38 @@ dependencies = [ [[package]] name = "fuel-core-consensus-module" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "fuel-core-chain-config", "fuel-core-poa", "fuel-core-storage", - "fuel-core-types", + "fuel-core-types 0.36.0", "test-case", ] [[package]] name = "fuel-core-database" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "derive_more", "fuel-core-storage", "fuel-core-trace", - "fuel-core-types", + "fuel-core-types 0.36.0", ] [[package]] name = "fuel-core-e2e-client" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "assert_cmd", "fuel-core", "fuel-core-chain-config", "fuel-core-client", - "fuel-core-e2e-client", "fuel-core-trace", - "fuel-core-types", + "fuel-core-types 0.36.0", "futures", "hex", "humantime-serde", @@ -3425,12 +3438,12 @@ dependencies = [ [[package]] name = "fuel-core-executor" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "fuel-core-storage", "fuel-core-trace", - "fuel-core-types", + "fuel-core-types 0.36.0", "hex", "parking_lot", "serde", @@ -3439,14 +3452,14 @@ dependencies = [ [[package]] name = "fuel-core-gas-price-service" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "async-trait", "enum-iterator", "fuel-core-services", "fuel-core-storage", - "fuel-core-types", + "fuel-core-types 0.36.0", "fuel-gas-price-algorithm", "futures", "num_enum", @@ -3462,14 +3475,14 @@ dependencies = [ [[package]] name = "fuel-core-importer" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "derive_more", "fuel-core-metrics", "fuel-core-storage", 
"fuel-core-trace", - "fuel-core-types", + "fuel-core-types 0.36.0", "mockall", "parking_lot", "rayon", @@ -3480,18 +3493,18 @@ dependencies = [ [[package]] name = "fuel-core-keygen" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "clap 4.5.17", - "fuel-core-types", + "fuel-core-types 0.36.0", "libp2p-identity", "serde", ] [[package]] name = "fuel-core-keygen-bin" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "atty", @@ -3504,7 +3517,7 @@ dependencies = [ [[package]] name = "fuel-core-metrics" -version = "0.35.0" +version = "0.36.0" dependencies = [ "parking_lot", "pin-project-lite", @@ -3516,7 +3529,7 @@ dependencies = [ [[package]] name = "fuel-core-p2p" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "async-trait", @@ -3527,9 +3540,10 @@ dependencies = [ "fuel-core-services", "fuel-core-storage", "fuel-core-trace", - "fuel-core-types", + "fuel-core-types 0.36.0", "futures", "hex", + "hickory-resolver", "ip_network", "libp2p", "libp2p-mplex", @@ -3553,7 +3567,7 @@ dependencies = [ [[package]] name = "fuel-core-poa" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "async-trait", @@ -3563,7 +3577,7 @@ dependencies = [ "fuel-core-poa", "fuel-core-services", "fuel-core-storage", - "fuel-core-types", + "fuel-core-types 0.36.0", "k256", "mockall", "rand", @@ -3577,7 +3591,7 @@ dependencies = [ [[package]] name = "fuel-core-producer" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "async-trait", @@ -3585,7 +3599,7 @@ dependencies = [ "fuel-core-producer", "fuel-core-storage", "fuel-core-trace", - "fuel-core-types", + "fuel-core-types 0.36.0", "mockall", "proptest", "rand", @@ -3596,7 +3610,7 @@ dependencies = [ [[package]] name = "fuel-core-relayer" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "async-trait", @@ -3610,7 +3624,7 @@ dependencies = [ "fuel-core-services", "fuel-core-storage", "fuel-core-trace", - "fuel-core-types", + "fuel-core-types 0.36.0", 
"futures", "mockall", "once_cell", @@ -3629,7 +3643,7 @@ dependencies = [ [[package]] name = "fuel-core-services" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "async-trait", @@ -3643,14 +3657,14 @@ dependencies = [ [[package]] name = "fuel-core-storage" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "derive_more", "enum-iterator", "fuel-core-storage", - "fuel-core-types", - "fuel-vm", + "fuel-core-types 0.36.0", + "fuel-vm 0.57.0", "impl-tools", "itertools 0.12.1", "mockall", @@ -3667,13 +3681,13 @@ dependencies = [ [[package]] name = "fuel-core-sync" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "async-trait", "fuel-core-services", "fuel-core-trace", - "fuel-core-types", + "fuel-core-types 0.36.0", "futures", "mockall", "rand", @@ -3707,7 +3721,7 @@ dependencies = [ "fuel-core-storage", "fuel-core-trace", "fuel-core-txpool", - "fuel-core-types", + "fuel-core-types 0.36.0", "fuel-core-upgradable-executor", "futures", "hex", @@ -3732,7 +3746,7 @@ dependencies = [ [[package]] name = "fuel-core-trace" -version = "0.35.0" +version = "0.36.0" dependencies = [ "ctor", "tracing", @@ -3742,7 +3756,7 @@ dependencies = [ [[package]] name = "fuel-core-txpool" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "async-trait", @@ -3752,7 +3766,7 @@ dependencies = [ "fuel-core-storage", "fuel-core-trace", "fuel-core-txpool", - "fuel-core-types", + "fuel-core-types 0.36.0", "itertools 0.12.1", "mockall", "num-rational", @@ -3769,12 +3783,28 @@ dependencies = [ [[package]] name = "fuel-core-types" version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84fda0c6dc7b3bd24a993b3902f55862b8db0fa6de5b0f1d45f5942bc59792eb" +dependencies = [ + "anyhow", + "derivative", + "derive_more", + "fuel-vm 0.56.0", + "secrecy", + "serde", + "tai64", + "zeroize", +] + +[[package]] +name = "fuel-core-types" +version = "0.36.0" dependencies = [ "anyhow", "bs58", "derivative", 
"derive_more", - "fuel-vm", + "fuel-vm 0.57.0", "rand", "secrecy", "serde", @@ -3784,13 +3814,13 @@ dependencies = [ [[package]] name = "fuel-core-upgradable-executor" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "derive_more", "fuel-core-executor", "fuel-core-storage", - "fuel-core-types", + "fuel-core-types 0.36.0", "fuel-core-wasm-executor", "ntest", "parking_lot", @@ -3801,27 +3831,46 @@ dependencies = [ [[package]] name = "fuel-core-wasm-executor" -version = "0.35.0" +version = "0.36.0" dependencies = [ "anyhow", "fuel-core-executor", "fuel-core-storage", - "fuel-core-types", + "fuel-core-types 0.35.0", + "fuel-core-types 0.36.0", "postcard", "proptest", "serde", + "serde_json", +] + +[[package]] +name = "fuel-crypto" +version = "0.56.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33548590131674e8f272a3e056be4dbaa1de7cb364eab2b17987cd5c0dc31cb0" +dependencies = [ + "ecdsa", + "ed25519-dalek", + "fuel-types 0.56.0", + "k256", + "p256", + "serde", + "sha2 0.10.8", + "zeroize", ] [[package]] name = "fuel-crypto" version = "0.57.0" -source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2661b2a6c43e811be4892250513a6f4c46a69cc7092a1e5b240f49697f08292e" dependencies = [ "coins-bip32", "coins-bip39", "ecdsa", "ed25519-dalek", - "fuel-types", + "fuel-types 0.57.0", "k256", "lazy_static", "p256", @@ -3832,10 +3881,23 @@ dependencies = [ "zeroize", ] +[[package]] +name = "fuel-derive" +version = "0.56.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f49fdbfc1615d88d2849650afc2b0ac2fecd69661ebadd31a073d8416747764" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", + "synstructure", +] + [[package]] name = "fuel-derive" version = "0.57.0" -source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "03509567813a351ca60d8507b2ac476b06c1590f2e9edbe72bc205bb04e0af12" dependencies = [ "proc-macro2", "quote", @@ -3845,7 +3907,7 @@ dependencies = [ [[package]] name = "fuel-gas-price-algorithm" -version = "0.35.0" +version = "0.36.0" dependencies = [ "proptest", "rand", @@ -3853,38 +3915,84 @@ dependencies = [ "thiserror", ] +[[package]] +name = "fuel-merkle" +version = "0.56.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf17ce8ee5e8b573ea584c223635ff09f1288ad022bcf662954fdccb907602eb" +dependencies = [ + "derive_more", + "digest 0.10.7", + "fuel-storage 0.56.0", + "hashbrown 0.13.2", + "hex", + "serde", + "sha2 0.10.8", +] + [[package]] name = "fuel-merkle" version = "0.57.0" -source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24938ee8a5e9efe71994203527dffb4c81872aa2953de0c347ad38696527b58a" dependencies = [ "derive_more", "digest 0.10.7", - "fuel-storage", + "fuel-storage 0.57.0", "hashbrown 0.13.2", "hex", "serde", "sha2 0.10.8", ] +[[package]] +name = "fuel-storage" +version = "0.56.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c1b711f28553ddc5f3546711bd220e144ce4c1af7d9e9a1f70b2f20d9f5b791" + [[package]] name = "fuel-storage" version = "0.57.0" -source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4283f9cabc26a1154a31268e79de1e0f317d57231b4dc8d7282efb22e49d2ed3" + +[[package]] +name = "fuel-tx" +version = "0.56.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13aae44611588d199dd119e4a0ebd8eb7ae4cde6bf8b4d12715610b1f5e5b731" +dependencies = [ + "bitflags 2.6.0", + "derivative", + "derive_more", + "fuel-asm 0.56.0", + "fuel-crypto 0.56.0", + "fuel-merkle 
0.56.0", + "fuel-types 0.56.0", + "hashbrown 0.14.5", + "itertools 0.10.5", + "postcard", + "serde", + "serde_json", + "strum 0.24.1", + "strum_macros 0.24.3", +] [[package]] name = "fuel-tx" version = "0.57.0" -source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "572f9e8fdda6abfe83cf1456a11eabf1de66d682176fb097f2f950704cc50c26" dependencies = [ "bitflags 2.6.0", "derivative", "derive_more", - "fuel-asm", + "fuel-asm 0.57.0", "fuel-compression", - "fuel-crypto", - "fuel-merkle", - "fuel-types", + "fuel-crypto 0.57.0", + "fuel-merkle 0.57.0", + "fuel-types 0.57.0", "hashbrown 0.14.5", "itertools 0.10.5", "postcard", @@ -3895,21 +4003,65 @@ dependencies = [ "strum_macros 0.24.3", ] +[[package]] +name = "fuel-types" +version = "0.56.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b6fb26bcb408b6897e603f68cf60bbbaf6d15381c99f54a69ea743a58235ac1" +dependencies = [ + "fuel-derive 0.56.0", + "hex", + "serde", +] + [[package]] name = "fuel-types" version = "0.57.0" -source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f196060a10db0293cdfca455f7e2f3a7914f46f25e0fbc2d28cf0a11e835a86" dependencies = [ - "fuel-derive", + "fuel-derive 0.57.0", "hex", "rand", "serde", ] +[[package]] +name = "fuel-vm" +version = "0.56.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64fc4695efac9207276f6229f2dd9811848b328a13604a698f7bce1d452bd986" +dependencies = [ + "async-trait", + "backtrace", + "bitflags 2.6.0", + "derivative", + "derive_more", + "ethnum", + "fuel-asm 0.56.0", + "fuel-crypto 0.56.0", + "fuel-merkle 0.56.0", + "fuel-storage 0.56.0", + "fuel-tx 0.56.0", + "fuel-types 0.56.0", + "hashbrown 0.14.5", + "itertools 0.10.5", + "libm", + "paste", + "percent-encoding", + 
"primitive-types", + "serde", + "serde_with", + "sha3", + "static_assertions", + "strum 0.24.1", +] + [[package]] name = "fuel-vm" version = "0.57.0" -source = "git+https://github.com/FuelLabs/fuel-vm#4718845c4ba9e74585a70c19e24cacc3543b244b" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6f4e0cc4ae65d00df6f3dcae90b81dd21135b45b932a79e368f35d255df12a1" dependencies = [ "anyhow", "async-trait", @@ -3918,13 +4070,13 @@ dependencies = [ "derivative", "derive_more", "ethnum", - "fuel-asm", + "fuel-asm 0.57.0", "fuel-compression", - "fuel-crypto", - "fuel-merkle", - "fuel-storage", - "fuel-tx", - "fuel-types", + "fuel-crypto 0.57.0", + "fuel-merkle 0.57.0", + "fuel-storage 0.57.0", + "fuel-tx 0.57.0", + "fuel-types 0.57.0", "hashbrown 0.14.5", "itertools 0.10.5", "libm", @@ -8653,7 +8805,7 @@ dependencies = [ "fuel-core-storage", "fuel-core-trace", "fuel-core-txpool", - "fuel-core-types", + "fuel-core-types 0.36.0", "futures", "itertools 0.12.1", "rand", diff --git a/crates/chain-config/src/config/snapshots/fuel_core_chain_config__config__chain__tests__snapshot_local_testnet_config.snap.new b/crates/chain-config/src/config/snapshots/fuel_core_chain_config__config__chain__tests__snapshot_local_testnet_config.snap.new new file mode 100644 index 00000000000..40a3d735a62 --- /dev/null +++ b/crates/chain-config/src/config/snapshots/fuel_core_chain_config__config__chain__tests__snapshot_local_testnet_config.snap.new @@ -0,0 +1,312 @@ +--- +source: crates/chain-config/src/config/chain.rs +assertion_line: 162 +expression: json +--- +{ + "chain_name": "local_testnet", + "consensus_parameters": { + "V2": { + "tx_params": { + "V1": { + "max_inputs": 255, + "max_outputs": 255, + "max_witnesses": 255, + "max_gas_per_tx": 100000000, + "max_size": 112640, + "max_bytecode_subsections": 255 + } + }, + "predicate_params": { + "V1": { + "max_predicate_length": 1048576, + "max_predicate_data_length": 1048576, + "max_message_data_length": 1048576, + 
"max_gas_per_predicate": 100000000 + } + }, + "script_params": { + "V1": { + "max_script_length": 1048576, + "max_script_data_length": 1048576 + } + }, + "contract_params": { + "V1": { + "contract_max_size": 102400, + "max_storage_slots": 255 + } + }, + "fee_params": { + "V1": { + "gas_price_factor": 1000000000, + "gas_per_byte": 4 + } + }, + "chain_id": 0, + "gas_costs": { + "V4": { + "add": 1, + "addi": 1, + "and": 1, + "andi": 1, + "bal": 13, + "bhei": 1, + "bhsh": 1, + "burn": 132, + "cb": 1, + "cfsi": 1, + "div": 1, + "divi": 1, + "eck1": 951, + "ecr1": 3000, + "eq": 1, + "exp": 1, + "expi": 1, + "flag": 1, + "gm": 1, + "gt": 1, + "gtf": 1, + "ji": 1, + "jmp": 1, + "jne": 1, + "jnei": 1, + "jnzi": 1, + "jmpf": 1, + "jmpb": 1, + "jnzf": 1, + "jnzb": 1, + "jnef": 1, + "jneb": 1, + "lb": 1, + "log": 9, + "lt": 1, + "lw": 1, + "mint": 135, + "mlog": 1, + "mod": 1, + "modi": 1, + "move": 1, + "movi": 1, + "mroo": 2, + "mul": 1, + "muli": 1, + "mldv": 1, + "noop": 1, + "not": 1, + "or": 1, + "ori": 1, + "poph": 2, + "popl": 2, + "pshh": 2, + "pshl": 2, + "ret_contract": 13, + "rvrt_contract": 13, + "sb": 1, + "sll": 1, + "slli": 1, + "srl": 1, + "srli": 1, + "srw": 12, + "sub": 1, + "subi": 1, + "sw": 1, + "sww": 67, + "time": 1, + "tr": 105, + "tro": 60, + "wdcm": 1, + "wqcm": 1, + "wdop": 1, + "wqop": 1, + "wdml": 1, + "wqml": 1, + "wddv": 1, + "wqdv": 2, + "wdmd": 3, + "wqmd": 4, + "wdam": 2, + "wqam": 3, + "wdmm": 3, + "wqmm": 3, + "xor": 1, + "xori": 1, + "aloc": { + "LightOperation": { + "base": 2, + "units_per_gas": 214 + } + }, + "bsiz": { + "LightOperation": { + "base": 17, + "units_per_gas": 790 + } + }, + "bldd": { + "LightOperation": { + "base": 15, + "units_per_gas": 272 + } + }, + "cfe": { + "LightOperation": { + "base": 2, + "units_per_gas": 214 + } + }, + "cfei": { + "LightOperation": { + "base": 2, + "units_per_gas": 214 + } + }, + "call": { + "LightOperation": { + "base": 144, + "units_per_gas": 214 + } + }, + "ccp": { + "LightOperation": { + 
"base": 15, + "units_per_gas": 103 + } + }, + "croo": { + "LightOperation": { + "base": 1, + "units_per_gas": 1 + } + }, + "csiz": { + "LightOperation": { + "base": 17, + "units_per_gas": 790 + } + }, + "ed19": { + "LightOperation": { + "base": 3000, + "units_per_gas": 214 + } + }, + "k256": { + "LightOperation": { + "base": 11, + "units_per_gas": 214 + } + }, + "ldc": { + "LightOperation": { + "base": 15, + "units_per_gas": 272 + } + }, + "logd": { + "LightOperation": { + "base": 26, + "units_per_gas": 64 + } + }, + "mcl": { + "LightOperation": { + "base": 1, + "units_per_gas": 3333 + } + }, + "mcli": { + "LightOperation": { + "base": 1, + "units_per_gas": 3333 + } + }, + "mcp": { + "LightOperation": { + "base": 1, + "units_per_gas": 2000 + } + }, + "mcpi": { + "LightOperation": { + "base": 3, + "units_per_gas": 2000 + } + }, + "meq": { + "LightOperation": { + "base": 1, + "units_per_gas": 2500 + } + }, + "retd_contract": { + "LightOperation": { + "base": 29, + "units_per_gas": 62 + } + }, + "s256": { + "LightOperation": { + "base": 2, + "units_per_gas": 214 + } + }, + "scwq": { + "LightOperation": { + "base": 13, + "units_per_gas": 5 + } + }, + "smo": { + "LightOperation": { + "base": 209, + "units_per_gas": 55 + } + }, + "srwq": { + "LightOperation": { + "base": 47, + "units_per_gas": 5 + } + }, + "swwq": { + "LightOperation": { + "base": 44, + "units_per_gas": 5 + } + }, + "contract_root": { + "LightOperation": { + "base": 75, + "units_per_gas": 1 + } + }, + "state_root": { + "LightOperation": { + "base": 412, + "units_per_gas": 1 + } + }, + "new_storage_per_byte": 1, + "vm_initialization": { + "HeavyOperation": { + "base": 2000, + "gas_per_unit": 0 + } + } + } + }, + "base_asset_id": "0000000000000000000000000000000000000000000000000000000000000000", + "block_gas_limit": 100000000, + "block_transaction_size_limit": 129024, + "privileged_address": "0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "genesis_state_transition_version": 
10, + "consensus": { + "PoAV2": { + "genesis_signing_key": "22ec92c3105c942a6640bdc4e4907286ec4728e8cfc0d8ac59aad4d8e1ccaefb", + "signing_key_overrides": {} + } + } +} diff --git a/crates/compression/src/context/decompress.rs b/crates/compression/src/context/decompress.rs index 9833e654e81..53db93ed3e1 100644 --- a/crates/compression/src/context/decompress.rs +++ b/crates/compression/src/context/decompress.rs @@ -62,52 +62,52 @@ fn registry_desubstitute< impl DecompressibleBy> for Address { async fn decompress_with( - c: &RegistryKey, + c: RegistryKey, ctx: &DecompressCtx, ) -> Result { - registry_desubstitute(RegistryKeyspace::address, *c, ctx) + registry_desubstitute(RegistryKeyspace::address, c, ctx) } } impl DecompressibleBy> for AssetId { async fn decompress_with( - c: &RegistryKey, + c: RegistryKey, ctx: &DecompressCtx, ) -> Result { - registry_desubstitute(RegistryKeyspace::asset_id, *c, ctx) + registry_desubstitute(RegistryKeyspace::asset_id, c, ctx) } } impl DecompressibleBy> for ContractId { async fn decompress_with( - c: &RegistryKey, + c: RegistryKey, ctx: &DecompressCtx, ) -> Result { - registry_desubstitute(RegistryKeyspace::contract_id, *c, ctx) + registry_desubstitute(RegistryKeyspace::contract_id, c, ctx) } } impl DecompressibleBy> for ScriptCode { async fn decompress_with( - c: &RegistryKey, + c: RegistryKey, ctx: &DecompressCtx, ) -> Result { - registry_desubstitute(RegistryKeyspace::script_code, *c, ctx) + registry_desubstitute(RegistryKeyspace::script_code, c, ctx) } } impl DecompressibleBy> for PredicateCode { async fn decompress_with( - c: &RegistryKey, + c: RegistryKey, ctx: &DecompressCtx, ) -> Result { - registry_desubstitute(RegistryKeyspace::predicate_code, *c, ctx) + registry_desubstitute(RegistryKeyspace::predicate_code, c, ctx) } } impl DecompressibleBy> for UtxoId { async fn decompress_with( - c: &CompressedUtxoId, + c: CompressedUtxoId, ctx: &DecompressCtx, ) -> Result { Ok(ctx.db.utxo_id(c)?) 
@@ -124,11 +124,11 @@ where Specification::Witness: DecompressibleBy>, { async fn decompress_with( - c: & as Compressible>::Compressed, + c: as Compressible>::Compressed, ctx: &DecompressCtx, ) -> Result, DecompressError> { - let utxo_id = UtxoId::decompress_with(&c.utxo_id, ctx).await?; - let coin_info = ctx.db.coin(&utxo_id)?; + let utxo_id = UtxoId::decompress_with(c.utxo_id, ctx).await?; + let coin_info = ctx.db.coin(utxo_id)?; let witness_index = c.witness_index.decompress(ctx).await?; let predicate_gas_used = c.predicate_gas_used.decompress(ctx).await?; let predicate = c.predicate.decompress(ctx).await?; @@ -158,10 +158,10 @@ where Specification::Witness: DecompressibleBy>, { async fn decompress_with( - c: & as Compressible>::Compressed, + c: as Compressible>::Compressed, ctx: &DecompressCtx, ) -> Result, DecompressError> { - let msg = ctx.db.message(&c.nonce)?; + let msg = ctx.db.message(c.nonce)?; let witness_index = c.witness_index.decompress(ctx).await?; let predicate_gas_used = c.predicate_gas_used.decompress(ctx).await?; let predicate = c.predicate.decompress(ctx).await?; @@ -188,7 +188,7 @@ where impl DecompressibleBy> for Mint { async fn decompress_with( - c: &Self::Compressed, + c: Self::Compressed, ctx: &DecompressCtx, ) -> Result { Ok(Transaction::mint( diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index c663397f024..f66cb8c2034 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -47,9 +47,9 @@ pub trait UtxoIdToPointer { } pub trait HistoryLookup { - fn utxo_id(&self, c: &CompressedUtxoId) -> anyhow::Result; - fn coin(&self, utxo_id: &UtxoId) -> anyhow::Result; - fn message(&self, nonce: &Nonce) -> anyhow::Result; + fn utxo_id(&self, c: CompressedUtxoId) -> anyhow::Result; + fn coin(&self, utxo_id: UtxoId) -> anyhow::Result; + fn message(&self, nonce: Nonce) -> anyhow::Result; } #[derive(Debug, Clone)] diff --git a/crates/compression/src/services/decompress.rs 
b/crates/compression/src/services/decompress.rs index 50c9245dc78..a3d91941f7e 100644 --- a/crates/compression/src/services/decompress.rs +++ b/crates/compression/src/services/decompress.rs @@ -57,7 +57,7 @@ pub async fn decompress( let ctx = DecompressCtx { db }; let transactions = as DecompressibleBy<_>>::decompress_with( - &compressed.transactions, + compressed.transactions, &ctx, ) .await?; diff --git a/crates/fuel-core/src/query/message/test.rs b/crates/fuel-core/src/query/message/test.rs index a052b5742dd..350004ba31d 100644 --- a/crates/fuel-core/src/query/message/test.rs +++ b/crates/fuel-core/src/query/message/test.rs @@ -1,6 +1,5 @@ use std::ops::Deref; -use fuel_core_txpool::types::ContractId; use fuel_core_types::{ blockchain::header::{ ApplicationHeader, @@ -9,7 +8,6 @@ use fuel_core_types::{ }, entities::relayer::message::MerkleProof, fuel_tx::{ - AssetId, Script, Transaction, }, diff --git a/tests/tests/da_compression.rs b/tests/tests/da_compression.rs index 6cf54170148..5e4640189a9 100644 --- a/tests/tests/da_compression.rs +++ b/tests/tests/da_compression.rs @@ -21,7 +21,7 @@ use rand::{ }; #[tokio::test] -async fn can_get_da_compressed_blocks() { +async fn can_fetch_da_compressed_block_from_graphql() { let mut rng = StdRng::seed_from_u64(10); let poa_secret = SecretKey::random(&mut rng); From 6c1e932ef2ad98aa578a28430a6ad6b7e462d033 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 18 Sep 2024 19:40:26 +0300 Subject: [PATCH 045/112] npx prettify --- crates/compression/Cargo.toml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 6186060d433..74c60c4e62f 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -6,11 +6,11 @@ categories = ["cryptography::cryptocurrencies"] edition = { workspace = true } homepage = { workspace = true } keywords = [ - "blockchain", - "cryptocurrencies", - "fuel-core", - "fuel-client", 
- "fuel-compression", + "blockchain", + "cryptocurrencies", + "fuel-core", + "fuel-client", + "fuel-compression", ] license = { workspace = true } repository = { workspace = true } @@ -34,8 +34,8 @@ tokio = { workspace = true, features = ["sync"] } [features] test-helpers = [ - "dep:rand", - "fuel-core-types/test-helpers", - "fuel-core-types/random", - "fuel-core-types/std", + "dep:rand", + "fuel-core-types/test-helpers", + "fuel-core-types/random", + "fuel-core-types/std", ] From 28db6608e0714c799e58f911e3b9bb18fc97b9b1 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 18 Sep 2024 19:45:27 +0300 Subject: [PATCH 046/112] Remove unneeded self-dep --- Cargo.lock | 1 - crates/compression/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3f5e28030a9..4dc21616f03 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3378,7 +3378,6 @@ dependencies = [ "async-trait", "bimap", "bincode", - "fuel-core-compression", "fuel-core-types 0.36.0", "postcard", "rand", diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 74c60c4e62f..6d9bc0a0451 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -28,7 +28,6 @@ thiserror = { workspace = true } [dev-dependencies] bimap = { version = "0.6" } bincode = { version = "1.3" } -fuel-core-compression = { workspace = true, features = ["test-helpers"] } tempfile = "3" tokio = { workspace = true, features = ["sync"] } From d060c71eea5253d89bd8ce182a0f193fc6640350 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 18 Sep 2024 19:52:49 +0300 Subject: [PATCH 047/112] Approve snapshot --- ...ts__snapshot_local_testnet_config.snap.new | 312 ------------------ ...sts__block_by_height_query_gql_output.snap | 9 + 2 files changed, 9 insertions(+), 312 deletions(-) delete mode 100644 crates/chain-config/src/config/snapshots/fuel_core_chain_config__config__chain__tests__snapshot_local_testnet_config.snap.new create mode 100644 
crates/client/src/client/schema/snapshots/fuel_core_client__client__schema__da_compressed__tests__block_by_height_query_gql_output.snap diff --git a/crates/chain-config/src/config/snapshots/fuel_core_chain_config__config__chain__tests__snapshot_local_testnet_config.snap.new b/crates/chain-config/src/config/snapshots/fuel_core_chain_config__config__chain__tests__snapshot_local_testnet_config.snap.new deleted file mode 100644 index 40a3d735a62..00000000000 --- a/crates/chain-config/src/config/snapshots/fuel_core_chain_config__config__chain__tests__snapshot_local_testnet_config.snap.new +++ /dev/null @@ -1,312 +0,0 @@ ---- -source: crates/chain-config/src/config/chain.rs -assertion_line: 162 -expression: json ---- -{ - "chain_name": "local_testnet", - "consensus_parameters": { - "V2": { - "tx_params": { - "V1": { - "max_inputs": 255, - "max_outputs": 255, - "max_witnesses": 255, - "max_gas_per_tx": 100000000, - "max_size": 112640, - "max_bytecode_subsections": 255 - } - }, - "predicate_params": { - "V1": { - "max_predicate_length": 1048576, - "max_predicate_data_length": 1048576, - "max_message_data_length": 1048576, - "max_gas_per_predicate": 100000000 - } - }, - "script_params": { - "V1": { - "max_script_length": 1048576, - "max_script_data_length": 1048576 - } - }, - "contract_params": { - "V1": { - "contract_max_size": 102400, - "max_storage_slots": 255 - } - }, - "fee_params": { - "V1": { - "gas_price_factor": 1000000000, - "gas_per_byte": 4 - } - }, - "chain_id": 0, - "gas_costs": { - "V4": { - "add": 1, - "addi": 1, - "and": 1, - "andi": 1, - "bal": 13, - "bhei": 1, - "bhsh": 1, - "burn": 132, - "cb": 1, - "cfsi": 1, - "div": 1, - "divi": 1, - "eck1": 951, - "ecr1": 3000, - "eq": 1, - "exp": 1, - "expi": 1, - "flag": 1, - "gm": 1, - "gt": 1, - "gtf": 1, - "ji": 1, - "jmp": 1, - "jne": 1, - "jnei": 1, - "jnzi": 1, - "jmpf": 1, - "jmpb": 1, - "jnzf": 1, - "jnzb": 1, - "jnef": 1, - "jneb": 1, - "lb": 1, - "log": 9, - "lt": 1, - "lw": 1, - "mint": 135, - "mlog": 1, 
- "mod": 1, - "modi": 1, - "move": 1, - "movi": 1, - "mroo": 2, - "mul": 1, - "muli": 1, - "mldv": 1, - "noop": 1, - "not": 1, - "or": 1, - "ori": 1, - "poph": 2, - "popl": 2, - "pshh": 2, - "pshl": 2, - "ret_contract": 13, - "rvrt_contract": 13, - "sb": 1, - "sll": 1, - "slli": 1, - "srl": 1, - "srli": 1, - "srw": 12, - "sub": 1, - "subi": 1, - "sw": 1, - "sww": 67, - "time": 1, - "tr": 105, - "tro": 60, - "wdcm": 1, - "wqcm": 1, - "wdop": 1, - "wqop": 1, - "wdml": 1, - "wqml": 1, - "wddv": 1, - "wqdv": 2, - "wdmd": 3, - "wqmd": 4, - "wdam": 2, - "wqam": 3, - "wdmm": 3, - "wqmm": 3, - "xor": 1, - "xori": 1, - "aloc": { - "LightOperation": { - "base": 2, - "units_per_gas": 214 - } - }, - "bsiz": { - "LightOperation": { - "base": 17, - "units_per_gas": 790 - } - }, - "bldd": { - "LightOperation": { - "base": 15, - "units_per_gas": 272 - } - }, - "cfe": { - "LightOperation": { - "base": 2, - "units_per_gas": 214 - } - }, - "cfei": { - "LightOperation": { - "base": 2, - "units_per_gas": 214 - } - }, - "call": { - "LightOperation": { - "base": 144, - "units_per_gas": 214 - } - }, - "ccp": { - "LightOperation": { - "base": 15, - "units_per_gas": 103 - } - }, - "croo": { - "LightOperation": { - "base": 1, - "units_per_gas": 1 - } - }, - "csiz": { - "LightOperation": { - "base": 17, - "units_per_gas": 790 - } - }, - "ed19": { - "LightOperation": { - "base": 3000, - "units_per_gas": 214 - } - }, - "k256": { - "LightOperation": { - "base": 11, - "units_per_gas": 214 - } - }, - "ldc": { - "LightOperation": { - "base": 15, - "units_per_gas": 272 - } - }, - "logd": { - "LightOperation": { - "base": 26, - "units_per_gas": 64 - } - }, - "mcl": { - "LightOperation": { - "base": 1, - "units_per_gas": 3333 - } - }, - "mcli": { - "LightOperation": { - "base": 1, - "units_per_gas": 3333 - } - }, - "mcp": { - "LightOperation": { - "base": 1, - "units_per_gas": 2000 - } - }, - "mcpi": { - "LightOperation": { - "base": 3, - "units_per_gas": 2000 - } - }, - "meq": { - "LightOperation": { 
- "base": 1, - "units_per_gas": 2500 - } - }, - "retd_contract": { - "LightOperation": { - "base": 29, - "units_per_gas": 62 - } - }, - "s256": { - "LightOperation": { - "base": 2, - "units_per_gas": 214 - } - }, - "scwq": { - "LightOperation": { - "base": 13, - "units_per_gas": 5 - } - }, - "smo": { - "LightOperation": { - "base": 209, - "units_per_gas": 55 - } - }, - "srwq": { - "LightOperation": { - "base": 47, - "units_per_gas": 5 - } - }, - "swwq": { - "LightOperation": { - "base": 44, - "units_per_gas": 5 - } - }, - "contract_root": { - "LightOperation": { - "base": 75, - "units_per_gas": 1 - } - }, - "state_root": { - "LightOperation": { - "base": 412, - "units_per_gas": 1 - } - }, - "new_storage_per_byte": 1, - "vm_initialization": { - "HeavyOperation": { - "base": 2000, - "gas_per_unit": 0 - } - } - } - }, - "base_asset_id": "0000000000000000000000000000000000000000000000000000000000000000", - "block_gas_limit": 100000000, - "block_transaction_size_limit": 129024, - "privileged_address": "0000000000000000000000000000000000000000000000000000000000000000" - } - }, - "genesis_state_transition_version": 10, - "consensus": { - "PoAV2": { - "genesis_signing_key": "22ec92c3105c942a6640bdc4e4907286ec4728e8cfc0d8ac59aad4d8e1ccaefb", - "signing_key_overrides": {} - } - } -} diff --git a/crates/client/src/client/schema/snapshots/fuel_core_client__client__schema__da_compressed__tests__block_by_height_query_gql_output.snap b/crates/client/src/client/schema/snapshots/fuel_core_client__client__schema__da_compressed__tests__block_by_height_query_gql_output.snap new file mode 100644 index 00000000000..62f797d34e8 --- /dev/null +++ b/crates/client/src/client/schema/snapshots/fuel_core_client__client__schema__da_compressed__tests__block_by_height_query_gql_output.snap @@ -0,0 +1,9 @@ +--- +source: crates/client/src/client/schema/da_compressed.rs +expression: operation.query +--- +query($height: U32!) 
{ + daCompressedBlock(height: $height) { + bytes + } +} From b6d7343d5604b98d4ec1178dc9cf61b4428838e8 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 18 Sep 2024 20:05:51 +0300 Subject: [PATCH 048/112] Remove unused functions --- crates/compression/src/tables.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index 57ee6d65028..0edb6907d0d 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -29,23 +29,6 @@ macro_rules! tables { $name, )* } - impl RegistryKeyspace { - pub fn name(&self) -> &'static str { - match self { - $( - Self::$name => stringify!($name), - )* - } - } - pub fn from_str(name: &str) -> Option { - match name { - $( - stringify!($name) => Some(Self::$name), - )* - _ => None, - } - } - } #[derive(Debug, Clone, Default)] pub struct PerRegistryKeyspace { From 22dbc4e3a0058a7db5fc355277a26a76b026abbf Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 18 Sep 2024 20:27:02 +0300 Subject: [PATCH 049/112] Cargo fix --- crates/fuel-core/src/graphql_api.rs | 1 + .../src/graphql_api/da_compression.rs | 132 ++++++++++++++++ .../src/graphql_api/storage/da_compression.rs | 1 + .../src/graphql_api/worker_service.rs | 141 ++---------------- 4 files changed, 145 insertions(+), 130 deletions(-) create mode 100644 crates/fuel-core/src/graphql_api/da_compression.rs diff --git a/crates/fuel-core/src/graphql_api.rs b/crates/fuel-core/src/graphql_api.rs index c3fc9abb991..04dc2a75b47 100644 --- a/crates/fuel-core/src/graphql_api.rs +++ b/crates/fuel-core/src/graphql_api.rs @@ -8,6 +8,7 @@ use std::{ }; pub mod api_service; +mod da_compression; pub mod database; pub(crate) mod metrics_extension; pub mod ports; diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs new file mode 100644 index 00000000000..5f97273c051 --- /dev/null +++ 
b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -0,0 +1,132 @@ +use crate::{ + fuel_core_graphql_api::ports::worker::OffChainDatabaseTransaction, + graphql_api::storage::da_compression::{ + DaCompressedBlocks, + DaCompressionTemporalRegistry, + DaCompressionTemporalRegistryIndex, + }, +}; +use fuel_core_compression::{ + ports::{ + TemporalRegistry, + UtxoIdToPointer, + }, + services::compress::compress, + RegistryKeyspace, +}; +use fuel_core_storage::{ + not_found, + StorageAsMut, + StorageAsRef, + StorageInspect, +}; +use fuel_core_types::{ + blockchain::block::Block, + services::executor::Event, +}; +use futures::FutureExt; + +/// Performs DA compression for a block and stores it in the database. +pub fn da_compress_block( + block: &Block, + events: &[Event], + transaction: &mut T, +) -> anyhow::Result<()> +where + T: OffChainDatabaseTransaction, + T: StorageInspect, +{ + struct DbTx<'a, Tx>(&'a mut Tx, &'a [Event]); + + impl<'a, Tx> TemporalRegistry for DbTx<'a, Tx> + where + Tx: OffChainDatabaseTransaction, + Tx: StorageInspect, + { + fn read_registry( + &self, + keyspace: RegistryKeyspace, + key: fuel_core_types::fuel_compression::RegistryKey, + ) -> anyhow::Result> { + Ok(self + .0 + .storage_as_ref::() + .get(&(keyspace, key))? + .ok_or(not_found!(DaCompressionTemporalRegistry))? 
+ .into_owned()) + } + + fn write_registry( + &mut self, + keyspace: RegistryKeyspace, + key: fuel_core_types::fuel_compression::RegistryKey, + value: Vec, + ) -> anyhow::Result<()> { + // Write the actual value + self.0 + .storage_as_mut::() + .insert(&(keyspace, key), &value)?; + + // Remove the overwritten value from index, if any + self.0 + .storage_as_mut::() + .remove(&(keyspace, value.clone()))?; + + // Add the new value to the index + self.0 + .storage_as_mut::() + .insert(&(keyspace, value), &key)?; + + Ok(()) + } + + fn registry_index_lookup( + &self, + keyspace: RegistryKeyspace, + value: Vec, + ) -> anyhow::Result> + { + Ok(self + .0 + .storage_as_ref::() + .get(&(keyspace, value))? + .map(|v| v.into_owned())) + } + } + + impl<'a, Tx> UtxoIdToPointer for DbTx<'a, Tx> + where + Tx: OffChainDatabaseTransaction, + { + fn lookup( + &self, + utxo_id: fuel_core_types::fuel_tx::UtxoId, + ) -> anyhow::Result { + for event in self.1 { + match event { + Event::CoinCreated(coin) | Event::CoinConsumed(coin) + if coin.utxo_id == utxo_id => + { + let output_index = coin.utxo_id.output_index(); + return Ok(fuel_core_types::fuel_tx::CompressedUtxoId { + tx_pointer: coin.tx_pointer, + output_index, + }); + } + _ => {} + } + } + panic!("UtxoId not found in the block events"); + } + } + + let compressed = compress(DbTx(transaction, events), block) + .now_or_never() + .expect("The current implementation resolved all futures instantly")?; + + transaction + .storage_as_mut::() + .insert(&block.header().consensus().height, &compressed)?; + + Ok(()) +} diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression.rs b/crates/fuel-core/src/graphql_api/storage/da_compression.rs index 410103dc153..fa5c17e65a5 100644 --- a/crates/fuel-core/src/graphql_api/storage/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/storage/da_compression.rs @@ -75,6 +75,7 @@ impl TableWithBlueprint for DaCompressionTemporalRegistryIndex { mod tests { use super::*; + 
#[allow(clippy::arithmetic_side_effects)] // Test code, also safe fn generate_registry_key( rng: &mut impl rand::Rng, ) -> (RegistryKeyspace, RegistryKey) { diff --git a/crates/fuel-core/src/graphql_api/worker_service.rs b/crates/fuel-core/src/graphql_api/worker_service.rs index f9ee0ea308c..9d74d17731a 100644 --- a/crates/fuel-core/src/graphql_api/worker_service.rs +++ b/crates/fuel-core/src/graphql_api/worker_service.rs @@ -1,7 +1,10 @@ -use super::storage::old::{ - OldFuelBlockConsensus, - OldFuelBlocks, - OldTransactions, +use super::{ + da_compression::da_compress_block, + storage::old::{ + OldFuelBlockConsensus, + OldFuelBlocks, + OldTransactions, + }, }; use crate::{ fuel_core_graphql_api::{ @@ -23,22 +26,7 @@ use crate::{ }, }, }, - graphql_api::storage::{ - da_compression::{ - DaCompressedBlocks, - DaCompressionTemporalRegistry, - DaCompressionTemporalRegistryIndex, - }, - relayed_transactions::RelayedTransactionStatuses, - }, -}; -use fuel_core_compression::{ - ports::{ - TemporalRegistry, - UtxoIdToPointer, - }, - services::compress::compress, - RegistryKeyspace, + graphql_api::storage::relayed_transactions::RelayedTransactionStatuses, }; use fuel_core_metrics::graphql_metrics::graphql_metrics; use fuel_core_services::{ @@ -50,12 +38,9 @@ use fuel_core_services::{ StateWatcher, }; use fuel_core_storage::{ - not_found, Error as StorageError, Result as StorageResult, StorageAsMut, - StorageAsRef, - StorageInspect, }; use fuel_core_txpool::types::TxId; use fuel_core_types::{ @@ -166,7 +151,7 @@ where &mut transaction, )?; - da_compress_block(&block, &result.events, &mut transaction)?; + da_compress_block(block, &result.events, &mut transaction)?; transaction.commit()?; @@ -183,110 +168,6 @@ where } } -fn da_compress_block( - block: &Block, - events: &[Event], - transaction: &mut T, -) -> anyhow::Result<()> -where - T: OffChainDatabaseTransaction, - T: StorageInspect, -{ - struct DbTx<'a, Tx>(&'a mut Tx, &'a [Event]); - - impl<'a, Tx> TemporalRegistry for 
DbTx<'a, Tx> - where - Tx: OffChainDatabaseTransaction, - Tx: StorageInspect, - { - fn read_registry( - &self, - keyspace: RegistryKeyspace, - key: fuel_core_types::fuel_compression::RegistryKey, - ) -> anyhow::Result> { - Ok(self - .0 - .storage_as_ref::() - .get(&(keyspace, key))? - .ok_or(not_found!(DaCompressionTemporalRegistry))? - .into_owned()) - } - - fn write_registry( - &mut self, - keyspace: RegistryKeyspace, - key: fuel_core_types::fuel_compression::RegistryKey, - value: Vec, - ) -> anyhow::Result<()> { - // Write the actual value - self.0 - .storage_as_mut::() - .insert(&(keyspace, key), &value)?; - - // Remove the overwritten value from index, if any - self.0 - .storage_as_mut::() - .remove(&(keyspace, value.clone()))?; - - // Add the new value to the index - self.0 - .storage_as_mut::() - .insert(&(keyspace, value), &key)?; - - Ok(()) - } - - fn registry_index_lookup( - &self, - keyspace: RegistryKeyspace, - value: Vec, - ) -> anyhow::Result> - { - Ok(self - .0 - .storage_as_ref::() - .get(&(keyspace, value))? - .map(|v| v.into_owned())) - } - } - - impl<'a, Tx> UtxoIdToPointer for DbTx<'a, Tx> - where - Tx: OffChainDatabaseTransaction, - { - fn lookup( - &self, - utxo_id: fuel_core_types::fuel_tx::UtxoId, - ) -> anyhow::Result { - for event in self.1 { - match event { - Event::CoinCreated(coin) | Event::CoinConsumed(coin) - if coin.utxo_id == utxo_id => - { - let output_index = coin.utxo_id.output_index(); - return Ok(fuel_core_types::fuel_tx::CompressedUtxoId { - tx_pointer: coin.tx_pointer, - output_index, - }); - } - _ => {} - } - } - panic!("UtxoId not found in the block events"); - } - } - - let compressed = compress(DbTx(transaction, events), block) - .now_or_never() - .expect("The current implementation resolved all futures instantly")?; - - transaction - .storage_as_mut::() - .insert(&block.header().consensus().height, &compressed)?; - - Ok(()) -} - /// Process the executor events and update the indexes for the messages and coins. 
pub fn process_executor_events<'a, Iter, T>( events: Iter, @@ -551,7 +432,7 @@ where } #[async_trait::async_trait] -impl<'a, TxPool, BlockImporter, OnChain, OffChain> RunnableService +impl RunnableService for InitializeTask where TxPool: ports::worker::TxPool, @@ -699,7 +580,7 @@ where } } -pub fn new_service<'a, TxPool, BlockImporter, OnChain, OffChain>( +pub fn new_service( tx_pool: TxPool, block_importer: BlockImporter, on_chain_database: OnChain, From 35147c635e1a0c1617acc472c937994d58affa70 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 18 Sep 2024 20:58:04 +0300 Subject: [PATCH 050/112] Update changelog --- CHANGELOG.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f6eae160f49..e83db37df2b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] +### Added + +- [#1609](https://github.com/FuelLabs/fuel-core/pull/1609): Add DA compression support. Compressed blocks are stored in the offchain database when blocks are produced, and can be fetched using the GraphQL API. + ## [Version 0.36.0] ### Added @@ -239,10 +243,6 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - [#1895](https://github.com/FuelLabs/fuel-core/pull/1895): Added backward and forward compatibility integration tests for forkless upgrades. - [#1898](https://github.com/FuelLabs/fuel-core/pull/1898): Enforce increasing of the `Executor::VERSION` on each release. -### Added - -- [#1609](https://github.com/FuelLabs/fuel-core/pull/1609): Add a DA compression crate `fuel-core-compression`. - ### Changed - [#1906](https://github.com/FuelLabs/fuel-core/pull/1906): Makes `cli::snapshot::Command` members public such that clients can create and execute snapshot commands programmatically. This enables snapshot execution in external programs, such as the regenesis test suite. 
From 99f875646fee35e1537471cc3efb7bf32be82e9f Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 18 Sep 2024 21:04:23 +0300 Subject: [PATCH 051/112] Clean up --- .../src/graphql_api/da_compression.rs | 160 +++++++++--------- 1 file changed, 80 insertions(+), 80 deletions(-) diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index 5f97273c051..853d6dd257d 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -36,97 +36,97 @@ where T: OffChainDatabaseTransaction, T: StorageInspect, { - struct DbTx<'a, Tx>(&'a mut Tx, &'a [Event]); + let compressed = compress(CompressTx(transaction, events), block) + .now_or_never() + .expect("The current implementation resolved all futures instantly")?; - impl<'a, Tx> TemporalRegistry for DbTx<'a, Tx> - where - Tx: OffChainDatabaseTransaction, - Tx: StorageInspect, - { - fn read_registry( - &self, - keyspace: RegistryKeyspace, - key: fuel_core_types::fuel_compression::RegistryKey, - ) -> anyhow::Result> { - Ok(self - .0 - .storage_as_ref::() - .get(&(keyspace, key))? - .ok_or(not_found!(DaCompressionTemporalRegistry))? 
- .into_owned()) - } + transaction + .storage_as_mut::() + .insert(&block.header().consensus().height, &compressed)?; - fn write_registry( - &mut self, - keyspace: RegistryKeyspace, - key: fuel_core_types::fuel_compression::RegistryKey, - value: Vec, - ) -> anyhow::Result<()> { - // Write the actual value - self.0 - .storage_as_mut::() - .insert(&(keyspace, key), &value)?; + Ok(()) +} - // Remove the overwritten value from index, if any - self.0 - .storage_as_mut::() - .remove(&(keyspace, value.clone()))?; +struct CompressTx<'a, Tx>(&'a mut Tx, &'a [Event]); - // Add the new value to the index - self.0 - .storage_as_mut::() - .insert(&(keyspace, value), &key)?; +impl<'a, Tx> TemporalRegistry for CompressTx<'a, Tx> +where + Tx: OffChainDatabaseTransaction, + Tx: StorageInspect, +{ + fn read_registry( + &self, + keyspace: RegistryKeyspace, + key: fuel_core_types::fuel_compression::RegistryKey, + ) -> anyhow::Result> { + Ok(self + .0 + .storage_as_ref::() + .get(&(keyspace, key))? + .ok_or(not_found!(DaCompressionTemporalRegistry))? + .into_owned()) + } - Ok(()) - } + fn write_registry( + &mut self, + keyspace: RegistryKeyspace, + key: fuel_core_types::fuel_compression::RegistryKey, + value: Vec, + ) -> anyhow::Result<()> { + // Write the actual value + self.0 + .storage_as_mut::() + .insert(&(keyspace, key), &value)?; - fn registry_index_lookup( - &self, - keyspace: RegistryKeyspace, - value: Vec, - ) -> anyhow::Result> - { - Ok(self - .0 - .storage_as_ref::() - .get(&(keyspace, value))? 
- .map(|v| v.into_owned())) - } + // Remove the overwritten value from index, if any + self.0 + .storage_as_mut::() + .remove(&(keyspace, value.clone()))?; + + // Add the new value to the index + self.0 + .storage_as_mut::() + .insert(&(keyspace, value), &key)?; + + Ok(()) } - impl<'a, Tx> UtxoIdToPointer for DbTx<'a, Tx> - where - Tx: OffChainDatabaseTransaction, + fn registry_index_lookup( + &self, + keyspace: RegistryKeyspace, + value: Vec, + ) -> anyhow::Result> { - fn lookup( - &self, - utxo_id: fuel_core_types::fuel_tx::UtxoId, - ) -> anyhow::Result { - for event in self.1 { - match event { - Event::CoinCreated(coin) | Event::CoinConsumed(coin) - if coin.utxo_id == utxo_id => - { - let output_index = coin.utxo_id.output_index(); - return Ok(fuel_core_types::fuel_tx::CompressedUtxoId { - tx_pointer: coin.tx_pointer, - output_index, - }); - } - _ => {} + Ok(self + .0 + .storage_as_ref::() + .get(&(keyspace, value))? + .map(|v| v.into_owned())) + } +} + +impl<'a, Tx> UtxoIdToPointer for CompressTx<'a, Tx> +where + Tx: OffChainDatabaseTransaction, +{ + fn lookup( + &self, + utxo_id: fuel_core_types::fuel_tx::UtxoId, + ) -> anyhow::Result { + for event in self.1 { + match event { + Event::CoinCreated(coin) | Event::CoinConsumed(coin) + if coin.utxo_id == utxo_id => + { + let output_index = coin.utxo_id.output_index(); + return Ok(fuel_core_types::fuel_tx::CompressedUtxoId { + tx_pointer: coin.tx_pointer, + output_index, + }); } + _ => {} } - panic!("UtxoId not found in the block events"); } + panic!("UtxoId not found in the block events"); } - - let compressed = compress(DbTx(transaction, events), block) - .now_or_never() - .expect("The current implementation resolved all futures instantly")?; - - transaction - .storage_as_mut::() - .insert(&block.header().consensus().height, &compressed)?; - - Ok(()) } From 7fe1dfcfe85f2dfe3d669608cd13bd232675ebbf Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 18 Sep 2024 21:13:44 +0300 Subject: [PATCH 052/112] fmt --- 
crates/fuel-core/src/graphql_api/da_compression.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index 853d6dd257d..e35fc1e5fa2 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -95,8 +95,7 @@ where &self, keyspace: RegistryKeyspace, value: Vec, - ) -> anyhow::Result> - { + ) -> anyhow::Result> { Ok(self .0 .storage_as_ref::() From da83ea1d53bb576ef472715ae133ad771376837d Mon Sep 17 00:00:00 2001 From: Hannes Karppila <2204863+Dentosal@users.noreply.github.com> Date: Thu, 19 Sep 2024 17:15:33 +0300 Subject: [PATCH 053/112] Update Cargo.toml Co-authored-by: Aaryamann Challani <43716372+rymnc@users.noreply.github.com> --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 1e04f846e23..73f866e5e51 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,6 @@ members = [ "crates/chain-config", "crates/client", "crates/compression", - "crates/compression", "crates/database", "crates/fuel-core", "crates/fuel-gas-price-algorithm", From 4748041a3ba7e4ef0d099d45977f766947654a9d Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Fri, 20 Sep 2024 13:50:57 +0300 Subject: [PATCH 054/112] Add proptest-based ser/de tests for compressed blocks --- Cargo.lock | 1 + crates/compression/Cargo.toml | 1 + crates/compression/src/lib.rs | 145 +++++++++++++++++++++++++------ crates/compression/src/tables.rs | 12 +-- 4 files changed, 120 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4dc21616f03..ddd15a69a7e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3380,6 +3380,7 @@ dependencies = [ "bincode", "fuel-core-types 0.36.0", "postcard", + "proptest", "rand", "serde", "tempfile", diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 6d9bc0a0451..4cf2d090f88 100644 --- a/crates/compression/Cargo.toml +++ 
b/crates/compression/Cargo.toml @@ -28,6 +28,7 @@ thiserror = { workspace = true } [dev-dependencies] bimap = { version = "0.6" } bincode = { version = "1.3" } +proptest = { workspace = true } tempfile = "3" tokio = { workspace = true, features = ["sync"] } diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index efa906f4a22..641238f9818 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -58,35 +58,124 @@ struct CompressedBlockPayload { #[cfg(test)] mod tests { + use std::collections::HashMap; + + use fuel_core_types::{ + fuel_compression::RegistryKey, + fuel_tx::{ + input::PredicateCode, + Address, + AssetId, + ContractId, + ScriptCode, + }, + }; + use proptest::prelude::*; + use tables::{ + PerRegistryKeyspace, + PostcardSerialized, + }; + use super::*; - #[test] - fn postcard_roundtrip() { - let original = CompressedBlockPayload { - registrations: RegistrationsPerTable::default(), - header: Header { - da_height: DaBlockHeight::default(), - prev_root: Default::default(), - height: 3u32.into(), - consensus_parameters_version: 1, - state_transition_bytecode_version: 2, - time: Tai64::UNIX_EPOCH, - }, - transactions: vec![], - }; - - let compressed = postcard::to_allocvec(&original).unwrap(); - let decompressed: CompressedBlockPayload = - postcard::from_bytes(&compressed).unwrap(); - - let CompressedBlockPayload { - registrations, - header, - transactions, - } = decompressed; - - assert!(registrations.is_empty()); - assert_eq!(header.height, 3u32.into()); - assert!(transactions.is_empty()); + fn keyspace() -> impl Strategy { + prop_oneof![ + Just(RegistryKeyspace::address), + Just(RegistryKeyspace::asset_id), + Just(RegistryKeyspace::contract_id), + Just(RegistryKeyspace::script_code), + Just(RegistryKeyspace::predicate_code), + ] + } + + fn keyspace_and_value( + ) -> impl Strategy { + (keyspace(), prop::array::uniform32(0..u8::MAX)).prop_map(|(keyspace, value)| { + let value = match keyspace { + 
RegistryKeyspace::address => { + PostcardSerialized::new(Address::new(value)).unwrap() + } + RegistryKeyspace::asset_id => { + PostcardSerialized::new(AssetId::new(value)).unwrap() + } + RegistryKeyspace::contract_id => { + PostcardSerialized::new(ContractId::new(value)).unwrap() + } + RegistryKeyspace::script_code => { + let len = (value[0] % 32) as usize; + PostcardSerialized::new(ScriptCode { + bytes: value[..len].to_vec(), + }) + .unwrap() + } + RegistryKeyspace::predicate_code => { + let len = (value[0] % 32) as usize; + PostcardSerialized::new(PredicateCode { + bytes: value[..len].to_vec(), + }) + .unwrap() + } + }; + (keyspace, value) + }) + } + + proptest! { + /// Serialization for compressed transactions is already tested in fuel-vm, + /// but the rest of the block de/serialization is be tested here. + #[test] + fn postcard_roundtrip( + da_height in 0..u64::MAX, + prev_root in prop::array::uniform32(0..u8::MAX), + height in 0..u32::MAX, + consensus_parameters_version in 0..u32::MAX, + state_transition_bytecode_version in 0..u32::MAX, + registration_inputs in prop::collection::vec( + (keyspace_and_value(), prop::num::u16::ANY).prop_map(|((ks, v), rk)| { + let k = RegistryKey::try_from(rk as u32).unwrap(); + (ks, (k, v)) + }), + 0..123 + ), + ) { + let mut registrations: PerRegistryKeyspace> = Default::default(); + + for (keyspace, (key, data)) in registration_inputs { + registrations[keyspace].insert(key, data); + } + + let original = CompressedBlockPayload { + registrations: RegistrationsPerTable::try_from(registrations).unwrap(), + header: Header { + da_height: da_height.into(), + prev_root: prev_root.into(), + height: height.into(), + consensus_parameters_version, + state_transition_bytecode_version, + time: Tai64::UNIX_EPOCH, + }, + transactions: vec![], + }; + + let compressed = postcard::to_allocvec(&original).unwrap(); + let decompressed: CompressedBlockPayload = + postcard::from_bytes(&compressed).unwrap(); + + let CompressedBlockPayload { + 
registrations, + header, + transactions, + } = decompressed; + + assert_eq!(registrations, original.registrations); + + assert_eq!(header.da_height, da_height.into()); + assert_eq!(header.prev_root, prev_root.into()); + assert_eq!(header.height, height.into()); + assert_eq!(header.consensus_parameters_version, consensus_parameters_version); + assert_eq!(header.state_transition_bytecode_version, state_transition_bytecode_version); + + assert!(transactions.is_empty()); + } } } diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index 0edb6907d0d..3c29ee12765 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -55,7 +55,7 @@ macro_rules! tables { } } - #[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)] + #[derive(Debug, Clone, PartialEq, Default, serde::Serialize, serde::Deserialize)] pub struct RegistrationsPerTable { $(pub $name: Vec<(RegistryKey, $type)>,)* } @@ -75,16 +75,6 @@ macro_rules! tables { } impl RegistrationsPerTable { - #[cfg(test)] - pub(crate) fn is_empty(&self) -> bool { - $( - if !self.$name.is_empty() { - return false; - } - )* - true - } - pub(crate) fn write_to_registry(&self, registry: &mut R) -> anyhow::Result<()> { $( for (key, value) in self.$name.iter() { From 896b2d1bc9f0f195f3c032028ba25285778e6395 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Fri, 20 Sep 2024 14:51:05 +0300 Subject: [PATCH 055/112] Add a test that shows compressed blocks are available from non-producer nodes --- tests/tests/da_compression.rs | 59 ++++++++++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) diff --git a/tests/tests/da_compression.rs b/tests/tests/da_compression.rs index 5e4640189a9..c5341511d9b 100644 --- a/tests/tests/da_compression.rs +++ b/tests/tests/da_compression.rs @@ -1,5 +1,6 @@ use fuel_core::{ combined_database::CombinedDatabase, + p2p_test_helpers::*, service::{ Config, FuelService, @@ -12,7 +13,10 @@ use 
fuel_core_client::client::{ use fuel_core_poa::signer::SignMode; use fuel_core_types::{ fuel_crypto::SecretKey, - fuel_tx::Transaction, + fuel_tx::{ + Input, + Transaction, + }, secrecy::Secret, }; use rand::{ @@ -48,3 +52,56 @@ async fn can_fetch_da_compressed_block_from_graphql() { let block = client.da_compressed_block(block_height).await.unwrap(); assert!(block.is_some()); } + +#[tokio::test(flavor = "multi_thread")] +async fn da_compressed_blocks_are_available_from_non_block_producing_nodes() { + let mut rng = StdRng::seed_from_u64(line!() as u64); + + // Create a producer and a validator that share the same key pair. + let secret = SecretKey::random(&mut rng); + let pub_key = Input::owner(&secret.public_key()); + let Nodes { + mut producers, + mut validators, + bootstrap_nodes: _dont_drop, + } = make_nodes( + [Some(BootstrapSetup::new(pub_key))], + [Some( + ProducerSetup::new(secret).with_txs(1).with_name("Alice"), + )], + [Some(ValidatorSetup::new(pub_key).with_name("Bob"))], + Some(Config { + debug: true, + utxo_validation: false, + ..Config::local_node() + }), + ) + .await; + + let mut producer = producers.pop().unwrap(); + let mut validator = validators.pop().unwrap(); + + let p_client = FuelClient::from(producer.node.shared.graph_ql.bound_address); + let v_client = FuelClient::from(validator.node.shared.graph_ql.bound_address); + + // Insert some txs + let expected = producer.insert_txs().await; + producer.consistency_10s(&expected).await; + validator.consistency_20s(&expected).await; + + let block_height = 1u32.into(); + + let p_block = p_client + .da_compressed_block(block_height) + .await + .unwrap() + .expect("Compressed block not available from producer"); + + let v_block = v_client + .da_compressed_block(block_height) + .await + .unwrap() + .expect("Compressed block not available from validator"); + + assert!(p_block == v_block); +} From 9542cbf0c3bd12b0a4ef19f83bbd0668130d9bbe Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Fri, 20 Sep 2024 
15:45:35 +0300 Subject: [PATCH 056/112] Cleanup --- crates/fuel-core/src/lib.rs | 2 +- tests/tests/da_compression.rs | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/crates/fuel-core/src/lib.rs b/crates/fuel-core/src/lib.rs index 76fdef49dcb..40d866a137d 100644 --- a/crates/fuel-core/src/lib.rs +++ b/crates/fuel-core/src/lib.rs @@ -1,7 +1,7 @@ #![deny(clippy::arithmetic_side_effects)] #![deny(clippy::cast_possible_truncation)] #![deny(unused_crate_dependencies)] -// #![deny(warnings)] +#![deny(warnings)] use crate::service::genesis::NotifyCancel; use tokio_util::sync::CancellationToken; diff --git a/tests/tests/da_compression.rs b/tests/tests/da_compression.rs index c5341511d9b..2400659e974 100644 --- a/tests/tests/da_compression.rs +++ b/tests/tests/da_compression.rs @@ -70,11 +70,7 @@ async fn da_compressed_blocks_are_available_from_non_block_producing_nodes() { ProducerSetup::new(secret).with_txs(1).with_name("Alice"), )], [Some(ValidatorSetup::new(pub_key).with_name("Bob"))], - Some(Config { - debug: true, - utxo_validation: false, - ..Config::local_node() - }), + None, ) .await; From 3612ed8f9b2aeb3231ea8749a38c3cad34fbd006 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Fri, 20 Sep 2024 15:58:50 +0300 Subject: [PATCH 057/112] Shuffle stuff around for aesthetic reasons --- crates/compression/README.md | 13 ++------- .../src/{services => }/compress.rs | 0 crates/compression/src/context/compress.rs | 2 +- crates/compression/src/context/decompress.rs | 2 +- crates/compression/src/context/prepare.rs | 2 +- .../src/{services => }/decompress.rs | 0 crates/compression/src/lib.rs | 29 ++++++++++--------- .../src/graphql_api/da_compression.rs | 2 +- 8 files changed, 22 insertions(+), 28 deletions(-) rename crates/compression/src/{services => }/compress.rs (100%) rename crates/compression/src/{services => }/decompress.rs (100%) diff --git a/crates/compression/README.md b/crates/compression/README.md index fcbb2446433..8aa0a49c5a6 100644 --- 
a/crates/compression/README.md +++ b/crates/compression/README.md @@ -10,25 +10,18 @@ This crate provides offchain registries for different types such as `AssetId`, ` The registries allow replacing repeated objects with their respective keys, so if an object is used multiple times in a short interval (couple of months, maybe), then the full value -exists on only a single uncompressed block, +exists on only a single uncompressed block. ### Fraud proofs Compressed block will start with 32 bytes of merkle root over all compression smts, followed by newly registered values along with their keys. Using an SMT provides flexibility around the algorithm we use to define keys without knowing how exactly values were chosen to be registered. - -Each registry also uses an SMT. Since the keys are three bytes long, the depth of the SMT is capped at 24 levels. - - - More efficient for fraud proofs instead of needing to provide entire previous blocks with proofs +Each registry also uses an SMT. Since the keys are three bytes long, the depth of the SMT is capped at 24 levels. ## Compression of `UtxoIds` -Since each `UtxoId` only appears once, there's no point in registering them. Instead, they are replaced with `TxPointer`s (7 bytes worst case), which are still unique. +Since each `UtxoId` only appears once, there's no point in registering them. Instead, they are replaced with `TxPointer` and output index, which are still unique. ### Fraud proofs During fraud proofs we need to use the `prev_root` to prove that the referenced block height is part of the chain. - -## Other techniques - -- These techniques should be good enough for now, but there are lots of other interesting ideas for this. 
diff --git a/crates/compression/src/services/compress.rs b/crates/compression/src/compress.rs similarity index 100% rename from crates/compression/src/services/compress.rs rename to crates/compression/src/compress.rs diff --git a/crates/compression/src/context/compress.rs b/crates/compression/src/context/compress.rs index 988f6f47367..c579e87e4e9 100644 --- a/crates/compression/src/context/compress.rs +++ b/crates/compression/src/context/compress.rs @@ -18,8 +18,8 @@ use fuel_core_types::{ }; use crate::{ + compress::CompressDb, eviction_policy::CacheEvictor, - services::compress::CompressDb, tables::{ PerRegistryKeyspace, PostcardSerialized, diff --git a/crates/compression/src/context/decompress.rs b/crates/compression/src/context/decompress.rs index 53db93ed3e1..d9aec760bdf 100644 --- a/crates/compression/src/context/decompress.rs +++ b/crates/compression/src/context/decompress.rs @@ -31,7 +31,7 @@ use fuel_core_types::{ }; use crate::{ - services::decompress::{ + decompress::{ DecompressDb, DecompressError, }, diff --git a/crates/compression/src/context/prepare.rs b/crates/compression/src/context/prepare.rs index e97f8defb21..eccf02c1781 100644 --- a/crates/compression/src/context/prepare.rs +++ b/crates/compression/src/context/prepare.rs @@ -11,7 +11,7 @@ use fuel_core_types::{ use input::PredicateCode; use crate::{ - services::compress::CompressDb, + compress::CompressDb, tables::{ PerRegistryKeyspace, RegistryKeyspace, diff --git a/crates/compression/src/services/decompress.rs b/crates/compression/src/decompress.rs similarity index 100% rename from crates/compression/src/services/decompress.rs rename to crates/compression/src/decompress.rs diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 641238f9818..288a2149ee6 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -1,10 +1,8 @@ +pub mod compress; +pub mod decompress; mod eviction_policy; pub mod ports; mod tables; -pub mod services { - pub mod 
compress; - pub mod decompress; -} mod context { pub mod compress; pub mod decompress; @@ -35,16 +33,6 @@ use fuel_core_types::{ }; use tables::RegistrationsPerTable; -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -struct Header { - pub da_height: DaBlockHeight, - pub prev_root: Bytes32, - pub height: BlockHeight, - pub time: Tai64, - pub consensus_parameters_version: ConsensusParametersVersion, - pub state_transition_bytecode_version: StateTransitionBytecodeVersion, -} - /// Compressed block, without the preceding version byte. #[derive(Clone, Serialize, Deserialize)] struct CompressedBlockPayload { @@ -56,6 +44,19 @@ struct CompressedBlockPayload { transactions: Vec, } + +/// Fuel block header with only the fields required to reconstruct it. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +struct Header { + pub da_height: DaBlockHeight, + pub prev_root: Bytes32, + pub height: BlockHeight, + pub time: Tai64, + pub consensus_parameters_version: ConsensusParametersVersion, + pub state_transition_bytecode_version: StateTransitionBytecodeVersion, +} + + #[cfg(test)] mod tests { use std::collections::HashMap; diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index e35fc1e5fa2..9ae3a346202 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -7,11 +7,11 @@ use crate::{ }, }; use fuel_core_compression::{ + compress::compress, ports::{ TemporalRegistry, UtxoIdToPointer, }, - services::compress::compress, RegistryKeyspace, }; use fuel_core_storage::{ From c06173d5781588d1ddab2ca4a7ea269a9511c3d5 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Fri, 20 Sep 2024 16:53:17 +0300 Subject: [PATCH 058/112] Include registrations_root merkle root, but keep it zeroed for now --- crates/compression/README.md | 2 +- crates/compression/src/compress.rs | 6 +++++- crates/compression/src/decompress.rs | 2 ++ 
crates/compression/src/lib.rs | 10 +++++++--- 4 files changed, 15 insertions(+), 5 deletions(-) diff --git a/crates/compression/README.md b/crates/compression/README.md index 8aa0a49c5a6..ae3005b09ad 100644 --- a/crates/compression/README.md +++ b/crates/compression/README.md @@ -14,7 +14,7 @@ exists on only a single uncompressed block. ### Fraud proofs -Compressed block will start with 32 bytes of merkle root over all compression smts, followed by newly registered values along with their keys. Using an SMT provides flexibility around the algorithm we use to define keys without knowing how exactly values were chosen to be registered. +Compressed block will contain a merkle root over all compression smts, followed by newly registered values along with their keys. Using an SMT provides flexibility around the algorithm we use to define keys without knowing how exactly values were chosen to be registered. Each registry also uses an SMT. Since the keys are three bytes long, the depth of the SMT is capped at 24 levels. 
diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index a86dd6f6097..8ce1bf98620 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -1,7 +1,10 @@ use fuel_core_types::{ blockchain::block::Block, fuel_compression::CompressibleBy, - fuel_tx::Transaction, + fuel_tx::{ + Bytes32, + Transaction, + }, }; use crate::{ @@ -65,6 +68,7 @@ pub async fn compress(db: D, block: &Block) -> Result, Er // Construct the actual compacted block let compact = CompressedBlockPayload { registrations, + registrations_root: Bytes32::default(), // TODO: https://github.com/FuelLabs/fuel-core/issues/2232 header: Header { da_height: block.header().da_height, prev_root: *block.header().prev_root(), diff --git a/crates/compression/src/decompress.rs b/crates/compression/src/decompress.rs index a3d91941f7e..600794f38e2 100644 --- a/crates/compression/src/decompress.rs +++ b/crates/compression/src/decompress.rs @@ -52,6 +52,8 @@ pub async fn decompress( // return Err(DecompressError::NotLatest); // } + // TODO: merkle root verification: https://github.com/FuelLabs/fuel-core/issues/2232 + compressed.registrations.write_to_registry(&mut db)?; let ctx = DecompressCtx { db }; diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 288a2149ee6..ec3e14149d1 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -36,15 +36,16 @@ use tables::RegistrationsPerTable; /// Compressed block, without the preceding version byte. #[derive(Clone, Serialize, Deserialize)] struct CompressedBlockPayload { - /// Registration section of the compressed block + /// Temporal registry insertions registrations: RegistrationsPerTable, + /// Merkle root of the temporal registry state + registrations_root: Bytes32, /// Compressed block header header: Header, /// Compressed transactions transactions: Vec, } - /// Fuel block header with only the fields required to reconstruct it. 
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] struct Header { @@ -56,7 +57,6 @@ struct Header { pub state_transition_bytecode_version: StateTransitionBytecodeVersion, } - #[cfg(test)] mod tests { use std::collections::HashMap; @@ -131,6 +131,7 @@ mod tests { height in 0..u32::MAX, consensus_parameters_version in 0..u32::MAX, state_transition_bytecode_version in 0..u32::MAX, + registrations_root in prop::array::uniform32(0..u8::MAX), registration_inputs in prop::collection::vec( (keyspace_and_value(), prop::num::u16::ANY).prop_map(|((ks, v), rk)| { let k = RegistryKey::try_from(rk as u32).unwrap(); @@ -147,6 +148,7 @@ mod tests { let original = CompressedBlockPayload { registrations: RegistrationsPerTable::try_from(registrations).unwrap(), + registrations_root: registrations_root.into(), header: Header { da_height: da_height.into(), prev_root: prev_root.into(), @@ -164,11 +166,13 @@ mod tests { let CompressedBlockPayload { registrations, + registrations_root, header, transactions, } = decompressed; assert_eq!(registrations, original.registrations); + assert_eq!(registrations_root, original.registrations_root); assert_eq!(header.da_height, da_height.into()); assert_eq!(header.prev_root, prev_root.into()); From 8ac98e5403b6bb04e508d9654a0b87d03e7ce7cc Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Fri, 20 Sep 2024 17:01:06 +0300 Subject: [PATCH 059/112] Cleanup, more lints --- Cargo.lock | 20 -------------------- crates/compression/Cargo.toml | 5 ----- crates/compression/src/compress.rs | 2 +- crates/compression/src/lib.rs | 17 +++++++++++------ crates/compression/src/ports.rs | 4 ++++ crates/compression/src/tables.rs | 2 ++ 6 files changed, 18 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 29c5286e048..71c2d7a930b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1108,21 +1108,6 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" -[[package]] -name = "bimap" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" - -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - [[package]] name = "bindgen" version = "0.65.1" @@ -3375,17 +3360,12 @@ name = "fuel-core-compression" version = "0.36.0" dependencies = [ "anyhow", - "async-trait", - "bimap", - "bincode", "fuel-core-types 0.36.0", "postcard", "proptest", "rand", "serde", - "tempfile", "thiserror", - "tokio", ] [[package]] diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 4cf2d090f88..cd47c97ea8c 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -18,7 +18,6 @@ description = "Compression and decompression of Fuel blocks for DA storage." 
[dependencies] anyhow = { workspace = true } -async-trait = { workspace = true } fuel-core-types = { workspace = true, features = ["serde", "da-compression"] } postcard = { version = "1.0", features = ["use-std"] } rand = { workspace = true, optional = true } @@ -26,11 +25,7 @@ serde = { version = "1.0", features = ["derive"] } thiserror = { workspace = true } [dev-dependencies] -bimap = { version = "0.6" } -bincode = { version = "1.3" } proptest = { workspace = true } -tempfile = "3" -tokio = { workspace = true, features = ["sync"] } [features] test-helpers = [ diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 8ce1bf98620..3a623b65c27 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -68,7 +68,7 @@ pub async fn compress(db: D, block: &Block) -> Result, Er // Construct the actual compacted block let compact = CompressedBlockPayload { registrations, - registrations_root: Bytes32::default(), // TODO: https://github.com/FuelLabs/fuel-core/issues/2232 + registrations_root: Bytes32::default(), /* TODO: https://github.com/FuelLabs/fuel-core/issues/2232 */ header: Header { da_height: block.header().da_height, prev_root: *block.header().prev_root(), diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index ec3e14149d1..6367e50e46d 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -1,3 +1,8 @@ +#![deny(clippy::arithmetic_side_effects)] +#![deny(clippy::cast_possible_truncation)] +#![deny(unused_crate_dependencies)] +#![deny(warnings)] + pub mod compress; pub mod decompress; mod eviction_policy; @@ -49,12 +54,12 @@ struct CompressedBlockPayload { /// Fuel block header with only the fields required to reconstruct it. 
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] struct Header { - pub da_height: DaBlockHeight, - pub prev_root: Bytes32, - pub height: BlockHeight, - pub time: Tai64, - pub consensus_parameters_version: ConsensusParametersVersion, - pub state_transition_bytecode_version: StateTransitionBytecodeVersion, + da_height: DaBlockHeight, + prev_root: Bytes32, + height: BlockHeight, + time: Tai64, + consensus_parameters_version: ConsensusParametersVersion, + state_transition_bytecode_version: StateTransitionBytecodeVersion, } #[cfg(test)] diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index f66cb8c2034..d70c20c717e 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -42,16 +42,19 @@ pub trait TemporalRegistry { ) -> anyhow::Result>; } +/// Lookup for UTXO pointers used for compression. pub trait UtxoIdToPointer { fn lookup(&self, utxo_id: UtxoId) -> anyhow::Result; } +/// Lookup for history of UTXOs and messages, used for decompression. pub trait HistoryLookup { fn utxo_id(&self, c: CompressedUtxoId) -> anyhow::Result; fn coin(&self, utxo_id: UtxoId) -> anyhow::Result; fn message(&self, nonce: Nonce) -> anyhow::Result; } +/// Information about a coin. #[derive(Debug, Clone)] pub struct CoinInfo { pub owner: Address, @@ -59,6 +62,7 @@ pub struct CoinInfo { pub asset_id: AssetId, } +/// Information about a message. #[derive(Debug, Clone)] pub struct MessageInfo { pub sender: Address, diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index 3c29ee12765..b6350f25e77 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -30,6 +30,7 @@ macro_rules! tables { )* } + #[doc = "A value for each keyspace"] #[derive(Debug, Clone, Default)] pub struct PerRegistryKeyspace { $(pub $name: T,)* @@ -55,6 +56,7 @@ macro_rules! 
tables { } } + #[doc = "The set of registrations for each table, as used in the compressed block header"] #[derive(Debug, Clone, PartialEq, Default, serde::Serialize, serde::Deserialize)] pub struct RegistrationsPerTable { $(pub $name: Vec<(RegistryKey, $type)>,)* From 454351de830372b4b214af2ef913c71e920575c6 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Sun, 22 Sep 2024 22:43:43 +0300 Subject: [PATCH 060/112] doc link fix --- crates/fuel-core/src/graphql_api/storage.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/fuel-core/src/graphql_api/storage.rs b/crates/fuel-core/src/graphql_api/storage.rs index 585cf917017..f1ed9bc6430 100644 --- a/crates/fuel-core/src/graphql_api/storage.rs +++ b/crates/fuel-core/src/graphql_api/storage.rs @@ -95,7 +95,7 @@ pub enum Column { /// See [`SpentMessages`](messages::SpentMessages) SpentMessages = 13, /// DA compression and postcard serialized blocks. - /// See [`DaCompressedBlocks`](da_compressed::DaCompressedBlocks) + /// See [`DaCompressedBlocks`](da_compression::DaCompressedBlocks) DaCompressedBlocks = 14, /// DA compression metadata. DaCompressionMetadata = 15, From a155b65ab2a0f6736e0d801f2be967321e5ddf5d Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 23 Sep 2024 03:30:46 +0300 Subject: [PATCH 061/112] Feature fix attempt --- crates/compression/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index cd47c97ea8c..5aeebe165ba 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -18,7 +18,7 @@ description = "Compression and decompression of Fuel blocks for DA storage." 
[dependencies] anyhow = { workspace = true } -fuel-core-types = { workspace = true, features = ["serde", "da-compression"] } +fuel-core-types = { workspace = true, features = ["alloc", "serde", "da-compression"] } postcard = { version = "1.0", features = ["use-std"] } rand = { workspace = true, optional = true } serde = { version = "1.0", features = ["derive"] } From 5cb035cde4f0d7b2e9c22f2c7cb79e517d38ab9f Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 23 Sep 2024 03:39:17 +0300 Subject: [PATCH 062/112] prettify --- crates/compression/Cargo.toml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 5aeebe165ba..5b5975fa5db 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -18,7 +18,11 @@ description = "Compression and decompression of Fuel blocks for DA storage." [dependencies] anyhow = { workspace = true } -fuel-core-types = { workspace = true, features = ["alloc", "serde", "da-compression"] } +fuel-core-types = { workspace = true, features = [ + "alloc", + "serde", + "da-compression" +] } postcard = { version = "1.0", features = ["use-std"] } rand = { workspace = true, optional = true } serde = { version = "1.0", features = ["derive"] } From 465b04c0dd8ddd6d1d3da3205ec657af7c9f36b1 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 23 Sep 2024 12:33:53 +0300 Subject: [PATCH 063/112] prettify more --- crates/compression/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 5b5975fa5db..ea8763654c4 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -21,7 +21,7 @@ anyhow = { workspace = true } fuel-core-types = { workspace = true, features = [ "alloc", "serde", - "da-compression" + "da-compression", ] } postcard = { version = "1.0", features = ["use-std"] } rand = { workspace = true, optional = true } From 
b615ad698567e68966e712f122508ad9c127dd4e Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 23 Sep 2024 13:39:25 +0300 Subject: [PATCH 064/112] Optimize cache evictor --- crates/compression/src/compress.rs | 6 ++- crates/compression/src/context/compress.rs | 18 ++++--- crates/compression/src/eviction_policy.rs | 22 ++++++--- crates/compression/src/ports.rs | 13 +++++ .../src/graphql_api/da_compression.rs | 32 ++++++++++++ crates/fuel-core/src/graphql_api/ports.rs | 2 + crates/fuel-core/src/graphql_api/storage.rs | 3 ++ .../src/graphql_api/storage/da_compression.rs | 49 ++++++++++++++++--- 8 files changed, 123 insertions(+), 22 deletions(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 3a623b65c27..75f3794e158 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -14,6 +14,7 @@ use crate::{ }, eviction_policy::CacheEvictor, ports::{ + EvictorDb, TemporalRegistry, UtxoIdToPointer, }, @@ -36,7 +37,10 @@ pub enum Error { pub trait CompressDb: TemporalRegistry + UtxoIdToPointer {} impl CompressDb for T where T: TemporalRegistry + UtxoIdToPointer {} -pub async fn compress(db: D, block: &Block) -> Result, Error> { +pub async fn compress( + db: D, + block: &Block, +) -> Result, Error> { // if *block.header().height() != db.next_block_height()? 
{ // return Err(Error::NotLatest); // } diff --git a/crates/compression/src/context/compress.rs b/crates/compression/src/context/compress.rs index c579e87e4e9..1209405c208 100644 --- a/crates/compression/src/context/compress.rs +++ b/crates/compression/src/context/compress.rs @@ -20,6 +20,7 @@ use fuel_core_types::{ use crate::{ compress::CompressDb, eviction_policy::CacheEvictor, + ports::EvictorDb, tables::{ PerRegistryKeyspace, PostcardSerialized, @@ -38,7 +39,10 @@ impl ContextError for CompressCtx { type Error = anyhow::Error; } -fn registry_substitute( +fn registry_substitute< + D: CompressDb + EvictorDb, + T: serde::Serialize + Default + PartialEq, +>( keyspace: RegistryKeyspace, value: &T, ctx: &mut CompressCtx, @@ -52,13 +56,13 @@ fn registry_substitute return Ok(found); } - let key = ctx.cache_evictor.next_key(keyspace)?; + let key = ctx.cache_evictor.next_key(&mut ctx.db, keyspace)?; let old = ctx.changes[keyspace].insert(key, PostcardSerialized::new(value)?); assert!(old.is_none(), "Key collision in registry substitution"); Ok(key) } -impl CompressibleBy> for Address { +impl CompressibleBy> for Address { async fn compress_with( &self, ctx: &mut CompressCtx, @@ -67,7 +71,7 @@ impl CompressibleBy> for Address { } } -impl CompressibleBy> for AssetId { +impl CompressibleBy> for AssetId { async fn compress_with( &self, ctx: &mut CompressCtx, @@ -76,7 +80,7 @@ impl CompressibleBy> for AssetId { } } -impl CompressibleBy> for ContractId { +impl CompressibleBy> for ContractId { async fn compress_with( &self, ctx: &mut CompressCtx, @@ -85,7 +89,7 @@ impl CompressibleBy> for ContractId { } } -impl CompressibleBy> for ScriptCode { +impl CompressibleBy> for ScriptCode { async fn compress_with( &self, ctx: &mut CompressCtx, @@ -94,7 +98,7 @@ impl CompressibleBy> for ScriptCode { } } -impl CompressibleBy> for PredicateCode { +impl CompressibleBy> for PredicateCode { async fn compress_with( &self, ctx: &mut CompressCtx, diff --git 
a/crates/compression/src/eviction_policy.rs b/crates/compression/src/eviction_policy.rs index be524c97bb1..34f4a937e4e 100644 --- a/crates/compression/src/eviction_policy.rs +++ b/crates/compression/src/eviction_policy.rs @@ -2,9 +2,12 @@ use std::collections::HashSet; use fuel_core_types::fuel_compression::RegistryKey; -use crate::tables::{ - PerRegistryKeyspace, - RegistryKeyspace, +use crate::{ + ports::EvictorDb, + tables::{ + PerRegistryKeyspace, + RegistryKeyspace, + }, }; pub struct CacheEvictor { @@ -14,19 +17,24 @@ pub struct CacheEvictor { impl CacheEvictor { /// Get a key, evicting an old value if necessary - pub fn next_key( + pub fn next_key( &mut self, + db: &mut D, keyspace: RegistryKeyspace, - ) -> anyhow::Result { + ) -> anyhow::Result + where + D: EvictorDb, + { // Pick first key not in the set - // TODO: this can be optimized by keeping a counter of the last key used // TODO: use a proper algo, maybe LRU? - let mut key = RegistryKey::ZERO; + let mut key = db.read_latest(keyspace)?; while self.keep_keys[keyspace].contains(&key) { key = key.next(); assert_ne!(key, RegistryKey::ZERO, "Ran out of keys"); } + db.write_latest(keyspace, key)?; + self.keep_keys[keyspace].insert(key); Ok(key) } diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index d70c20c717e..dd19786fc60 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -70,3 +70,16 @@ pub struct MessageInfo { pub amount: Word, pub data: Vec, } + +/// Temporal registry evictor state storage, +/// currently backed by a `DaCompressionTemporalRegistryEvictor` +/// column in the offchain database. 
+pub trait EvictorDb { + fn read_latest(&mut self, keyspace: RegistryKeyspace) -> anyhow::Result; + + fn write_latest( + &mut self, + keyspace: RegistryKeyspace, + key: RegistryKey, + ) -> anyhow::Result<()>; +} diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index 9ae3a346202..c487b7788d0 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -3,12 +3,14 @@ use crate::{ graphql_api::storage::da_compression::{ DaCompressedBlocks, DaCompressionTemporalRegistry, + DaCompressionTemporalRegistryEvictor, DaCompressionTemporalRegistryIndex, }, }; use fuel_core_compression::{ compress::compress, ports::{ + EvictorDb, TemporalRegistry, UtxoIdToPointer, }, @@ -19,6 +21,7 @@ use fuel_core_storage::{ StorageAsMut, StorageAsRef, StorageInspect, + StorageMutate, }; use fuel_core_types::{ blockchain::block::Block, @@ -35,6 +38,7 @@ pub fn da_compress_block( where T: OffChainDatabaseTransaction, T: StorageInspect, + T: StorageMutate, { let compressed = compress(CompressTx(transaction, events), block) .now_or_never() @@ -129,3 +133,31 @@ where panic!("UtxoId not found in the block events"); } } + +impl<'a, Tx> EvictorDb for CompressTx<'a, Tx> +where + Tx: OffChainDatabaseTransaction, +{ + fn write_latest( + &mut self, + keyspace: RegistryKeyspace, + key: fuel_core_types::fuel_compression::RegistryKey, + ) -> anyhow::Result<()> { + self.0 + .storage_as_mut::() + .insert(&keyspace, &key)?; + Ok(()) + } + + fn read_latest( + &mut self, + keyspace: RegistryKeyspace, + ) -> anyhow::Result { + Ok(self + .0 + .storage_as_ref::() + .get(&keyspace)? + .ok_or(not_found!(DaCompressionTemporalRegistryEvictor))? 
+ .into_owned()) + } +} diff --git a/crates/fuel-core/src/graphql_api/ports.rs b/crates/fuel-core/src/graphql_api/ports.rs index 3b71b88036d..5cc3f498ab4 100644 --- a/crates/fuel-core/src/graphql_api/ports.rs +++ b/crates/fuel-core/src/graphql_api/ports.rs @@ -281,6 +281,7 @@ pub mod worker { da_compression::{ DaCompressedBlocks, DaCompressionTemporalRegistry, + DaCompressionTemporalRegistryEvictor, DaCompressionTemporalRegistryIndex, }, old::{ @@ -339,6 +340,7 @@ pub mod worker { + StorageMutate + StorageMutate + StorageMutate + + StorageMutate { fn record_tx_id_owner( &mut self, diff --git a/crates/fuel-core/src/graphql_api/storage.rs b/crates/fuel-core/src/graphql_api/storage.rs index f1ed9bc6430..c7818dbe38d 100644 --- a/crates/fuel-core/src/graphql_api/storage.rs +++ b/crates/fuel-core/src/graphql_api/storage.rs @@ -105,6 +105,9 @@ pub enum Column { /// Temporal registry lookup index for DA compression. /// See [`DaCompressionTemporalRegistryIndex`](da_compression::DaCompressionTemporalRegistryIndex) DaCompressionTemporalRegistryIndex = 17, + /// Temporal registry evictor state. + /// See [`DaCompressionTemporalRegistryEvictor`](da_compression::DaCompressionTemporalRegistryEvictor) + DaCompressionTemporalRegistryEvictor = 18, } impl Column { diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression.rs b/crates/fuel-core/src/graphql_api/storage/da_compression.rs index fa5c17e65a5..38ad9e68eae 100644 --- a/crates/fuel-core/src/graphql_api/storage/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/storage/da_compression.rs @@ -56,7 +56,7 @@ pub struct DaCompressionTemporalRegistryIndex; impl Mappable for DaCompressionTemporalRegistryIndex { type Key = Self::OwnedKey; // The second value is a postcard-encoded value, where the original type depends on the keyspace. - // TODO: should we hash the secodn part of this key? + // TODO: should we hash the second part of this key? 
type OwnedKey = (RegistryKeyspace, Vec); type Value = Self::OwnedValue; type OwnedValue = RegistryKey; @@ -71,20 +71,47 @@ impl TableWithBlueprint for DaCompressionTemporalRegistryIndex { } } +/// This table is used to hold "next key to evict" for each keyspace. +/// In the future we'll likely switch to use LRU or something, in which +/// case this table can be repurposed, iff migrations have been figured out. +pub struct DaCompressionTemporalRegistryEvictor; + +impl Mappable for DaCompressionTemporalRegistryEvictor { + type Key = Self::OwnedKey; + type OwnedKey = RegistryKeyspace; + type Value = Self::OwnedValue; + type OwnedValue = RegistryKey; +} + +impl TableWithBlueprint for DaCompressionTemporalRegistryEvictor { + type Blueprint = Plain; + type Column = super::Column; + + fn column() -> Self::Column { + Self::Column::DaCompressionTemporalRegistryEvictor + } +} + #[cfg(test)] mod tests { use super::*; #[allow(clippy::arithmetic_side_effects)] // Test code, also safe - fn generate_registry_key( - rng: &mut impl rand::Rng, - ) -> (RegistryKeyspace, RegistryKey) { - let keyspace: RegistryKeyspace = rng.gen(); + fn generate_keyspace(rng: &mut impl rand::Rng) -> RegistryKeyspace { + rng.gen() + } + #[allow(clippy::arithmetic_side_effects)] // Test code, also safe + fn generate_raw_key(rng: &mut impl rand::Rng) -> RegistryKey { let raw_key: u32 = rng.gen_range(0..2u32.pow(24) - 2); - let key = RegistryKey::try_from(raw_key).unwrap(); + RegistryKey::try_from(raw_key).unwrap() + } - (keyspace, key) + #[allow(clippy::arithmetic_side_effects)] // Test code, also safe + fn generate_registry_key( + rng: &mut impl rand::Rng, + ) -> (RegistryKeyspace, RegistryKey) { + (generate_keyspace(rng), generate_raw_key(rng)) } fn generate_registry_index_key( @@ -119,4 +146,12 @@ mod tests { RegistryKey::ZERO, generate_registry_index_key ); + + fuel_core_storage::basic_storage_tests!( + DaCompressionTemporalRegistryEvictor, + RegistryKeyspace::address, + RegistryKey::ZERO, + 
RegistryKey::ZERO, + generate_keyspace + ); } From c334ea24198a286aeaf6bc440b1007d205216248 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 24 Sep 2024 11:22:56 +0300 Subject: [PATCH 065/112] Remove redundant trait bounds --- crates/fuel-core/src/graphql_api/da_compression.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index c487b7788d0..9c84602f366 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -20,8 +20,6 @@ use fuel_core_storage::{ not_found, StorageAsMut, StorageAsRef, - StorageInspect, - StorageMutate, }; use fuel_core_types::{ blockchain::block::Block, @@ -37,8 +35,6 @@ pub fn da_compress_block( ) -> anyhow::Result<()> where T: OffChainDatabaseTransaction, - T: StorageInspect, - T: StorageMutate, { let compressed = compress(CompressTx(transaction, events), block) .now_or_never() @@ -56,7 +52,6 @@ struct CompressTx<'a, Tx>(&'a mut Tx, &'a [Event]); impl<'a, Tx> TemporalRegistry for CompressTx<'a, Tx> where Tx: OffChainDatabaseTransaction, - Tx: StorageInspect, { fn read_registry( &self, From 50d53d14249a22865eaa343aff6ceff7b1ff8745 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 25 Sep 2024 12:28:28 +0300 Subject: [PATCH 066/112] PR review: cleanup --- .../src/graphql_api/da_compression.rs | 39 ++++++++++++------- .../src/graphql_api/worker_service.rs | 4 +- crates/fuel-core/src/service/adapters.rs | 1 - .../src/service/adapters/da_compression.rs | 1 - 4 files changed, 26 insertions(+), 19 deletions(-) delete mode 100644 crates/fuel-core/src/service/adapters/da_compression.rs diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index 9c84602f366..1dafb309997 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -30,24 
+30,33 @@ use futures::FutureExt; /// Performs DA compression for a block and stores it in the database. pub fn da_compress_block( block: &Block, - events: &[Event], - transaction: &mut T, + block_events: &[Event], + db_tx: &mut T, ) -> anyhow::Result<()> where T: OffChainDatabaseTransaction, { - let compressed = compress(CompressTx(transaction, events), block) - .now_or_never() - .expect("The current implementation resolved all futures instantly")?; + let compressed = compress( + CompressTx { + db_tx, + block_events, + }, + block, + ) + .now_or_never() + .expect("The current implementation resolved all futures instantly")?; - transaction + db_tx .storage_as_mut::() .insert(&block.header().consensus().height, &compressed)?; Ok(()) } -struct CompressTx<'a, Tx>(&'a mut Tx, &'a [Event]); +struct CompressTx<'a, Tx> { + db_tx: &'a mut Tx, + block_events: &'a [Event], +} impl<'a, Tx> TemporalRegistry for CompressTx<'a, Tx> where @@ -59,7 +68,7 @@ where key: fuel_core_types::fuel_compression::RegistryKey, ) -> anyhow::Result> { Ok(self - .0 + .db_tx .storage_as_ref::() .get(&(keyspace, key))? .ok_or(not_found!(DaCompressionTemporalRegistry))? @@ -73,17 +82,17 @@ where value: Vec, ) -> anyhow::Result<()> { // Write the actual value - self.0 + self.db_tx .storage_as_mut::() .insert(&(keyspace, key), &value)?; // Remove the overwritten value from index, if any - self.0 + self.db_tx .storage_as_mut::() .remove(&(keyspace, value.clone()))?; // Add the new value to the index - self.0 + self.db_tx .storage_as_mut::() .insert(&(keyspace, value), &key)?; @@ -96,7 +105,7 @@ where value: Vec, ) -> anyhow::Result> { Ok(self - .0 + .db_tx .storage_as_ref::() .get(&(keyspace, value))? 
.map(|v| v.into_owned())) @@ -111,7 +120,7 @@ where &self, utxo_id: fuel_core_types::fuel_tx::UtxoId, ) -> anyhow::Result { - for event in self.1 { + for event in self.block_events { match event { Event::CoinCreated(coin) | Event::CoinConsumed(coin) if coin.utxo_id == utxo_id => @@ -138,7 +147,7 @@ where keyspace: RegistryKeyspace, key: fuel_core_types::fuel_compression::RegistryKey, ) -> anyhow::Result<()> { - self.0 + self.db_tx .storage_as_mut::() .insert(&keyspace, &key)?; Ok(()) @@ -149,7 +158,7 @@ where keyspace: RegistryKeyspace, ) -> anyhow::Result { Ok(self - .0 + .db_tx .storage_as_ref::() .get(&keyspace)? .ok_or(not_found!(DaCompressionTemporalRegistryEvictor))? diff --git a/crates/fuel-core/src/graphql_api/worker_service.rs b/crates/fuel-core/src/graphql_api/worker_service.rs index 9d74d17731a..98aeffb105f 100644 --- a/crates/fuel-core/src/graphql_api/worker_service.rs +++ b/crates/fuel-core/src/graphql_api/worker_service.rs @@ -119,10 +119,10 @@ pub struct Task { continue_on_error: bool, } -impl<'a, TxPool, D> Task +impl Task where TxPool: ports::worker::TxPool, - D: ports::worker::OffChainDatabase + 'a, + D: ports::worker::OffChainDatabase, { fn process_block(&mut self, result: SharedImportResult) -> anyhow::Result<()> { let block = &result.sealed_block.entity; diff --git a/crates/fuel-core/src/service/adapters.rs b/crates/fuel-core/src/service/adapters.rs index 8dfe7e2e98c..7c356611967 100644 --- a/crates/fuel-core/src/service/adapters.rs +++ b/crates/fuel-core/src/service/adapters.rs @@ -49,7 +49,6 @@ use crate::{ pub mod block_importer; pub mod consensus_module; pub mod consensus_parameters_provider; -pub mod da_compression; pub mod executor; pub mod fuel_gas_price_provider; pub mod gas_price_adapters; diff --git a/crates/fuel-core/src/service/adapters/da_compression.rs b/crates/fuel-core/src/service/adapters/da_compression.rs deleted file mode 100644 index 8b137891791..00000000000 --- a/crates/fuel-core/src/service/adapters/da_compression.rs +++ 
/dev/null @@ -1 +0,0 @@ - From e0ddd856223ff88118f2aea320962f80ddaf3cc0 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 25 Sep 2024 12:33:43 +0300 Subject: [PATCH 067/112] Rewrite "we have enough keys" assertion in registry evictor --- crates/compression/src/eviction_policy.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/compression/src/eviction_policy.rs b/crates/compression/src/eviction_policy.rs index 34f4a937e4e..d57fef0ba3a 100644 --- a/crates/compression/src/eviction_policy.rs +++ b/crates/compression/src/eviction_policy.rs @@ -28,9 +28,11 @@ impl CacheEvictor { // Pick first key not in the set // TODO: use a proper algo, maybe LRU? let mut key = db.read_latest(keyspace)?; + + debug_assert!(self.keep_keys[keyspace].len() < 2usize.pow(24).saturating_sub(2)); + while self.keep_keys[keyspace].contains(&key) { key = key.next(); - assert_ne!(key, RegistryKey::ZERO, "Ran out of keys"); } db.write_latest(keyspace, key)?; From e0e505de032427bb941a48a9526558f5ba6f97e2 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 25 Sep 2024 14:16:09 +0300 Subject: [PATCH 068/112] Get rid of PostcardSerialized --- Cargo.lock | 1 + crates/compression/Cargo.toml | 1 + crates/compression/src/context/compress.rs | 85 +---------- crates/compression/src/context/decompress.rs | 74 +--------- crates/compression/src/context/prepare.rs | 66 +-------- crates/compression/src/lib.rs | 42 +++--- crates/compression/src/ports.rs | 13 +- crates/compression/src/tables.rs | 138 +++++++++++++++--- .../src/graphql_api/da_compression.rs | 17 +-- .../src/graphql_api/storage/da_compression.rs | 50 ++++--- 10 files changed, 190 insertions(+), 297 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 71c2d7a930b..070bc2db0fa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3365,6 +3365,7 @@ dependencies = [ "proptest", "rand", "serde", + "strum 0.26.3", "thiserror", ] diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 
ea8763654c4..c2a759d1223 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -26,6 +26,7 @@ fuel-core-types = { workspace = true, features = [ postcard = { version = "1.0", features = ["use-std"] } rand = { workspace = true, optional = true } serde = { version = "1.0", features = ["derive"] } +strum = { version = "0.26", features = ["derive"] } thiserror = { workspace = true } [dev-dependencies] diff --git a/crates/compression/src/context/compress.rs b/crates/compression/src/context/compress.rs index 1209405c208..e76557ededa 100644 --- a/crates/compression/src/context/compress.rs +++ b/crates/compression/src/context/compress.rs @@ -1,18 +1,10 @@ -use std::collections::HashMap; - use fuel_core_types::{ fuel_compression::{ CompressibleBy, ContextError, - RegistryKey, }, fuel_tx::{ - input::PredicateCode, - Address, - AssetId, CompressedUtxoId, - ContractId, - ScriptCode, UtxoId, }, }; @@ -20,93 +12,20 @@ use fuel_core_types::{ use crate::{ compress::CompressDb, eviction_policy::CacheEvictor, - ports::EvictorDb, - tables::{ - PerRegistryKeyspace, - PostcardSerialized, - RegistryKeyspace, - }, + tables::PerRegistryKeyspaceMap, }; pub struct CompressCtx { pub db: D, pub cache_evictor: CacheEvictor, /// Changes to the temporary registry, to be included in the compressed block header - pub changes: PerRegistryKeyspace>, + pub changes: PerRegistryKeyspaceMap, } impl ContextError for CompressCtx { type Error = anyhow::Error; } -fn registry_substitute< - D: CompressDb + EvictorDb, - T: serde::Serialize + Default + PartialEq, ->( - keyspace: RegistryKeyspace, - value: &T, - ctx: &mut CompressCtx, -) -> anyhow::Result { - if *value == T::default() { - return Ok(RegistryKey::DEFAULT_VALUE); - } - - let ser_value = postcard::to_stdvec(value)?; - if let Some(found) = ctx.db.registry_index_lookup(keyspace, ser_value)? 
{ - return Ok(found); - } - - let key = ctx.cache_evictor.next_key(&mut ctx.db, keyspace)?; - let old = ctx.changes[keyspace].insert(key, PostcardSerialized::new(value)?); - assert!(old.is_none(), "Key collision in registry substitution"); - Ok(key) -} - -impl CompressibleBy> for Address { - async fn compress_with( - &self, - ctx: &mut CompressCtx, - ) -> anyhow::Result { - registry_substitute(RegistryKeyspace::address, self, ctx) - } -} - -impl CompressibleBy> for AssetId { - async fn compress_with( - &self, - ctx: &mut CompressCtx, - ) -> anyhow::Result { - registry_substitute(RegistryKeyspace::asset_id, self, ctx) - } -} - -impl CompressibleBy> for ContractId { - async fn compress_with( - &self, - ctx: &mut CompressCtx, - ) -> anyhow::Result { - registry_substitute(RegistryKeyspace::contract_id, self, ctx) - } -} - -impl CompressibleBy> for ScriptCode { - async fn compress_with( - &self, - ctx: &mut CompressCtx, - ) -> anyhow::Result { - registry_substitute(RegistryKeyspace::script_code, self, ctx) - } -} - -impl CompressibleBy> for PredicateCode { - async fn compress_with( - &self, - ctx: &mut CompressCtx, - ) -> anyhow::Result { - registry_substitute(RegistryKeyspace::script_code, self, ctx) - } -} - impl CompressibleBy> for UtxoId { async fn compress_with( &self, diff --git a/crates/compression/src/context/decompress.rs b/crates/compression/src/context/decompress.rs index d9aec760bdf..2482f5e84cb 100644 --- a/crates/compression/src/context/decompress.rs +++ b/crates/compression/src/context/decompress.rs @@ -4,7 +4,6 @@ use fuel_core_types::{ ContextError, Decompress, DecompressibleBy, - RegistryKey, }, fuel_tx::{ input::{ @@ -17,25 +16,17 @@ use fuel_core_types::{ MessageSpecification, }, AsField, - PredicateCode, }, - Address, - AssetId, CompressedUtxoId, - ContractId, Mint, - ScriptCode, Transaction, UtxoId, }, }; -use crate::{ - decompress::{ - DecompressDb, - DecompressError, - }, - tables::RegistryKeyspace, +use crate::decompress::{ + DecompressDb, + 
DecompressError, }; pub struct DecompressCtx { @@ -46,65 +37,6 @@ impl ContextError for DecompressCtx { type Error = DecompressError; } -fn registry_desubstitute< - D: DecompressDb, - T: serde::de::DeserializeOwned + Default + PartialEq, ->( - keyspace: RegistryKeyspace, - key: RegistryKey, - ctx: &DecompressCtx, -) -> Result { - if key == RegistryKey::DEFAULT_VALUE { - return Ok(T::default()); - } - Ok(postcard::from_bytes(&ctx.db.read_registry(keyspace, key)?)?) -} - -impl DecompressibleBy> for Address { - async fn decompress_with( - c: RegistryKey, - ctx: &DecompressCtx, - ) -> Result { - registry_desubstitute(RegistryKeyspace::address, c, ctx) - } -} - -impl DecompressibleBy> for AssetId { - async fn decompress_with( - c: RegistryKey, - ctx: &DecompressCtx, - ) -> Result { - registry_desubstitute(RegistryKeyspace::asset_id, c, ctx) - } -} - -impl DecompressibleBy> for ContractId { - async fn decompress_with( - c: RegistryKey, - ctx: &DecompressCtx, - ) -> Result { - registry_desubstitute(RegistryKeyspace::contract_id, c, ctx) - } -} - -impl DecompressibleBy> for ScriptCode { - async fn decompress_with( - c: RegistryKey, - ctx: &DecompressCtx, - ) -> Result { - registry_desubstitute(RegistryKeyspace::script_code, c, ctx) - } -} - -impl DecompressibleBy> for PredicateCode { - async fn decompress_with( - c: RegistryKey, - ctx: &DecompressCtx, - ) -> Result { - registry_desubstitute(RegistryKeyspace::predicate_code, c, ctx) - } -} - impl DecompressibleBy> for UtxoId { async fn decompress_with( c: CompressedUtxoId, diff --git a/crates/compression/src/context/prepare.rs b/crates/compression/src/context/prepare.rs index eccf02c1781..368aedcd58f 100644 --- a/crates/compression/src/context/prepare.rs +++ b/crates/compression/src/context/prepare.rs @@ -8,14 +8,10 @@ use fuel_core_types::{ }, fuel_tx::*, }; -use input::PredicateCode; use crate::{ compress::CompressDb, - tables::{ - PerRegistryKeyspace, - RegistryKeyspace, - }, + tables::PerRegistryKeyspace, }; /// 
Preparation pass through the block to collect all keys accessed during compression. @@ -31,66 +27,6 @@ impl ContextError for PrepareCtx { type Error = anyhow::Error; } -fn registry_prepare( - keyspace: RegistryKeyspace, - value: &T, - ctx: &mut PrepareCtx, -) -> anyhow::Result { - if *value == T::default() { - return Ok(RegistryKey::ZERO); - } - let value = postcard::to_stdvec(value)?; - if let Some(found) = ctx.db.registry_index_lookup(keyspace, value)? { - ctx.accessed_keys[keyspace].insert(found); - } - Ok(RegistryKey::ZERO) -} - -impl CompressibleBy> for Address { - async fn compress_with( - &self, - ctx: &mut PrepareCtx, - ) -> anyhow::Result { - registry_prepare(RegistryKeyspace::address, self, ctx) - } -} - -impl CompressibleBy> for AssetId { - async fn compress_with( - &self, - ctx: &mut PrepareCtx, - ) -> anyhow::Result { - registry_prepare(RegistryKeyspace::asset_id, self, ctx) - } -} - -impl CompressibleBy> for ContractId { - async fn compress_with( - &self, - ctx: &mut PrepareCtx, - ) -> anyhow::Result { - registry_prepare(RegistryKeyspace::contract_id, self, ctx) - } -} - -impl CompressibleBy> for ScriptCode { - async fn compress_with( - &self, - ctx: &mut PrepareCtx, - ) -> anyhow::Result { - registry_prepare(RegistryKeyspace::script_code, self, ctx) - } -} - -impl CompressibleBy> for PredicateCode { - async fn compress_with( - &self, - ctx: &mut PrepareCtx, - ) -> anyhow::Result { - registry_prepare(RegistryKeyspace::script_code, self, ctx) - } -} - impl CompressibleBy> for UtxoId { async fn compress_with( &self, diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 6367e50e46d..a386540da80 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -14,7 +14,10 @@ mod context { pub mod prepare; } -pub use tables::RegistryKeyspace; +pub use tables::{ + RegistryKeyspace, + RegistryKeyspaceValue, +}; use serde::{ Deserialize, @@ -64,8 +67,6 @@ struct Header { #[cfg(test)] mod tests { - use 
std::collections::HashMap; - use fuel_core_types::{ fuel_compression::RegistryKey, fuel_tx::{ @@ -77,10 +78,7 @@ mod tests { }, }; use proptest::prelude::*; - use tables::{ - PerRegistryKeyspace, - PostcardSerialized, - }; + use tables::PerRegistryKeyspaceMap; use super::*; @@ -94,35 +92,31 @@ mod tests { ] } - fn keyspace_and_value( - ) -> impl Strategy { + fn keyspace_value() -> impl Strategy { (keyspace(), prop::array::uniform32(0..u8::MAX)).prop_map(|(keyspace, value)| { - let value = match keyspace { + match keyspace { RegistryKeyspace::address => { - PostcardSerialized::new(Address::new(value)).unwrap() + RegistryKeyspaceValue::address(Address::new(value)) } RegistryKeyspace::asset_id => { - PostcardSerialized::new(AssetId::new(value)).unwrap() + RegistryKeyspaceValue::asset_id(AssetId::new(value)) } RegistryKeyspace::contract_id => { - PostcardSerialized::new(ContractId::new(value)).unwrap() + RegistryKeyspaceValue::contract_id(ContractId::new(value)) } RegistryKeyspace::script_code => { let len = (value[0] % 32) as usize; - PostcardSerialized::new(ScriptCode { + RegistryKeyspaceValue::script_code(ScriptCode { bytes: value[..len].to_vec(), }) - .unwrap() } RegistryKeyspace::predicate_code => { let len = (value[0] % 32) as usize; - PostcardSerialized::new(PredicateCode { + RegistryKeyspaceValue::predicate_code(PredicateCode { bytes: value[..len].to_vec(), }) - .unwrap() } - }; - (keyspace, value) + } }) } @@ -138,17 +132,17 @@ mod tests { state_transition_bytecode_version in 0..u32::MAX, registrations_root in prop::array::uniform32(0..u8::MAX), registration_inputs in prop::collection::vec( - (keyspace_and_value(), prop::num::u16::ANY).prop_map(|((ks, v), rk)| { + (keyspace_value(), prop::num::u16::ANY).prop_map(|(v, rk)| { let k = RegistryKey::try_from(rk as u32).unwrap(); - (ks, (k, v)) + (k, v) }), 0..123 ), ) { - let mut registrations: PerRegistryKeyspace> = Default::default(); + let mut registrations: PerRegistryKeyspaceMap = Default::default(); - for 
(keyspace, (key, data)) in registration_inputs { - registrations[keyspace].insert(key, data); + for (key, ksv) in registration_inputs { + registrations.insert(key, ksv); } let original = CompressedBlockPayload { diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index dd19786fc60..de0701dba0d 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -12,7 +12,10 @@ use fuel_core_types::{ fuel_types::Nonce, }; -use crate::tables::RegistryKeyspace; +use crate::tables::{ + RegistryKeyspace, + RegistryKeyspaceValue, +}; /// Rolling cache for compression. /// Holds the latest state which can be event sourced from the compressed blocks. @@ -24,21 +27,19 @@ pub trait TemporalRegistry { &self, keyspace: RegistryKeyspace, key: RegistryKey, - ) -> anyhow::Result>; + ) -> anyhow::Result; /// Reads a value from the registry at its current height. fn write_registry( &mut self, - keyspace: RegistryKeyspace, key: RegistryKey, - value: Vec, + value: RegistryKeyspaceValue, ) -> anyhow::Result<()>; /// Lookup registry key by the value. 
fn registry_index_lookup( &self, - keyspace: RegistryKeyspace, - value: Vec, + value: RegistryKeyspaceValue, ) -> anyhow::Result>; } diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index b6350f25e77..cc57ed29959 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -1,35 +1,63 @@ use fuel_core_types::{ - fuel_compression::RegistryKey, + fuel_compression::{ + CompressibleBy, + DecompressibleBy, + RegistryKey, + }, fuel_tx::{ + input::PredicateCode, Address, AssetId, ContractId, + ScriptCode, }, }; use std::collections::HashMap; -use crate::ports::TemporalRegistry; - -/// Type-erased (serialized) data -#[derive(Debug, Clone)] -pub struct PostcardSerialized(Vec); -impl PostcardSerialized { - pub(crate) fn new(value: T) -> anyhow::Result { - Ok(Self(postcard::to_stdvec(&value)?)) - } -} +use crate::{ + compress::CompressDb, + context::{ + compress::CompressCtx, + decompress::DecompressCtx, + prepare::PrepareCtx, + }, + decompress::{ + DecompressDb, + DecompressError, + }, + ports::{ + EvictorDb, + TemporalRegistry, + }, +}; macro_rules! tables { ($($name:ident: $type:ty),*$(,)?) 
=> { #[doc = "RegistryKey namespaces"] - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] + #[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] #[allow(non_camel_case_types)] // Match names in structs exactly - pub enum RegistryKeyspace { + #[derive(strum::EnumDiscriminants)] + #[strum_discriminants(name(RegistryKeyspace))] + #[strum_discriminants(derive(Hash, serde::Serialize, serde::Deserialize))] + #[strum_discriminants(allow(non_camel_case_types))] + pub enum RegistryKeyspaceValue { $( - $name, + $name($type), )* } + + impl RegistryKeyspaceValue { + pub fn keyspace(&self) -> RegistryKeyspace { + match self { + $( + RegistryKeyspaceValue::$name(_) => RegistryKeyspace::$name, + )* + } + } + } + + #[doc = "A value for each keyspace"] #[derive(Debug, Clone, Default)] pub struct PerRegistryKeyspace { @@ -56,20 +84,39 @@ macro_rules! tables { } } + #[doc = "Key-value mapping for each keyspace"] + #[derive(Debug, Clone, Default)] + pub struct PerRegistryKeyspaceMap { + $(pub $name: HashMap,)* + } + + impl PerRegistryKeyspaceMap { + #[cfg(test)] + pub fn insert(&mut self, key: RegistryKey, value: RegistryKeyspaceValue) { + match value { + $( + RegistryKeyspaceValue::$name(value) => { + self.$name.insert(key, value); + } + )* + } + } + } + #[doc = "The set of registrations for each table, as used in the compressed block header"] #[derive(Debug, Clone, PartialEq, Default, serde::Serialize, serde::Deserialize)] pub struct RegistrationsPerTable { $(pub $name: Vec<(RegistryKey, $type)>,)* } - impl TryFrom>> for RegistrationsPerTable { + impl TryFrom for RegistrationsPerTable { type Error = anyhow::Error; - fn try_from(value: PerRegistryKeyspace>) -> Result { + fn try_from(value: PerRegistryKeyspaceMap) -> Result { let mut result = Self::default(); $( for (key, value) in value.$name.into_iter() { - result.$name.push((key, postcard::from_bytes(&value.0)?)); + result.$name.push((key, value)); } )* 
Ok(result) @@ -80,13 +127,64 @@ macro_rules! tables { pub(crate) fn write_to_registry(&self, registry: &mut R) -> anyhow::Result<()> { $( for (key, value) in self.$name.iter() { - registry.write_registry(RegistryKeyspace::$name, *key, postcard::to_stdvec(value)?)?; + registry.write_registry(*key, RegistryKeyspaceValue::$name(value.clone()))?; } )* Ok(()) } } + + $( + impl CompressibleBy> for $type { + async fn compress_with( + &self, + ctx: &mut PrepareCtx, + ) -> anyhow::Result { + if *self == <$type>::default() { + return Ok(RegistryKey::ZERO); + } + if let Some(found) = ctx.db.registry_index_lookup(RegistryKeyspaceValue::$name(self.clone()))? { + ctx.accessed_keys[RegistryKeyspace::$name].insert(found); + } + Ok(RegistryKey::ZERO) + } + } + + impl CompressibleBy> for $type { + async fn compress_with( + &self, + ctx: &mut CompressCtx, + ) -> anyhow::Result { + if *self == Default::default() { + return Ok(RegistryKey::DEFAULT_VALUE); + } + if let Some(found) = ctx.db.registry_index_lookup(RegistryKeyspaceValue::$name(self.clone()))? { + return Ok(found); + } + + let key = ctx.cache_evictor.next_key(&mut ctx.db, RegistryKeyspace::$name)?; + let old = ctx.changes.$name.insert(key, self.clone()); + assert!(old.is_none(), "Key collision in registry substitution"); + Ok(key) + } + } + + impl DecompressibleBy> for $type { + async fn decompress_with( + key: RegistryKey, + ctx: &DecompressCtx, + ) -> Result { + if key == RegistryKey::DEFAULT_VALUE { + return Ok(<$type>::default()); + } + match ctx.db.read_registry(RegistryKeyspace::$name, key)? 
{ + RegistryKeyspaceValue::$name(value) => Ok(value), + _ => panic!("Registry returned incorrectly-typed value") + } + } + } + )* }; } @@ -94,8 +192,8 @@ tables!( address: Address, asset_id: AssetId, contract_id: ContractId, - script_code: Vec, - predicate_code: Vec, + script_code: ScriptCode, + predicate_code: PredicateCode, ); // TODO: move inside the macro when this stabilizes: https://github.com/rust-lang/rust/pull/122808 diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index 1dafb309997..53c6ee59bb6 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -15,6 +15,7 @@ use fuel_core_compression::{ UtxoIdToPointer, }, RegistryKeyspace, + RegistryKeyspaceValue, }; use fuel_core_storage::{ not_found, @@ -66,7 +67,7 @@ where &self, keyspace: RegistryKeyspace, key: fuel_core_types::fuel_compression::RegistryKey, - ) -> anyhow::Result> { + ) -> anyhow::Result { Ok(self .db_tx .storage_as_ref::() @@ -77,37 +78,35 @@ where fn write_registry( &mut self, - keyspace: RegistryKeyspace, key: fuel_core_types::fuel_compression::RegistryKey, - value: Vec, + value: RegistryKeyspaceValue, ) -> anyhow::Result<()> { // Write the actual value self.db_tx .storage_as_mut::() - .insert(&(keyspace, key), &value)?; + .insert(&(value.keyspace(), key), &value)?; // Remove the overwritten value from index, if any self.db_tx .storage_as_mut::() - .remove(&(keyspace, value.clone()))?; + .remove(&value)?; // Add the new value to the index self.db_tx .storage_as_mut::() - .insert(&(keyspace, value), &key)?; + .insert(&value, &key)?; Ok(()) } fn registry_index_lookup( &self, - keyspace: RegistryKeyspace, - value: Vec, + value: RegistryKeyspaceValue, ) -> anyhow::Result> { Ok(self .db_tx .storage_as_ref::() - .get(&(keyspace, value))? + .get(&value)? 
.map(|v| v.into_owned())) } } diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression.rs b/crates/fuel-core/src/graphql_api/storage/da_compression.rs index 38ad9e68eae..56d5cabcb83 100644 --- a/crates/fuel-core/src/graphql_api/storage/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/storage/da_compression.rs @@ -1,4 +1,7 @@ -use fuel_core_compression::RegistryKeyspace; +use fuel_core_compression::{ + RegistryKeyspace, + RegistryKeyspaceValue, +}; use fuel_core_storage::{ blueprint::plain::Plain, codec::{ @@ -38,12 +41,11 @@ impl Mappable for DaCompressionTemporalRegistry { type Key = Self::OwnedKey; type OwnedKey = (RegistryKeyspace, RegistryKey); type Value = Self::OwnedValue; - // This a postcard-encoded value, where the original type depends on the keyspace. - type OwnedValue = Vec; + type OwnedValue = RegistryKeyspaceValue; } impl TableWithBlueprint for DaCompressionTemporalRegistry { - type Blueprint = Plain; + type Blueprint = Plain; type Column = super::Column; fn column() -> Self::Column { @@ -55,9 +57,8 @@ pub struct DaCompressionTemporalRegistryIndex; impl Mappable for DaCompressionTemporalRegistryIndex { type Key = Self::OwnedKey; - // The second value is a postcard-encoded value, where the original type depends on the keyspace. - // TODO: should we hash the second part of this key? - type OwnedKey = (RegistryKeyspace, Vec); + // TODO: should we hash the key? 
+ type OwnedKey = RegistryKeyspaceValue; type Value = Self::OwnedValue; type OwnedValue = RegistryKey; } @@ -94,35 +95,46 @@ impl TableWithBlueprint for DaCompressionTemporalRegistryEvictor { #[cfg(test)] mod tests { + use fuel_core_types::fuel_tx::{ + input::PredicateCode, + ScriptCode, + }; + use super::*; - #[allow(clippy::arithmetic_side_effects)] // Test code, also safe fn generate_keyspace(rng: &mut impl rand::Rng) -> RegistryKeyspace { rng.gen() } - #[allow(clippy::arithmetic_side_effects)] // Test code, also safe + #[allow(clippy::arithmetic_side_effects)] // Test code, and also safe fn generate_raw_key(rng: &mut impl rand::Rng) -> RegistryKey { let raw_key: u32 = rng.gen_range(0..2u32.pow(24) - 2); RegistryKey::try_from(raw_key).unwrap() } - #[allow(clippy::arithmetic_side_effects)] // Test code, also safe fn generate_registry_key( rng: &mut impl rand::Rng, ) -> (RegistryKeyspace, RegistryKey) { (generate_keyspace(rng), generate_raw_key(rng)) } - fn generate_registry_index_key( - rng: &mut impl rand::Rng, - ) -> (RegistryKeyspace, Vec) { - let keyspace: RegistryKeyspace = rng.gen(); - + fn generate_registry_index_key(rng: &mut impl rand::Rng) -> RegistryKeyspaceValue { let mut bytes: Vec = vec![0u8; rng.gen_range(0..1234)]; rng.fill(bytes.as_mut_slice()); - (keyspace, bytes) + match rng.gen() { + RegistryKeyspace::address => RegistryKeyspaceValue::address(rng.gen()), + RegistryKeyspace::asset_id => RegistryKeyspaceValue::asset_id(rng.gen()), + RegistryKeyspace::contract_id => { + RegistryKeyspaceValue::contract_id(rng.gen()) + } + RegistryKeyspace::script_code => { + RegistryKeyspaceValue::script_code(ScriptCode { bytes }) + } + RegistryKeyspace::predicate_code => { + RegistryKeyspaceValue::predicate_code(PredicateCode { bytes }) + } + } } fuel_core_storage::basic_storage_tests!( @@ -134,14 +146,14 @@ mod tests { fuel_core_storage::basic_storage_tests!( DaCompressionTemporalRegistry, (RegistryKeyspace::address, RegistryKey::ZERO), - ::Value::default(), - 
::Value::default(), + RegistryKeyspaceValue::address(fuel_core_types::fuel_tx::Address::zeroed()), + RegistryKeyspaceValue::address(fuel_core_types::fuel_tx::Address::zeroed()), generate_registry_key ); fuel_core_storage::basic_storage_tests!( DaCompressionTemporalRegistryIndex, - (RegistryKeyspace::address, Vec::default()), + RegistryKeyspaceValue::address(fuel_core_types::fuel_tx::Address::zeroed()), RegistryKey::ZERO, RegistryKey::ZERO, generate_registry_index_key From 5cf05e17c9750e5f68fd570d6bbc883c8c24d0dc Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 25 Sep 2024 14:55:28 +0300 Subject: [PATCH 069/112] Split TemporalRegistry trait to be per keyspace --- crates/compression/src/compress.rs | 6 +- crates/compression/src/decompress.rs | 12 +- crates/compression/src/ports.rs | 24 +--- crates/compression/src/tables.rs | 22 ++-- .../src/graphql_api/da_compression.rs | 117 +++++++++++------- 5 files changed, 99 insertions(+), 82 deletions(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 75f3794e158..fb8e0e42fb9 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -15,12 +15,12 @@ use crate::{ eviction_policy::CacheEvictor, ports::{ EvictorDb, - TemporalRegistry, UtxoIdToPointer, }, tables::{ PerRegistryKeyspace, RegistrationsPerTable, + TemporalRegistryAll, }, CompressedBlockPayload, Header, @@ -34,8 +34,8 @@ pub enum Error { Other(#[from] anyhow::Error), } -pub trait CompressDb: TemporalRegistry + UtxoIdToPointer {} -impl CompressDb for T where T: TemporalRegistry + UtxoIdToPointer {} +pub trait CompressDb: TemporalRegistryAll + UtxoIdToPointer {} +impl CompressDb for T where T: TemporalRegistryAll + UtxoIdToPointer {} pub async fn compress( db: D, diff --git a/crates/compression/src/decompress.rs b/crates/compression/src/decompress.rs index 600794f38e2..e6db1f5dae1 100644 --- a/crates/compression/src/decompress.rs +++ b/crates/compression/src/decompress.rs @@ -14,10 
+14,8 @@ use fuel_core_types::{ use crate::{ context::decompress::DecompressCtx, - ports::{ - HistoryLookup, - TemporalRegistry, - }, + ports::HistoryLookup, + tables::TemporalRegistryAll, CompressedBlockPayload, }; @@ -34,10 +32,10 @@ pub enum DecompressError { Other(#[from] anyhow::Error), } -pub trait DecompressDb: TemporalRegistry + HistoryLookup {} -impl DecompressDb for T where T: TemporalRegistry + HistoryLookup {} +pub trait DecompressDb: TemporalRegistryAll + HistoryLookup {} +impl DecompressDb for T where T: TemporalRegistryAll + HistoryLookup {} -pub async fn decompress( +pub async fn decompress( mut db: D, block: Vec, ) -> Result { diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index de0701dba0d..500b2eb940c 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -12,35 +12,21 @@ use fuel_core_types::{ fuel_types::Nonce, }; -use crate::tables::{ - RegistryKeyspace, - RegistryKeyspaceValue, -}; +use crate::tables::RegistryKeyspace; /// Rolling cache for compression. /// Holds the latest state which can be event sourced from the compressed blocks. /// The changes done using this trait in a single call to `compress` or `decompress` /// must be committed atomically, after which block height must be incremented. -pub trait TemporalRegistry { +pub trait TemporalRegistry { /// Reads a value from the registry at its current height. - fn read_registry( - &self, - keyspace: RegistryKeyspace, - key: RegistryKey, - ) -> anyhow::Result; + fn read_registry(&self, key: RegistryKey) -> anyhow::Result; /// Reads a value from the registry at its current height. - fn write_registry( - &mut self, - key: RegistryKey, - value: RegistryKeyspaceValue, - ) -> anyhow::Result<()>; + fn write_registry(&mut self, key: RegistryKey, value: T) -> anyhow::Result<()>; /// Lookup registry key by the value. 
- fn registry_index_lookup( - &self, - value: RegistryKeyspaceValue, - ) -> anyhow::Result>; + fn registry_index_lookup(&self, value: &T) -> anyhow::Result>; } /// Lookup for UTXO pointers used for compression. diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index cc57ed29959..f861ed55916 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -123,11 +123,20 @@ macro_rules! tables { } } + pub trait TemporalRegistryAll: Sized $( + + TemporalRegistry<$type> + )* {} + + impl TemporalRegistryAll for T where T: Sized $( + + TemporalRegistry<$type> + )* {} + + impl RegistrationsPerTable { - pub(crate) fn write_to_registry(&self, registry: &mut R) -> anyhow::Result<()> { + pub(crate) fn write_to_registry(&self, registry: &mut R) -> anyhow::Result<()> { $( for (key, value) in self.$name.iter() { - registry.write_registry(*key, RegistryKeyspaceValue::$name(value.clone()))?; + registry.write_registry(*key, value.clone())?; } )* @@ -144,7 +153,7 @@ macro_rules! tables { if *self == <$type>::default() { return Ok(RegistryKey::ZERO); } - if let Some(found) = ctx.db.registry_index_lookup(RegistryKeyspaceValue::$name(self.clone()))? { + if let Some(found) = ctx.db.registry_index_lookup(self)? { ctx.accessed_keys[RegistryKeyspace::$name].insert(found); } Ok(RegistryKey::ZERO) @@ -159,7 +168,7 @@ macro_rules! tables { if *self == Default::default() { return Ok(RegistryKey::DEFAULT_VALUE); } - if let Some(found) = ctx.db.registry_index_lookup(RegistryKeyspaceValue::$name(self.clone()))? { + if let Some(found) = ctx.db.registry_index_lookup(self)? { return Ok(found); } @@ -178,10 +187,7 @@ macro_rules! tables { if key == RegistryKey::DEFAULT_VALUE { return Ok(<$type>::default()); } - match ctx.db.read_registry(RegistryKeyspace::$name, key)? { - RegistryKeyspaceValue::$name(value) => Ok(value), - _ => panic!("Registry returned incorrectly-typed value") - } + Ok(ctx.db.read_registry(key)?) 
} } )* diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index 53c6ee59bb6..9f594ffc255 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -24,6 +24,13 @@ use fuel_core_storage::{ }; use fuel_core_types::{ blockchain::block::Block, + fuel_tx::{ + input::PredicateCode, + Address, + AssetId, + ContractId, + ScriptCode, + }, services::executor::Event, }; use futures::FutureExt; @@ -59,56 +66,76 @@ struct CompressTx<'a, Tx> { block_events: &'a [Event], } -impl<'a, Tx> TemporalRegistry for CompressTx<'a, Tx> -where - Tx: OffChainDatabaseTransaction, -{ - fn read_registry( - &self, - keyspace: RegistryKeyspace, - key: fuel_core_types::fuel_compression::RegistryKey, - ) -> anyhow::Result { - Ok(self - .db_tx - .storage_as_ref::() - .get(&(keyspace, key))? - .ok_or(not_found!(DaCompressionTemporalRegistry))? - .into_owned()) - } +macro_rules! impl_temporal_registry { + ($($name:ident: $type:ty),*$(,)?) => { + $( + impl<'a, Tx> TemporalRegistry<$type> for CompressTx<'a, Tx> + where + Tx: OffChainDatabaseTransaction, + { + fn read_registry( + &self, + key: fuel_core_types::fuel_compression::RegistryKey, + ) -> anyhow::Result<$type> { + let v = self + .db_tx + .storage_as_ref::() + .get(&(RegistryKeyspace::$name, key))? + .ok_or(not_found!(DaCompressionTemporalRegistry))? 
+ .into_owned(); + match v { + RegistryKeyspaceValue::$name(v) => Ok(v), + _ => anyhow::bail!("Unexpected value in the registry"), + } + } - fn write_registry( - &mut self, - key: fuel_core_types::fuel_compression::RegistryKey, - value: RegistryKeyspaceValue, - ) -> anyhow::Result<()> { - // Write the actual value - self.db_tx - .storage_as_mut::() - .insert(&(value.keyspace(), key), &value)?; + fn write_registry( + &mut self, + key: fuel_core_types::fuel_compression::RegistryKey, + value: $type, + ) -> anyhow::Result<()> { + // Write the actual value + self.db_tx + .storage_as_mut::() + .insert(&(RegistryKeyspace::$name, key), &RegistryKeyspaceValue::$name(value.clone()))?; - // Remove the overwritten value from index, if any - self.db_tx - .storage_as_mut::() - .remove(&value)?; + // Remove the overwritten value from index, if any + self.db_tx + .storage_as_mut::() + .remove(&RegistryKeyspaceValue::$name(value.clone()))?; - // Add the new value to the index - self.db_tx - .storage_as_mut::() - .insert(&value, &key)?; + // Add the new value to the index + self.db_tx + .storage_as_mut::() + .insert(&RegistryKeyspaceValue::$name(value), &key)?; - Ok(()) - } + Ok(()) + } - fn registry_index_lookup( - &self, - value: RegistryKeyspaceValue, - ) -> anyhow::Result> { - Ok(self - .db_tx - .storage_as_ref::() - .get(&value)? - .map(|v| v.into_owned())) - } + fn registry_index_lookup( + &self, + value: &$type, + ) -> anyhow::Result> + { + Ok(self + .db_tx + .storage_as_ref::() + .get(&RegistryKeyspaceValue::$name(value.clone()))? + .map(|v| v.into_owned())) + } + } + + )* + }; +} + +// Arguments here should match the tables! macro from crates/compression/src/tables.rs +impl_temporal_registry! 
{ + address: Address, + asset_id: AssetId, + contract_id: ContractId, + script_code: ScriptCode, + predicate_code: PredicateCode, } impl<'a, Tx> UtxoIdToPointer for CompressTx<'a, Tx> From fdb8337703b663efdae93d8e2288da6b36f5578d Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 25 Sep 2024 15:04:39 +0300 Subject: [PATCH 070/112] Simplify directory structure --- Cargo.lock | 1 - crates/compression/Cargo.toml | 1 - crates/compression/src/compress.rs | 74 ++++++++-- crates/compression/src/context/compress.rs | 36 ----- crates/compression/src/context/decompress.rs | 135 ------------------ crates/compression/src/context/prepare.rs | 40 ------ crates/compression/src/decompress.rs | 142 +++++++++++++++++-- crates/compression/src/lib.rs | 5 - crates/compression/src/tables.rs | 35 ++--- 9 files changed, 209 insertions(+), 260 deletions(-) delete mode 100644 crates/compression/src/context/compress.rs delete mode 100644 crates/compression/src/context/decompress.rs delete mode 100644 crates/compression/src/context/prepare.rs diff --git a/Cargo.lock b/Cargo.lock index 070bc2db0fa..71c2d7a930b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3365,7 +3365,6 @@ dependencies = [ "proptest", "rand", "serde", - "strum 0.26.3", "thiserror", ] diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index c2a759d1223..ea8763654c4 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -26,7 +26,6 @@ fuel-core-types = { workspace = true, features = [ postcard = { version = "1.0", features = ["use-std"] } rand = { workspace = true, optional = true } serde = { version = "1.0", features = ["derive"] } -strum = { version = "0.26", features = ["derive"] } thiserror = { workspace = true } [dev-dependencies] diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index fb8e0e42fb9..0990045c194 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -1,17 +1,6 @@ -use 
fuel_core_types::{ - blockchain::block::Block, - fuel_compression::CompressibleBy, - fuel_tx::{ - Bytes32, - Transaction, - }, -}; +use std::collections::HashSet; use crate::{ - context::{ - compress::CompressCtx, - prepare::PrepareCtx, - }, eviction_policy::CacheEvictor, ports::{ EvictorDb, @@ -19,12 +8,28 @@ use crate::{ }, tables::{ PerRegistryKeyspace, + PerRegistryKeyspaceMap, RegistrationsPerTable, TemporalRegistryAll, }, CompressedBlockPayload, Header, }; +use fuel_core_types::{ + blockchain::block::Block, + fuel_compression::{ + CompressibleBy, + ContextError, + RegistryKey, + }, + fuel_tx::{ + Bytes32, + CompressedUtxoId, + Transaction, + TxPointer, + UtxoId, + }, +}; #[derive(Debug, thiserror::Error)] pub enum Error { @@ -93,3 +98,48 @@ pub async fn compress( Ok(compressed) } + +/// Preparation pass through the block to collect all keys accessed during compression. +/// Returns dummy values. The resulting "compressed block" should be discarded. +pub struct PrepareCtx { + /// Database handle + pub db: D, + /// Keys accessed during compression. Will not be overwritten. 
+ pub accessed_keys: PerRegistryKeyspace>, +} + +impl ContextError for PrepareCtx { + type Error = anyhow::Error; +} + +impl CompressibleBy> for UtxoId { + async fn compress_with( + &self, + _ctx: &mut PrepareCtx, + ) -> anyhow::Result { + Ok(CompressedUtxoId { + tx_pointer: TxPointer::default(), + output_index: 0, + }) + } +} + +pub struct CompressCtx { + pub db: D, + pub cache_evictor: CacheEvictor, + /// Changes to the temporary registry, to be included in the compressed block header + pub changes: PerRegistryKeyspaceMap, +} + +impl ContextError for CompressCtx { + type Error = anyhow::Error; +} + +impl CompressibleBy> for UtxoId { + async fn compress_with( + &self, + ctx: &mut CompressCtx, + ) -> anyhow::Result { + ctx.db.lookup(*self) + } +} diff --git a/crates/compression/src/context/compress.rs b/crates/compression/src/context/compress.rs deleted file mode 100644 index e76557ededa..00000000000 --- a/crates/compression/src/context/compress.rs +++ /dev/null @@ -1,36 +0,0 @@ -use fuel_core_types::{ - fuel_compression::{ - CompressibleBy, - ContextError, - }, - fuel_tx::{ - CompressedUtxoId, - UtxoId, - }, -}; - -use crate::{ - compress::CompressDb, - eviction_policy::CacheEvictor, - tables::PerRegistryKeyspaceMap, -}; - -pub struct CompressCtx { - pub db: D, - pub cache_evictor: CacheEvictor, - /// Changes to the temporary registry, to be included in the compressed block header - pub changes: PerRegistryKeyspaceMap, -} - -impl ContextError for CompressCtx { - type Error = anyhow::Error; -} - -impl CompressibleBy> for UtxoId { - async fn compress_with( - &self, - ctx: &mut CompressCtx, - ) -> anyhow::Result { - ctx.db.lookup(*self) - } -} diff --git a/crates/compression/src/context/decompress.rs b/crates/compression/src/context/decompress.rs deleted file mode 100644 index 2482f5e84cb..00000000000 --- a/crates/compression/src/context/decompress.rs +++ /dev/null @@ -1,135 +0,0 @@ -use fuel_core_types::{ - fuel_compression::{ - Compressible, - ContextError, - 
Decompress, - DecompressibleBy, - }, - fuel_tx::{ - input::{ - coin::{ - Coin, - CoinSpecification, - }, - message::{ - Message, - MessageSpecification, - }, - AsField, - }, - CompressedUtxoId, - Mint, - Transaction, - UtxoId, - }, -}; - -use crate::decompress::{ - DecompressDb, - DecompressError, -}; - -pub struct DecompressCtx { - pub db: D, -} - -impl ContextError for DecompressCtx { - type Error = DecompressError; -} - -impl DecompressibleBy> for UtxoId { - async fn decompress_with( - c: CompressedUtxoId, - ctx: &DecompressCtx, - ) -> Result { - Ok(ctx.db.utxo_id(c)?) - } -} - -impl DecompressibleBy> for Coin -where - D: DecompressDb, - Specification: CoinSpecification, - Specification::Predicate: DecompressibleBy>, - Specification::PredicateData: DecompressibleBy>, - Specification::PredicateGasUsed: DecompressibleBy>, - Specification::Witness: DecompressibleBy>, -{ - async fn decompress_with( - c: as Compressible>::Compressed, - ctx: &DecompressCtx, - ) -> Result, DecompressError> { - let utxo_id = UtxoId::decompress_with(c.utxo_id, ctx).await?; - let coin_info = ctx.db.coin(utxo_id)?; - let witness_index = c.witness_index.decompress(ctx).await?; - let predicate_gas_used = c.predicate_gas_used.decompress(ctx).await?; - let predicate = c.predicate.decompress(ctx).await?; - let predicate_data = c.predicate_data.decompress(ctx).await?; - Ok(Self { - utxo_id, - owner: coin_info.owner, - amount: coin_info.amount, - asset_id: coin_info.asset_id, - tx_pointer: Default::default(), - witness_index, - predicate_gas_used, - predicate, - predicate_data, - }) - } -} - -impl DecompressibleBy> for Message -where - D: DecompressDb, - Specification: MessageSpecification, - Specification::Data: DecompressibleBy> + Default, - Specification::Predicate: DecompressibleBy>, - Specification::PredicateData: DecompressibleBy>, - Specification::PredicateGasUsed: DecompressibleBy>, - Specification::Witness: DecompressibleBy>, -{ - async fn decompress_with( - c: as 
Compressible>::Compressed, - ctx: &DecompressCtx, - ) -> Result, DecompressError> { - let msg = ctx.db.message(c.nonce)?; - let witness_index = c.witness_index.decompress(ctx).await?; - let predicate_gas_used = c.predicate_gas_used.decompress(ctx).await?; - let predicate = c.predicate.decompress(ctx).await?; - let predicate_data = c.predicate_data.decompress(ctx).await?; - let mut message: Message = Message { - sender: msg.sender, - recipient: msg.recipient, - amount: msg.amount, - nonce: c.nonce, - witness_index, - predicate_gas_used, - data: Default::default(), - predicate, - predicate_data, - }; - - if let Some(data) = message.data.as_mut_field() { - data.clone_from(&msg.data) - } - - Ok(message) - } -} - -impl DecompressibleBy> for Mint { - async fn decompress_with( - c: Self::Compressed, - ctx: &DecompressCtx, - ) -> Result { - Ok(Transaction::mint( - Default::default(), // TODO: what should this we do with this? - c.input_contract.decompress(ctx).await?, - c.output_contract.decompress(ctx).await?, - c.mint_amount.decompress(ctx).await?, - c.mint_asset_id.decompress(ctx).await?, - c.gas_price.decompress(ctx).await?, - )) - } -} diff --git a/crates/compression/src/context/prepare.rs b/crates/compression/src/context/prepare.rs deleted file mode 100644 index 368aedcd58f..00000000000 --- a/crates/compression/src/context/prepare.rs +++ /dev/null @@ -1,40 +0,0 @@ -use std::collections::HashSet; - -use fuel_core_types::{ - fuel_compression::{ - CompressibleBy, - ContextError, - RegistryKey, - }, - fuel_tx::*, -}; - -use crate::{ - compress::CompressDb, - tables::PerRegistryKeyspace, -}; - -/// Preparation pass through the block to collect all keys accessed during compression. -/// Returns dummy values. The resulting "compressed block" should be discarded. -pub struct PrepareCtx { - /// Database handle - pub db: D, - /// Keys accessed during compression. Will not be overwritten. 
- pub accessed_keys: PerRegistryKeyspace>, -} - -impl ContextError for PrepareCtx { - type Error = anyhow::Error; -} - -impl CompressibleBy> for UtxoId { - async fn compress_with( - &self, - _ctx: &mut PrepareCtx, - ) -> anyhow::Result { - Ok(CompressedUtxoId { - tx_pointer: TxPointer::default(), - output_index: 0, - }) - } -} diff --git a/crates/compression/src/decompress.rs b/crates/compression/src/decompress.rs index e6db1f5dae1..a980be0a4e8 100644 --- a/crates/compression/src/decompress.rs +++ b/crates/compression/src/decompress.rs @@ -1,3 +1,8 @@ +use crate::{ + ports::HistoryLookup, + tables::TemporalRegistryAll, + CompressedBlockPayload, +}; use fuel_core_types::{ blockchain::{ block::PartialFuelBlock, @@ -8,15 +13,29 @@ use fuel_core_types::{ }, primitives::Empty, }, - fuel_compression::DecompressibleBy, - fuel_tx::Transaction, -}; - -use crate::{ - context::decompress::DecompressCtx, - ports::HistoryLookup, - tables::TemporalRegistryAll, - CompressedBlockPayload, + fuel_compression::{ + Compressible, + ContextError, + Decompress, + DecompressibleBy, + }, + fuel_tx::{ + input::{ + coin::{ + Coin, + CoinSpecification, + }, + message::{ + Message, + MessageSpecification, + }, + AsField, + }, + CompressedUtxoId, + Mint, + Transaction, + UtxoId, + }, }; #[derive(Debug, thiserror::Error)] @@ -84,3 +103,108 @@ pub async fn decompress( transactions, }) } + +pub struct DecompressCtx { + pub db: D, +} + +impl ContextError for DecompressCtx { + type Error = DecompressError; +} + +impl DecompressibleBy> for UtxoId { + async fn decompress_with( + c: CompressedUtxoId, + ctx: &DecompressCtx, + ) -> Result { + Ok(ctx.db.utxo_id(c)?) 
+ } +} + +impl DecompressibleBy> for Coin +where + D: DecompressDb, + Specification: CoinSpecification, + Specification::Predicate: DecompressibleBy>, + Specification::PredicateData: DecompressibleBy>, + Specification::PredicateGasUsed: DecompressibleBy>, + Specification::Witness: DecompressibleBy>, +{ + async fn decompress_with( + c: as Compressible>::Compressed, + ctx: &DecompressCtx, + ) -> Result, DecompressError> { + let utxo_id = UtxoId::decompress_with(c.utxo_id, ctx).await?; + let coin_info = ctx.db.coin(utxo_id)?; + let witness_index = c.witness_index.decompress(ctx).await?; + let predicate_gas_used = c.predicate_gas_used.decompress(ctx).await?; + let predicate = c.predicate.decompress(ctx).await?; + let predicate_data = c.predicate_data.decompress(ctx).await?; + Ok(Self { + utxo_id, + owner: coin_info.owner, + amount: coin_info.amount, + asset_id: coin_info.asset_id, + tx_pointer: Default::default(), + witness_index, + predicate_gas_used, + predicate, + predicate_data, + }) + } +} + +impl DecompressibleBy> for Message +where + D: DecompressDb, + Specification: MessageSpecification, + Specification::Data: DecompressibleBy> + Default, + Specification::Predicate: DecompressibleBy>, + Specification::PredicateData: DecompressibleBy>, + Specification::PredicateGasUsed: DecompressibleBy>, + Specification::Witness: DecompressibleBy>, +{ + async fn decompress_with( + c: as Compressible>::Compressed, + ctx: &DecompressCtx, + ) -> Result, DecompressError> { + let msg = ctx.db.message(c.nonce)?; + let witness_index = c.witness_index.decompress(ctx).await?; + let predicate_gas_used = c.predicate_gas_used.decompress(ctx).await?; + let predicate = c.predicate.decompress(ctx).await?; + let predicate_data = c.predicate_data.decompress(ctx).await?; + let mut message: Message = Message { + sender: msg.sender, + recipient: msg.recipient, + amount: msg.amount, + nonce: c.nonce, + witness_index, + predicate_gas_used, + data: Default::default(), + predicate, + predicate_data, + 
}; + + if let Some(data) = message.data.as_mut_field() { + data.clone_from(&msg.data) + } + + Ok(message) + } +} + +impl DecompressibleBy> for Mint { + async fn decompress_with( + c: Self::Compressed, + ctx: &DecompressCtx, + ) -> Result { + Ok(Transaction::mint( + Default::default(), // TODO: what should this we do with this? + c.input_contract.decompress(ctx).await?, + c.output_contract.decompress(ctx).await?, + c.mint_amount.decompress(ctx).await?, + c.mint_asset_id.decompress(ctx).await?, + c.gas_price.decompress(ctx).await?, + )) + } +} diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index a386540da80..4d74d759709 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -8,11 +8,6 @@ pub mod decompress; mod eviction_policy; pub mod ports; mod tables; -mod context { - pub mod compress; - pub mod decompress; - pub mod prepare; -} pub use tables::{ RegistryKeyspace, diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index f861ed55916..b5d8d92d59c 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -15,13 +15,13 @@ use fuel_core_types::{ use std::collections::HashMap; use crate::{ - compress::CompressDb, - context::{ - compress::CompressCtx, - decompress::DecompressCtx, - prepare::PrepareCtx, + compress::{ + CompressCtx, + CompressDb, + PrepareCtx, }, decompress::{ + DecompressCtx, DecompressDb, DecompressError, }, @@ -34,30 +34,23 @@ use crate::{ macro_rules! tables { ($($name:ident: $type:ty),*$(,)?) 
=> { #[doc = "RegistryKey namespaces"] + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] + #[allow(non_camel_case_types)] // Match names in structs exactly + pub enum RegistryKeyspace { + $( + $name, + )* + } + + #[doc = "RegistryKey namespace with an associated typed value"] #[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] #[allow(non_camel_case_types)] // Match names in structs exactly - #[derive(strum::EnumDiscriminants)] - #[strum_discriminants(name(RegistryKeyspace))] - #[strum_discriminants(derive(Hash, serde::Serialize, serde::Deserialize))] - #[strum_discriminants(allow(non_camel_case_types))] pub enum RegistryKeyspaceValue { $( $name($type), )* } - - impl RegistryKeyspaceValue { - pub fn keyspace(&self) -> RegistryKeyspace { - match self { - $( - RegistryKeyspaceValue::$name(_) => RegistryKeyspace::$name, - )* - } - } - } - - #[doc = "A value for each keyspace"] #[derive(Debug, Clone, Default)] pub struct PerRegistryKeyspace { From e165e9e533727cd74347fedb0a3c1ba29cee7f08 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 25 Sep 2024 15:08:35 +0300 Subject: [PATCH 071/112] Use a separate GraphQL complexity cost for da compressed blocks --- crates/fuel-core/src/graphql_api.rs | 2 ++ crates/fuel-core/src/schema/da_compressed.rs | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/fuel-core/src/graphql_api.rs b/crates/fuel-core/src/graphql_api.rs index 04dc2a75b47..8df7d16b3ac 100644 --- a/crates/fuel-core/src/graphql_api.rs +++ b/crates/fuel-core/src/graphql_api.rs @@ -41,6 +41,7 @@ pub struct Costs { pub storage_read: usize, pub storage_iterator: usize, pub bytecode_read: usize, + pub da_compressed_block_read: usize, } pub const QUERY_COSTS: Costs = Costs { @@ -60,6 +61,7 @@ pub const QUERY_COSTS: Costs = Costs { storage_read: 10, storage_iterator: 100, bytecode_read: 2000, + da_compressed_block_read: 1000, }; #[derive(Clone, Debug)] diff --git 
a/crates/fuel-core/src/schema/da_compressed.rs b/crates/fuel-core/src/schema/da_compressed.rs index 09e8d2b1c88..3af336f8ba9 100644 --- a/crates/fuel-core/src/schema/da_compressed.rs +++ b/crates/fuel-core/src/schema/da_compressed.rs @@ -37,7 +37,7 @@ pub struct DaCompressedBlockQuery; #[Object] impl DaCompressedBlockQuery { - #[graphql(complexity = "2 * QUERY_COSTS.storage_read + child_complexity")] + #[graphql(complexity = "QUERY_COSTS.da_compressed_block_read")] async fn da_compressed_block( &self, ctx: &Context<'_>, From e036b824b9c99865db4ddfb10cee5fcc5568b6cc Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 25 Sep 2024 15:27:36 +0300 Subject: [PATCH 072/112] Use PartialBlockHeader in compressed block header (instead of custom type) --- crates/compression/src/compress.rs | 12 +----- crates/compression/src/decompress.rs | 6 +-- crates/compression/src/lib.rs | 60 +++++++++++++--------------- 3 files changed, 31 insertions(+), 47 deletions(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 0990045c194..b354f58ee34 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -13,7 +13,6 @@ use crate::{ TemporalRegistryAll, }, CompressedBlockPayload, - Header, }; use fuel_core_types::{ blockchain::block::Block, @@ -78,16 +77,7 @@ pub async fn compress( let compact = CompressedBlockPayload { registrations, registrations_root: Bytes32::default(), /* TODO: https://github.com/FuelLabs/fuel-core/issues/2232 */ - header: Header { - da_height: block.header().da_height, - prev_root: *block.header().prev_root(), - consensus_parameters_version: block.header().consensus_parameters_version, - state_transition_bytecode_version: block - .header() - .state_transition_bytecode_version, - height: *block.header().height(), - time: block.header().time(), - }, + header: block.header().into(), transactions, }; diff --git a/crates/compression/src/decompress.rs b/crates/compression/src/decompress.rs index 
a980be0a4e8..76778541420 100644 --- a/crates/compression/src/decompress.rs +++ b/crates/compression/src/decompress.rs @@ -94,9 +94,9 @@ pub async fn decompress( generated: Empty, }, consensus: ConsensusHeader { - prev_root: compressed.header.prev_root, - height: compressed.header.height, - time: compressed.header.time, + prev_root: *compressed.header.prev_root(), + height: *compressed.header.height(), + time: *compressed.header.time(), generated: Empty, }, }, diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 4d74d759709..37aa6f6f163 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -20,19 +20,9 @@ use serde::{ }; use fuel_core_types::{ - blockchain::{ - header::{ - ConsensusParametersVersion, - StateTransitionBytecodeVersion, - }, - primitives::DaBlockHeight, - }, + blockchain::header::PartialBlockHeader, fuel_tx::CompressedTransaction, - fuel_types::{ - BlockHeight, - Bytes32, - }, - tai64::Tai64, + fuel_types::Bytes32, }; use tables::RegistrationsPerTable; @@ -44,25 +34,21 @@ struct CompressedBlockPayload { /// Merkle root of the temporal registry state registrations_root: Bytes32, /// Compressed block header - header: Header, + header: PartialBlockHeader, /// Compressed transactions transactions: Vec, } -/// Fuel block header with only the fields required to reconstruct it. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -struct Header { - da_height: DaBlockHeight, - prev_root: Bytes32, - height: BlockHeight, - time: Tai64, - consensus_parameters_version: ConsensusParametersVersion, - state_transition_bytecode_version: StateTransitionBytecodeVersion, -} - #[cfg(test)] mod tests { use fuel_core_types::{ + blockchain::{ + header::{ + ApplicationHeader, + ConsensusHeader, + }, + primitives::Empty, + }, fuel_compression::RegistryKey, fuel_tx::{ input::PredicateCode, @@ -71,6 +57,7 @@ mod tests { ContractId, ScriptCode, }, + tai64::Tai64, }; use proptest::prelude::*; use tables::PerRegistryKeyspaceMap; @@ -140,17 +127,24 @@ mod tests { registrations.insert(key, ksv); } - let original = CompressedBlockPayload { - registrations: RegistrationsPerTable::try_from(registrations).unwrap(), - registrations_root: registrations_root.into(), - header: Header { + let header = PartialBlockHeader { + application: ApplicationHeader { da_height: da_height.into(), - prev_root: prev_root.into(), - height: height.into(), consensus_parameters_version, state_transition_bytecode_version, - time: Tai64::UNIX_EPOCH, + generated: Empty, }, + consensus: ConsensusHeader { + prev_root: prev_root.into(), + height: height.into(), + time: Tai64::UNIX_EPOCH, + generated: Empty + } + }; + let original = CompressedBlockPayload { + registrations: RegistrationsPerTable::try_from(registrations).unwrap(), + registrations_root: registrations_root.into(), + header, transactions: vec![], }; @@ -169,8 +163,8 @@ mod tests { assert_eq!(registrations_root, original.registrations_root); assert_eq!(header.da_height, da_height.into()); - assert_eq!(header.prev_root, prev_root.into()); - assert_eq!(header.height, height.into()); + assert_eq!(*header.prev_root(), prev_root.into()); + assert_eq!(*header.height(), height.into()); assert_eq!(header.consensus_parameters_version, consensus_parameters_version); assert_eq!(header.state_transition_bytecode_version, 
state_transition_bytecode_version); From 9a75b4b58d00abf5ae4508feeee8ad2baef68349 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 25 Sep 2024 15:59:01 +0300 Subject: [PATCH 073/112] Use enum for versioning --- Cargo.lock | 1 + crates/compression/Cargo.toml | 1 + crates/compression/README.md | 2 +- crates/compression/src/compress.rs | 11 ++-- crates/compression/src/decompress.rs | 99 +++++++++++++++++++++++++--- crates/compression/src/lib.rs | 14 ++-- 6 files changed, 108 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 71c2d7a930b..40736b678a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3366,6 +3366,7 @@ dependencies = [ "rand", "serde", "thiserror", + "tokio", ] [[package]] diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index ea8763654c4..49f95d0dfe3 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -30,6 +30,7 @@ thiserror = { workspace = true } [dev-dependencies] proptest = { workspace = true } +tokio = { workspace = true} [features] test-helpers = [ diff --git a/crates/compression/README.md b/crates/compression/README.md index ae3005b09ad..685a5d2a3ae 100644 --- a/crates/compression/README.md +++ b/crates/compression/README.md @@ -2,7 +2,7 @@ ## Compressed block header -Each compressed block begins with a single-byte version field, so that it's possible to change the format later. +Each compressed block begins with a version field, so that it's possible to change the format later. 
## Temporal registry diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index b354f58ee34..e38ed18ecbd 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -12,7 +12,8 @@ use crate::{ RegistrationsPerTable, TemporalRegistryAll, }, - CompressedBlockPayload, + CompressedBlock, + CompressedBlockPayloadV0, }; use fuel_core_types::{ blockchain::block::Block, @@ -74,17 +75,15 @@ pub async fn compress( registrations.write_to_registry(&mut ctx.db)?; // Construct the actual compacted block - let compact = CompressedBlockPayload { + let compact = CompressedBlockPayloadV0 { registrations, registrations_root: Bytes32::default(), /* TODO: https://github.com/FuelLabs/fuel-core/issues/2232 */ header: block.header().into(), transactions, }; - let version = 0u8; - - let compressed = - postcard::to_allocvec(&(version, compact)).expect("Serialization cannot fail"); + let compressed = postcard::to_allocvec(&CompressedBlock::V0(compact)) + .expect("Serialization cannot fail"); Ok(compressed) } diff --git a/crates/compression/src/decompress.rs b/crates/compression/src/decompress.rs index 76778541420..49b9d3cc67b 100644 --- a/crates/compression/src/decompress.rs +++ b/crates/compression/src/decompress.rs @@ -1,7 +1,7 @@ use crate::{ ports::HistoryLookup, tables::TemporalRegistryAll, - CompressedBlockPayload, + CompressedBlock, }; use fuel_core_types::{ blockchain::{ @@ -42,9 +42,7 @@ use fuel_core_types::{ pub enum DecompressError { #[error("Only the next sequential block can be decompressed")] NotLatest, - #[error("Unknown compression version")] - UnknownVersion, - #[error("Deserialization error: {0}")] + #[error("Deserialization error: {0} (possibly unknown version)")] Postcard(#[from] postcard::Error), /// Other errors #[error("Unknown error: {0}")] @@ -58,11 +56,8 @@ pub async fn decompress( mut db: D, block: Vec, ) -> Result { - if block.is_empty() || block[0] != 0 { - return 
Err(DecompressError::UnknownVersion); - } - - let compressed: CompressedBlockPayload = postcard::from_bytes(&block[1..])?; + let compressed: CompressedBlock = postcard::from_bytes(&block)?; + let CompressedBlock::V0(compressed) = compressed; // TODO: should be store height on da just to have this check? // if *block.header.height != db.next_block_height()? { @@ -208,3 +203,89 @@ impl DecompressibleBy> for Mint { )) } } + +#[cfg(test)] +mod tests { + use crate::ports::TemporalRegistry; + + use super::*; + use fuel_core_types::{ + fuel_compression::RegistryKey, + fuel_tx::{ + input::PredicateCode, + Address, + AssetId, + ContractId, + ScriptCode, + }, + }; + use serde::{ + Deserialize, + Serialize, + }; + + pub struct MockDb; + impl HistoryLookup for MockDb { + fn utxo_id(&self, _: CompressedUtxoId) -> anyhow::Result { + unimplemented!() + } + + fn coin(&self, _: UtxoId) -> anyhow::Result { + unimplemented!() + } + + fn message( + &self, + _: fuel_core_types::fuel_types::Nonce, + ) -> anyhow::Result { + unimplemented!() + } + } + macro_rules! 
mock_temporal { + ($type:ty) => { + impl TemporalRegistry<$type> for MockDb { + fn read_registry(&self, _key: RegistryKey) -> anyhow::Result<$type> { + todo!() + } + + fn write_registry( + &mut self, + _key: RegistryKey, + _value: $type, + ) -> anyhow::Result<()> { + todo!() + } + + fn registry_index_lookup( + &self, + _value: &$type, + ) -> anyhow::Result> { + todo!() + } + } + }; + } + mock_temporal!(Address); + mock_temporal!(AssetId); + mock_temporal!(ContractId); + mock_temporal!(ScriptCode); + mock_temporal!(PredicateCode); + + #[tokio::test] + async fn decompress_block_with_unknown_version() { + #[derive(Clone, Serialize, Deserialize)] + enum CompressedBlockWithNewVersions { + V0(crate::CompressedBlockPayloadV0), + NewVersion(u32), + #[serde(untagged)] + Unknown, + } + + let block = + postcard::to_stdvec(&CompressedBlockWithNewVersions::NewVersion(1234)) + .unwrap(); + + let result = decompress(MockDb, block).await; + assert!(matches!(result, Err(DecompressError::Postcard(_)))); + } +} diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 37aa6f6f163..595bb75ed86 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -28,7 +28,7 @@ use tables::RegistrationsPerTable; /// Compressed block, without the preceding version byte. #[derive(Clone, Serialize, Deserialize)] -struct CompressedBlockPayload { +struct CompressedBlockPayloadV0 { /// Temporal registry insertions registrations: RegistrationsPerTable, /// Merkle root of the temporal registry state @@ -39,6 +39,12 @@ struct CompressedBlockPayload { transactions: Vec, } +/// Versioned compressed block. 
+#[derive(Clone, Serialize, Deserialize)] +enum CompressedBlock { + V0(CompressedBlockPayloadV0), +} + #[cfg(test)] mod tests { use fuel_core_types::{ @@ -141,7 +147,7 @@ mod tests { generated: Empty } }; - let original = CompressedBlockPayload { + let original = CompressedBlockPayloadV0 { registrations: RegistrationsPerTable::try_from(registrations).unwrap(), registrations_root: registrations_root.into(), header, @@ -149,10 +155,10 @@ mod tests { }; let compressed = postcard::to_allocvec(&original).unwrap(); - let decompressed: CompressedBlockPayload = + let decompressed: CompressedBlockPayloadV0 = postcard::from_bytes(&compressed).unwrap(); - let CompressedBlockPayload { + let CompressedBlockPayloadV0 { registrations, registrations_root, header, From dcc32baa625522f6f7f1d66a9ddac33775b43afb Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 25 Sep 2024 16:19:10 +0300 Subject: [PATCH 074/112] Move commented-out height check code to a note in the doc comment --- crates/compression/src/compress.rs | 5 +---- crates/compression/src/decompress.rs | 6 +----- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index e38ed18ecbd..993acb395d7 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -42,14 +42,11 @@ pub enum Error { pub trait CompressDb: TemporalRegistryAll + UtxoIdToPointer {} impl CompressDb for T where T: TemporalRegistryAll + UtxoIdToPointer {} +/// This must be called for all new blocks in sequence, otherwise the result will be garbage. pub async fn compress( db: D, block: &Block, ) -> Result, Error> { - // if *block.header().height() != db.next_block_height()? 
{ - // return Err(Error::NotLatest); - // } - let target = block.transactions().to_vec(); let mut prepare_ctx = PrepareCtx { diff --git a/crates/compression/src/decompress.rs b/crates/compression/src/decompress.rs index 49b9d3cc67b..c0157a6028d 100644 --- a/crates/compression/src/decompress.rs +++ b/crates/compression/src/decompress.rs @@ -52,6 +52,7 @@ pub enum DecompressError { pub trait DecompressDb: TemporalRegistryAll + HistoryLookup {} impl DecompressDb for T where T: TemporalRegistryAll + HistoryLookup {} +/// This must be called for all decompressed blocks in sequence, otherwise the result will be garbage. pub async fn decompress( mut db: D, block: Vec, @@ -59,11 +60,6 @@ pub async fn decompress( let compressed: CompressedBlock = postcard::from_bytes(&block)?; let CompressedBlock::V0(compressed) = compressed; - // TODO: should be store height on da just to have this check? - // if *block.header.height != db.next_block_height()? { - // return Err(DecompressError::NotLatest); - // } - // TODO: merkle root verification: https://github.com/FuelLabs/fuel-core/issues/2232 compressed.registrations.write_to_registry(&mut db)?; From 946a2e8f23fef2109fb9ba286ebd2d451c2af7cb Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 26 Sep 2024 05:39:40 +0300 Subject: [PATCH 075/112] Use separate db table for each temporal registry keyspace --- Cargo.lock | 3 + crates/compression/Cargo.toml | 1 + crates/compression/src/compress.rs | 10 +- crates/compression/src/decompress.rs | 17 +- crates/compression/src/eviction_policy.rs | 69 +++--- crates/compression/src/lib.rs | 30 +-- crates/compression/src/ports.rs | 19 +- crates/compression/src/tables.rs | 63 +++--- crates/fuel-core/Cargo.toml | 2 + .../src/graphql_api/da_compression.rs | 195 +++++++++-------- crates/fuel-core/src/graphql_api/ports.rs | 37 +++- crates/fuel-core/src/graphql_api/storage.rs | 39 +++- .../src/graphql_api/storage/da_compression.rs | 204 ++++++++---------- 13 files changed, 362 insertions(+), 327 
deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a7ba57dc95c..3f203e34fdf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3199,12 +3199,14 @@ dependencies = [ "itertools 0.12.1", "mockall", "num_cpus", + "paste", "postcard", "proptest", "rand", "rocksdb", "serde", "serde_json", + "sha2 0.10.8", "strum 0.25.0", "strum_macros 0.25.3", "tempfile", @@ -3361,6 +3363,7 @@ version = "0.36.0" dependencies = [ "anyhow", "fuel-core-types 0.36.0", + "paste", "postcard", "proptest", "rand", diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 49f95d0dfe3..a81079862fa 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -23,6 +23,7 @@ fuel-core-types = { workspace = true, features = [ "serde", "da-compression", ] } +paste = "1" postcard = { version = "1.0", features = ["use-std"] } rand = { workspace = true, optional = true } serde = { version = "1.0", features = ["derive"] } diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 993acb395d7..27d641c506e 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -2,10 +2,7 @@ use std::collections::HashSet; use crate::{ eviction_policy::CacheEvictor, - ports::{ - EvictorDb, - UtxoIdToPointer, - }, + ports::UtxoIdToPointer, tables::{ PerRegistryKeyspace, PerRegistryKeyspaceMap, @@ -43,10 +40,7 @@ pub trait CompressDb: TemporalRegistryAll + UtxoIdToPointer {} impl CompressDb for T where T: TemporalRegistryAll + UtxoIdToPointer {} /// This must be called for all new blocks in sequence, otherwise the result will be garbage. 
-pub async fn compress( - db: D, - block: &Block, -) -> Result, Error> { +pub async fn compress(db: D, block: &Block) -> Result, Error> { let target = block.transactions().to_vec(); let mut prepare_ctx = PrepareCtx { diff --git a/crates/compression/src/decompress.rs b/crates/compression/src/decompress.rs index c0157a6028d..a4b78f762a2 100644 --- a/crates/compression/src/decompress.rs +++ b/crates/compression/src/decompress.rs @@ -202,7 +202,10 @@ impl DecompressibleBy> for Mint { #[cfg(test)] mod tests { - use crate::ports::TemporalRegistry; + use crate::ports::{ + EvictorDb, + TemporalRegistry, + }; use super::*; use fuel_core_types::{ @@ -247,7 +250,7 @@ mod tests { fn write_registry( &mut self, _key: RegistryKey, - _value: $type, + _value: &$type, ) -> anyhow::Result<()> { todo!() } @@ -259,6 +262,16 @@ mod tests { todo!() } } + + impl EvictorDb<$type> for MockDb { + fn write_latest(&mut self, _key: RegistryKey) -> anyhow::Result<()> { + todo!() + } + + fn read_latest(&mut self) -> anyhow::Result { + todo!() + } + } }; } mock_temporal!(Address); diff --git a/crates/compression/src/eviction_policy.rs b/crates/compression/src/eviction_policy.rs index d57fef0ba3a..b85f5f2eb85 100644 --- a/crates/compression/src/eviction_policy.rs +++ b/crates/compression/src/eviction_policy.rs @@ -1,6 +1,15 @@ use std::collections::HashSet; -use fuel_core_types::fuel_compression::RegistryKey; +use fuel_core_types::{ + fuel_compression::RegistryKey, + fuel_tx::{ + input::PredicateCode, + Address, + AssetId, + ContractId, + ScriptCode, + }, +}; use crate::{ ports::EvictorDb, @@ -15,29 +24,39 @@ pub struct CacheEvictor { pub keep_keys: PerRegistryKeyspace>, } -impl CacheEvictor { - /// Get a key, evicting an old value if necessary - pub fn next_key( - &mut self, - db: &mut D, - keyspace: RegistryKeyspace, - ) -> anyhow::Result - where - D: EvictorDb, - { - // Pick first key not in the set - // TODO: use a proper algo, maybe LRU? 
- let mut key = db.read_latest(keyspace)?; - - debug_assert!(self.keep_keys[keyspace].len() < 2usize.pow(24).saturating_sub(2)); - - while self.keep_keys[keyspace].contains(&key) { - key = key.next(); +macro_rules! impl_evictor { + ($type:ty) => { paste::paste! { + impl CacheEvictor { + /// Get a key, evicting an old value if necessary + #[allow(non_snake_case)] // Match names of types exactly + pub fn [< next_key_ $type >] ( + &mut self, + db: &mut D, + ) -> anyhow::Result + where + D: EvictorDb<$type>, + { + // Pick first key not in the set + // TODO: use a proper algo, maybe LRU? + let mut key = db.read_latest()?; + + debug_assert!(self.keep_keys[RegistryKeyspace::[<$type>]].len() < 2usize.pow(24).saturating_sub(2)); + + while self.keep_keys[RegistryKeyspace::[<$type>]].contains(&key) { + key = key.next(); + } + + db.write_latest(key)?; + + self.keep_keys[RegistryKeyspace::[<$type>]].insert(key); + Ok(key) + } } - - db.write_latest(keyspace, key)?; - - self.keep_keys[keyspace].insert(key); - Ok(key) - } + }}; } + +impl_evictor!(Address); +impl_evictor!(AssetId); +impl_evictor!(ContractId); +impl_evictor!(ScriptCode); +impl_evictor!(PredicateCode); diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 595bb75ed86..25dda71277e 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -72,35 +72,35 @@ mod tests { fn keyspace() -> impl Strategy { prop_oneof![ - Just(RegistryKeyspace::address), - Just(RegistryKeyspace::asset_id), - Just(RegistryKeyspace::contract_id), - Just(RegistryKeyspace::script_code), - Just(RegistryKeyspace::predicate_code), + Just(RegistryKeyspace::Address), + Just(RegistryKeyspace::AssetId), + Just(RegistryKeyspace::ContractId), + Just(RegistryKeyspace::ScriptCode), + Just(RegistryKeyspace::PredicateCode), ] } fn keyspace_value() -> impl Strategy { (keyspace(), prop::array::uniform32(0..u8::MAX)).prop_map(|(keyspace, value)| { match keyspace { - RegistryKeyspace::address => { - 
RegistryKeyspaceValue::address(Address::new(value)) + RegistryKeyspace::Address => { + RegistryKeyspaceValue::Address(Address::new(value)) } - RegistryKeyspace::asset_id => { - RegistryKeyspaceValue::asset_id(AssetId::new(value)) + RegistryKeyspace::AssetId => { + RegistryKeyspaceValue::AssetId(AssetId::new(value)) } - RegistryKeyspace::contract_id => { - RegistryKeyspaceValue::contract_id(ContractId::new(value)) + RegistryKeyspace::ContractId => { + RegistryKeyspaceValue::ContractId(ContractId::new(value)) } - RegistryKeyspace::script_code => { + RegistryKeyspace::ScriptCode => { let len = (value[0] % 32) as usize; - RegistryKeyspaceValue::script_code(ScriptCode { + RegistryKeyspaceValue::ScriptCode(ScriptCode { bytes: value[..len].to_vec(), }) } - RegistryKeyspace::predicate_code => { + RegistryKeyspace::PredicateCode => { let len = (value[0] % 32) as usize; - RegistryKeyspaceValue::predicate_code(PredicateCode { + RegistryKeyspaceValue::PredicateCode(PredicateCode { bytes: value[..len].to_vec(), }) } diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index 500b2eb940c..65b06f39a28 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -12,8 +12,6 @@ use fuel_core_types::{ fuel_types::Nonce, }; -use crate::tables::RegistryKeyspace; - /// Rolling cache for compression. /// Holds the latest state which can be event sourced from the compressed blocks. /// The changes done using this trait in a single call to `compress` or `decompress` @@ -23,7 +21,7 @@ pub trait TemporalRegistry { fn read_registry(&self, key: RegistryKey) -> anyhow::Result; /// Reads a value from the registry at its current height. - fn write_registry(&mut self, key: RegistryKey, value: T) -> anyhow::Result<()>; + fn write_registry(&mut self, key: RegistryKey, value: &T) -> anyhow::Result<()>; /// Lookup registry key by the value. 
fn registry_index_lookup(&self, value: &T) -> anyhow::Result>; @@ -59,14 +57,9 @@ pub struct MessageInfo { } /// Temporal registry evictor state storage, -/// currently backed by a `DaCompressionTemporalRegistryEvictor` -/// column in the offchain database. -pub trait EvictorDb { - fn read_latest(&mut self, keyspace: RegistryKeyspace) -> anyhow::Result; - - fn write_latest( - &mut self, - keyspace: RegistryKeyspace, - key: RegistryKey, - ) -> anyhow::Result<()>; +/// currently backed by a `DaCompressionTemporalRegistryEvictor*` +/// columns in the offchain database. +pub trait EvictorDb { + fn read_latest(&mut self) -> anyhow::Result; + fn write_latest(&mut self, key: RegistryKey) -> anyhow::Result<()>; } diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index b5d8d92d59c..b0994f2d5d1 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -32,29 +32,28 @@ use crate::{ }; macro_rules! tables { - ($($name:ident: $type:ty),*$(,)?) => { + ($($type:ty),*$(,)?) => { paste::paste! { #[doc = "RegistryKey namespaces"] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] - #[allow(non_camel_case_types)] // Match names in structs exactly pub enum RegistryKeyspace { $( - $name, + [<$type>], )* } #[doc = "RegistryKey namespace with an associated typed value"] #[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] - #[allow(non_camel_case_types)] // Match names in structs exactly pub enum RegistryKeyspaceValue { $( - $name($type), + [<$type>]($type), )* } #[doc = "A value for each keyspace"] #[derive(Debug, Clone, Default)] + #[allow(non_snake_case)] // Match type names exactly pub struct PerRegistryKeyspace { - $(pub $name: T,)* + $(pub [<$type>]: T,)* } impl core::ops::Index for PerRegistryKeyspace { type Output = T; @@ -62,7 +61,7 @@ macro_rules! 
tables { fn index(&self, index: RegistryKeyspace) -> &Self::Output { match index { $( - RegistryKeyspace::$name => &self.$name, + RegistryKeyspace::[<$type>] => &self.[<$type>], )* } } @@ -71,7 +70,7 @@ macro_rules! tables { fn index_mut(&mut self, index: RegistryKeyspace) -> &mut Self::Output { match index { $( - RegistryKeyspace::$name => &mut self.$name, + RegistryKeyspace::[<$type>] => &mut self.[<$type>], )* } } @@ -79,8 +78,9 @@ macro_rules! tables { #[doc = "Key-value mapping for each keyspace"] #[derive(Debug, Clone, Default)] + #[allow(non_snake_case)] // Match type names exactly pub struct PerRegistryKeyspaceMap { - $(pub $name: HashMap,)* + $(pub [<$type>]: HashMap,)* } impl PerRegistryKeyspaceMap { @@ -88,8 +88,8 @@ macro_rules! tables { pub fn insert(&mut self, key: RegistryKey, value: RegistryKeyspaceValue) { match value { $( - RegistryKeyspaceValue::$name(value) => { - self.$name.insert(key, value); + RegistryKeyspaceValue::[<$type>](value) => { + self.[<$type>].insert(key, value); } )* } @@ -98,8 +98,9 @@ macro_rules! tables { #[doc = "The set of registrations for each table, as used in the compressed block header"] #[derive(Debug, Clone, PartialEq, Default, serde::Serialize, serde::Deserialize)] + #[allow(non_snake_case)] // Match type names exactly pub struct RegistrationsPerTable { - $(pub $name: Vec<(RegistryKey, $type)>,)* + $(pub [<$type>]: Vec<(RegistryKey, $type)>,)* } impl TryFrom for RegistrationsPerTable { @@ -108,8 +109,8 @@ macro_rules! tables { fn try_from(value: PerRegistryKeyspaceMap) -> Result { let mut result = Self::default(); $( - for (key, value) in value.$name.into_iter() { - result.$name.push((key, value)); + for (key, value) in value.[<$type>].into_iter() { + result.[<$type>].push((key, value)); } )* Ok(result) @@ -118,18 +119,20 @@ macro_rules! 
tables { pub trait TemporalRegistryAll: Sized $( + TemporalRegistry<$type> + + EvictorDb<$type> )* {} impl TemporalRegistryAll for T where T: Sized $( + TemporalRegistry<$type> + + EvictorDb<$type> )* {} impl RegistrationsPerTable { pub(crate) fn write_to_registry(&self, registry: &mut R) -> anyhow::Result<()> { $( - for (key, value) in self.$name.iter() { - registry.write_registry(*key, value.clone())?; + for (key, value) in self.[<$type>].iter() { + registry.write_registry(*key, value)?; } )* @@ -147,13 +150,13 @@ macro_rules! tables { return Ok(RegistryKey::ZERO); } if let Some(found) = ctx.db.registry_index_lookup(self)? { - ctx.accessed_keys[RegistryKeyspace::$name].insert(found); + ctx.accessed_keys[RegistryKeyspace::[<$type>]].insert(found); } Ok(RegistryKey::ZERO) } } - impl CompressibleBy> for $type { + impl CompressibleBy> for $type { async fn compress_with( &self, ctx: &mut CompressCtx, @@ -165,8 +168,8 @@ macro_rules! tables { return Ok(found); } - let key = ctx.cache_evictor.next_key(&mut ctx.db, RegistryKeyspace::$name)?; - let old = ctx.changes.$name.insert(key, self.clone()); + let key = ctx.cache_evictor.[< next_key_ $type >](&mut ctx.db)?; + let old = ctx.changes.[<$type>].insert(key, self.clone()); assert!(old.is_none(), "Key collision in registry substitution"); Ok(key) } @@ -184,27 +187,21 @@ macro_rules! 
tables { } } )* - }; + }}; } -tables!( - address: Address, - asset_id: AssetId, - contract_id: ContractId, - script_code: ScriptCode, - predicate_code: PredicateCode, -); +tables!(Address, AssetId, ContractId, ScriptCode, PredicateCode,); // TODO: move inside the macro when this stabilizes: https://github.com/rust-lang/rust/pull/122808 #[cfg(any(test, feature = "test-helpers"))] impl rand::prelude::Distribution for rand::distributions::Standard { fn sample(&self, rng: &mut R) -> RegistryKeyspace { match rng.gen_range(0..5) { - 0 => RegistryKeyspace::address, - 1 => RegistryKeyspace::asset_id, - 2 => RegistryKeyspace::contract_id, - 3 => RegistryKeyspace::script_code, - _ => RegistryKeyspace::predicate_code, + 0 => RegistryKeyspace::Address, + 1 => RegistryKeyspace::AssetId, + 2 => RegistryKeyspace::ContractId, + 3 => RegistryKeyspace::ScriptCode, + _ => RegistryKeyspace::PredicateCode, } } } diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index 3fbbba83141..ac1626659aa 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -45,6 +45,7 @@ hyper = { workspace = true } indicatif = { workspace = true, default-features = true } itertools = { workspace = true } num_cpus = { version = "1.16.0", optional = true } +paste = "1" postcard = { workspace = true, optional = true } rand = { workspace = true } rocksdb = { version = "0.21", default-features = false, features = [ @@ -53,6 +54,7 @@ rocksdb = { version = "0.21", default-features = false, features = [ ], optional = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true, features = ["raw_value"] } +sha2 = "0.10.8" strum = { workspace = true, features = ["derive"] } strum_macros = { workspace = true } tempfile = { workspace = true, optional = true } diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index 9f594ffc255..f1ceffac514 100644 --- 
a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -1,11 +1,6 @@ use crate::{ fuel_core_graphql_api::ports::worker::OffChainDatabaseTransaction, - graphql_api::storage::da_compression::{ - DaCompressedBlocks, - DaCompressionTemporalRegistry, - DaCompressionTemporalRegistryEvictor, - DaCompressionTemporalRegistryIndex, - }, + graphql_api::storage::da_compression::*, }; use fuel_core_compression::{ compress::compress, @@ -14,8 +9,6 @@ use fuel_core_compression::{ TemporalRegistry, UtxoIdToPointer, }, - RegistryKeyspace, - RegistryKeyspaceValue, }; use fuel_core_storage::{ not_found, @@ -34,6 +27,10 @@ use fuel_core_types::{ services::executor::Event, }; use futures::FutureExt; +use sha2::{ + Digest, + Sha256, +}; /// Performs DA compression for a block and stores it in the database. pub fn da_compress_block( @@ -67,76 +64,104 @@ struct CompressTx<'a, Tx> { } macro_rules! impl_temporal_registry { - ($($name:ident: $type:ty),*$(,)?) => { - $( - impl<'a, Tx> TemporalRegistry<$type> for CompressTx<'a, Tx> - where - Tx: OffChainDatabaseTransaction, + ($type:ty, $index_value_fn:expr) => { paste::paste! { + impl<'a, Tx> TemporalRegistry<$type> for CompressTx<'a, Tx> + where + Tx: OffChainDatabaseTransaction, + { + fn read_registry( + &self, + key: fuel_core_types::fuel_compression::RegistryKey, + ) -> anyhow::Result<$type> { + Ok(self + .db_tx + .storage_as_ref::<[< DaCompressionTemporalRegistry $type >]>() + .get(&key)? + .ok_or(not_found!([< DaCompressionTemporalRegistry $type>]))? 
+ .into_owned()) + } + + fn write_registry( + &mut self, + key: fuel_core_types::fuel_compression::RegistryKey, + value: &$type, + ) -> anyhow::Result<()> { + // Write the actual value + self.db_tx + .storage_as_mut::<[< DaCompressionTemporalRegistry $type >]>() + .insert(&key, &value)?; + + let value_in_index: [u8; 32] = ($index_value_fn)(value); + + // Remove the overwritten value from index, if any + self.db_tx + .storage_as_mut::<[< DaCompressionTemporalRegistryIndex $type >]>() + .remove(&value_in_index)?; + + // Add the new value to the index + self.db_tx + .storage_as_mut::<[< DaCompressionTemporalRegistryIndex $type >]>() + .insert(&value_in_index, &key)?; + + Ok(()) + } + + fn registry_index_lookup( + &self, + value: &$type, + ) -> anyhow::Result> { - fn read_registry( - &self, - key: fuel_core_types::fuel_compression::RegistryKey, - ) -> anyhow::Result<$type> { - let v = self - .db_tx - .storage_as_ref::() - .get(&(RegistryKeyspace::$name, key))? - .ok_or(not_found!(DaCompressionTemporalRegistry))? - .into_owned(); - match v { - RegistryKeyspaceValue::$name(v) => Ok(v), - _ => anyhow::bail!("Unexpected value in the registry"), - } - } + let value_in_index: [u8; 32] = ($index_value_fn)(value); + Ok(self + .db_tx + .storage_as_ref::<[< DaCompressionTemporalRegistryIndex $type >]>() + .get(&value_in_index)? 
+ .map(|v| v.into_owned())) + } + } - fn write_registry( - &mut self, - key: fuel_core_types::fuel_compression::RegistryKey, - value: $type, - ) -> anyhow::Result<()> { - // Write the actual value - self.db_tx - .storage_as_mut::() - .insert(&(RegistryKeyspace::$name, key), &RegistryKeyspaceValue::$name(value.clone()))?; - - // Remove the overwritten value from index, if any - self.db_tx - .storage_as_mut::() - .remove(&RegistryKeyspaceValue::$name(value.clone()))?; - - // Add the new value to the index - self.db_tx - .storage_as_mut::() - .insert(&RegistryKeyspaceValue::$name(value), &key)?; - - Ok(()) - } + impl<'a, Tx> EvictorDb<$type> for CompressTx<'a, Tx> + where + Tx: OffChainDatabaseTransaction, + { + fn write_latest( + &mut self, + key: fuel_core_types::fuel_compression::RegistryKey, + ) -> anyhow::Result<()> { + self.db_tx + .storage_as_mut::<[< DaCompressionTemporalRegistryEvictor $type >]>() + .insert(&(), &key)?; + Ok(()) + } - fn registry_index_lookup( - &self, - value: &$type, - ) -> anyhow::Result> - { - Ok(self - .db_tx - .storage_as_ref::() - .get(&RegistryKeyspaceValue::$name(value.clone()))? - .map(|v| v.into_owned())) - } + fn read_latest( + &mut self, + ) -> anyhow::Result { + Ok(self + .db_tx + .storage_as_ref::<[< DaCompressionTemporalRegistryEvictor $type >]>() + .get(&())? + .ok_or(not_found!([< DaCompressionTemporalRegistryEvictor $type >]))? + .into_owned()) } + } - )* - }; + }}; } -// Arguments here should match the tables! macro from crates/compression/src/tables.rs -impl_temporal_registry! 
{ - address: Address, - asset_id: AssetId, - contract_id: ContractId, - script_code: ScriptCode, - predicate_code: PredicateCode, -} +impl_temporal_registry!(Address, |v: &Address| **v); +impl_temporal_registry!(AssetId, |v: &AssetId| **v); +impl_temporal_registry!(ContractId, |v: &ContractId| **v); +impl_temporal_registry!(ScriptCode, |v: &ScriptCode| { + let mut hasher = Sha256::new(); + hasher.update(&v.bytes); + hasher.finalize().into() +}); +impl_temporal_registry!(PredicateCode, |v: &PredicateCode| { + let mut hasher = Sha256::new(); + hasher.update(&v.bytes); + hasher.finalize().into() +}); impl<'a, Tx> UtxoIdToPointer for CompressTx<'a, Tx> where @@ -163,31 +188,3 @@ where panic!("UtxoId not found in the block events"); } } - -impl<'a, Tx> EvictorDb for CompressTx<'a, Tx> -where - Tx: OffChainDatabaseTransaction, -{ - fn write_latest( - &mut self, - keyspace: RegistryKeyspace, - key: fuel_core_types::fuel_compression::RegistryKey, - ) -> anyhow::Result<()> { - self.db_tx - .storage_as_mut::() - .insert(&keyspace, &key)?; - Ok(()) - } - - fn read_latest( - &mut self, - keyspace: RegistryKeyspace, - ) -> anyhow::Result { - Ok(self - .db_tx - .storage_as_ref::() - .get(&keyspace)? - .ok_or(not_found!(DaCompressionTemporalRegistryEvictor))? 
- .into_owned()) - } -} diff --git a/crates/fuel-core/src/graphql_api/ports.rs b/crates/fuel-core/src/graphql_api/ports.rs index 5cc3f498ab4..6b3da08384c 100644 --- a/crates/fuel-core/src/graphql_api/ports.rs +++ b/crates/fuel-core/src/graphql_api/ports.rs @@ -278,12 +278,7 @@ pub mod worker { }, }, graphql_api::storage::{ - da_compression::{ - DaCompressedBlocks, - DaCompressionTemporalRegistry, - DaCompressionTemporalRegistryEvictor, - DaCompressionTemporalRegistryIndex, - }, + da_compression::*, old::{ OldFuelBlockConsensus, OldFuelBlocks, @@ -327,8 +322,7 @@ pub mod worker { fn transaction(&mut self) -> Self::Transaction<'_>; } - pub trait OffChainDatabaseTransaction: - StorageMutate + pub trait OffChainDatabaseTransaction: StorageMutate + StorageMutate + StorageMutate + StorageMutate @@ -338,9 +332,30 @@ pub mod worker { + StorageMutate + StorageMutate + StorageMutate - + StorageMutate - + StorageMutate - + StorageMutate + + StorageMutate + + StorageMutate + + StorageMutate + + StorageMutate + + StorageMutate + + StorageMutate + + StorageMutate + + StorageMutate + + StorageMutate< + DaCompressionTemporalRegistryEvictorContractId, + Error = StorageError, + > + StorageMutate + + StorageMutate + + StorageMutate< + DaCompressionTemporalRegistryEvictorScriptCode, + Error = StorageError, + > + StorageMutate + + StorageMutate< + DaCompressionTemporalRegistryIndexPredicateCode, + Error = StorageError, + > + StorageMutate< + DaCompressionTemporalRegistryEvictorPredicateCode, + Error = StorageError, + > { fn record_tx_id_owner( &mut self, diff --git a/crates/fuel-core/src/graphql_api/storage.rs b/crates/fuel-core/src/graphql_api/storage.rs index c7818dbe38d..b1f8648949b 100644 --- a/crates/fuel-core/src/graphql_api/storage.rs +++ b/crates/fuel-core/src/graphql_api/storage.rs @@ -99,15 +99,36 @@ pub enum Column { DaCompressedBlocks = 14, /// DA compression metadata. DaCompressionMetadata = 15, - /// Temporal registry for DA compression. 
- /// See [`DaCompressionTemporalRegistry`](da_compression::DaCompressionTemporalRegistry) - DaCompressionTemporalRegistry = 16, - /// Temporal registry lookup index for DA compression. - /// See [`DaCompressionTemporalRegistryIndex`](da_compression::DaCompressionTemporalRegistryIndex) - DaCompressionTemporalRegistryIndex = 17, - /// Temporal registry evictor state. - /// See [`DaCompressionTemporalRegistryEvictor`](da_compression::DaCompressionTemporalRegistryEvictor) - DaCompressionTemporalRegistryEvictor = 18, + /// See [`DaCompressionTemporalRegistryAddress`](da_compression::DaCompressionTemporalRegistryAddress) + DaCompressionTemporalRegistryAddress = 16, + /// See [`DaCompressionTemporalRegistryIndexAddress`](da_compression::DaCompressionTemporalRegistryIndexAddress) + DaCompressionTemporalRegistryIndexAddress = 17, + /// See [`DaCompressionTemporalRegistryEvictorAddress`](da_compression::DaCompressionTemporalRegistryEvictorAddress) + DaCompressionTemporalRegistryEvictorAddress = 18, + /// See [`DaCompressionTemporalRegistryAssetId`](da_compression::DaCompressionTemporalRegistryAssetId) + DaCompressionTemporalRegistryAssetId = 19, + /// See [`DaCompressionTemporalRegistryIndexAssetId`](da_compression::DaCompressionTemporalRegistryIndexAssetId) + DaCompressionTemporalRegistryIndexAssetId = 20, + /// See [`DaCompressionTemporalRegistryEvictorAssetId`](da_compression::DaCompressionTemporalRegistryEvictorAssetId) + DaCompressionTemporalRegistryEvictorAssetId = 21, + /// See [`DaCompressionTemporalRegistryContractId`](da_compression::DaCompressionTemporalRegistryContractId) + DaCompressionTemporalRegistryContractId = 22, + /// See [`DaCompressionTemporalRegistryIndexContractId`](da_compression::DaCompressionTemporalRegistryIndexContractId) + DaCompressionTemporalRegistryIndexContractId = 23, + /// See [`DaCompressionTemporalRegistryEvictorContractId`](da_compression::DaCompressionTemporalRegistryEvictorContractId) + DaCompressionTemporalRegistryEvictorContractId = 
24, + /// See [`DaCompressionTemporalRegistryScriptCode`](da_compression::DaCompressionTemporalRegistryScriptCode) + DaCompressionTemporalRegistryScriptCode = 25, + /// See [`DaCompressionTemporalRegistryIndexScriptCode`](da_compression::DaCompressionTemporalRegistryIndexScriptCode) + DaCompressionTemporalRegistryIndexScriptCode = 26, + /// See [`DaCompressionTemporalRegistryEvictorScriptCode`](da_compression::DaCompressionTemporalRegistryEvictorScriptCode) + DaCompressionTemporalRegistryEvictorScriptCode = 27, + /// See [`DaCompressionTemporalRegistryPredicateCode`](da_compression::DaCompressionTemporalRegistryPredicateCode) + DaCompressionTemporalRegistryPredicateCode = 28, + /// See [`DaCompressionTemporalRegistryIndexPredicateCode`](da_compression::DaCompressionTemporalRegistryIndexPredicateCode) + DaCompressionTemporalRegistryIndexPredicateCode = 29, + /// See [`DaCompressionTemporalRegistryEvictorPredicateCode`](da_compression::DaCompressionTemporalRegistryEvictorPredicateCode) + DaCompressionTemporalRegistryEvictorPredicateCode = 30, } impl Column { diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression.rs b/crates/fuel-core/src/graphql_api/storage/da_compression.rs index 56d5cabcb83..1584488dbaf 100644 --- a/crates/fuel-core/src/graphql_api/storage/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/storage/da_compression.rs @@ -1,7 +1,3 @@ -use fuel_core_compression::{ - RegistryKeyspace, - RegistryKeyspaceValue, -}; use fuel_core_storage::{ blueprint::plain::Plain, codec::{ @@ -14,6 +10,13 @@ use fuel_core_storage::{ }; use fuel_core_types::{ fuel_compression::RegistryKey, + fuel_tx::{ + input::PredicateCode, + Address, + AssetId, + ContractId, + ScriptCode, + }, fuel_types::BlockHeight, }; @@ -35,135 +38,112 @@ impl TableWithBlueprint for DaCompressedBlocks { } } -pub struct DaCompressionTemporalRegistry; +macro_rules! temporal_registry { + ($type:ty) => { + paste::paste! 
{ + pub struct [< DaCompressionTemporalRegistry $type >]; -impl Mappable for DaCompressionTemporalRegistry { - type Key = Self::OwnedKey; - type OwnedKey = (RegistryKeyspace, RegistryKey); - type Value = Self::OwnedValue; - type OwnedValue = RegistryKeyspaceValue; -} + impl Mappable for [< DaCompressionTemporalRegistry $type >] { + type Key = Self::OwnedKey; + type OwnedKey = RegistryKey; + type Value = Self::OwnedValue; + type OwnedValue = $type; + } -impl TableWithBlueprint for DaCompressionTemporalRegistry { - type Blueprint = Plain; - type Column = super::Column; + impl TableWithBlueprint for [< DaCompressionTemporalRegistry $type >] { + type Blueprint = Plain; + type Column = super::Column; - fn column() -> Self::Column { - Self::Column::DaCompressionTemporalRegistry - } -} + fn column() -> Self::Column { + Self::Column::[< DaCompressionTemporalRegistry $type >] + } + } -pub struct DaCompressionTemporalRegistryIndex; + pub struct [< DaCompressionTemporalRegistryIndex $type >]; -impl Mappable for DaCompressionTemporalRegistryIndex { - type Key = Self::OwnedKey; - // TODO: should we hash the key? - type OwnedKey = RegistryKeyspaceValue; - type Value = Self::OwnedValue; - type OwnedValue = RegistryKey; -} + impl Mappable for [< DaCompressionTemporalRegistryIndex $type >] { + type Key = Self::OwnedKey; + type OwnedKey = [u8; 32]; // Hash + type Value = Self::OwnedValue; + type OwnedValue = RegistryKey; + } -impl TableWithBlueprint for DaCompressionTemporalRegistryIndex { - type Blueprint = Plain; - type Column = super::Column; + impl TableWithBlueprint for [< DaCompressionTemporalRegistryIndex $type >] { + type Blueprint = Plain; + type Column = super::Column; - fn column() -> Self::Column { - Self::Column::DaCompressionTemporalRegistryIndex - } -} + fn column() -> Self::Column { + Self::Column::[< DaCompressionTemporalRegistryIndex $type >] + } + } -/// This table is used to hold "next key to evict" for each keyspace. 
-/// In the future we'll likely switch to use LRU or something, in which -/// case this table can be repurposed, iff migrations have been figured out. -pub struct DaCompressionTemporalRegistryEvictor; + /// This table is used to hold "next key to evict" for each keyspace. + /// In the future we'll likely switch to use LRU or something, in which + /// case this table can be repurposed. + pub struct [< DaCompressionTemporalRegistryEvictor $type >]; -impl Mappable for DaCompressionTemporalRegistryEvictor { - type Key = Self::OwnedKey; - type OwnedKey = RegistryKeyspace; - type Value = Self::OwnedValue; - type OwnedValue = RegistryKey; -} + impl Mappable for [< DaCompressionTemporalRegistryEvictor $type >] { + type Key = Self::OwnedKey; + type OwnedKey = (); + type Value = Self::OwnedValue; + type OwnedValue = RegistryKey; + } -impl TableWithBlueprint for DaCompressionTemporalRegistryEvictor { - type Blueprint = Plain; - type Column = super::Column; + impl TableWithBlueprint for [< DaCompressionTemporalRegistryEvictor $type >] { + type Blueprint = Plain; + type Column = super::Column; - fn column() -> Self::Column { - Self::Column::DaCompressionTemporalRegistryEvictor - } + fn column() -> Self::Column { + Self::Column::[< DaCompressionTemporalRegistryEvictor $type >] + } + } + + + #[cfg(test)] + fuel_core_storage::basic_storage_tests!( + [< DaCompressionTemporalRegistry $type >], + RegistryKey::ZERO, + <[< DaCompressionTemporalRegistry $type >] as Mappable>::Value::default(), + <[< DaCompressionTemporalRegistry $type >] as Mappable>::Value::default(), + tests::generate_key + ); + + #[cfg(test)] + fuel_core_storage::basic_storage_tests!( + [< DaCompressionTemporalRegistryIndex $type >], + [0u8; 32], + RegistryKey::ZERO + ); + + #[cfg(test)] + fuel_core_storage::basic_storage_tests!( + [< DaCompressionTemporalRegistryEvictor $type >], + (), + RegistryKey::ZERO + ); + } + }; } +temporal_registry!(Address); +temporal_registry!(AssetId); +temporal_registry!(ContractId); 
+temporal_registry!(ScriptCode); +temporal_registry!(PredicateCode); + #[cfg(test)] mod tests { - use fuel_core_types::fuel_tx::{ - input::PredicateCode, - ScriptCode, - }; - use super::*; - fn generate_keyspace(rng: &mut impl rand::Rng) -> RegistryKeyspace { - rng.gen() - } - - #[allow(clippy::arithmetic_side_effects)] // Test code, and also safe - fn generate_raw_key(rng: &mut impl rand::Rng) -> RegistryKey { - let raw_key: u32 = rng.gen_range(0..2u32.pow(24) - 2); - RegistryKey::try_from(raw_key).unwrap() - } - - fn generate_registry_key( - rng: &mut impl rand::Rng, - ) -> (RegistryKeyspace, RegistryKey) { - (generate_keyspace(rng), generate_raw_key(rng)) - } - - fn generate_registry_index_key(rng: &mut impl rand::Rng) -> RegistryKeyspaceValue { - let mut bytes: Vec = vec![0u8; rng.gen_range(0..1234)]; - rng.fill(bytes.as_mut_slice()); - - match rng.gen() { - RegistryKeyspace::address => RegistryKeyspaceValue::address(rng.gen()), - RegistryKeyspace::asset_id => RegistryKeyspaceValue::asset_id(rng.gen()), - RegistryKeyspace::contract_id => { - RegistryKeyspaceValue::contract_id(rng.gen()) - } - RegistryKeyspace::script_code => { - RegistryKeyspaceValue::script_code(ScriptCode { bytes }) - } - RegistryKeyspace::predicate_code => { - RegistryKeyspaceValue::predicate_code(PredicateCode { bytes }) - } - } - } - fuel_core_storage::basic_storage_tests!( DaCompressedBlocks, ::Key::default(), ::Value::default() ); - fuel_core_storage::basic_storage_tests!( - DaCompressionTemporalRegistry, - (RegistryKeyspace::address, RegistryKey::ZERO), - RegistryKeyspaceValue::address(fuel_core_types::fuel_tx::Address::zeroed()), - RegistryKeyspaceValue::address(fuel_core_types::fuel_tx::Address::zeroed()), - generate_registry_key - ); - - fuel_core_storage::basic_storage_tests!( - DaCompressionTemporalRegistryIndex, - RegistryKeyspaceValue::address(fuel_core_types::fuel_tx::Address::zeroed()), - RegistryKey::ZERO, - RegistryKey::ZERO, - generate_registry_index_key - ); - - 
fuel_core_storage::basic_storage_tests!( - DaCompressionTemporalRegistryEvictor, - RegistryKeyspace::address, - RegistryKey::ZERO, - RegistryKey::ZERO, - generate_keyspace - ); + #[allow(clippy::arithmetic_side_effects)] // Test code, and also safe + pub fn generate_key(rng: &mut impl rand::Rng) -> RegistryKey { + let raw_key: u32 = rng.gen_range(0..2u32.pow(24) - 2); + RegistryKey::try_from(raw_key).unwrap() + } } From 7fe125fdfe3a48e1f9de1dd400027ee2d06d66e4 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 26 Sep 2024 05:57:24 +0300 Subject: [PATCH 076/112] Remove RegistryKeyspaceValue type --- crates/compression/src/lib.rs | 77 +++++++------------ crates/compression/src/tables.rs | 21 ----- .../src/graphql_api/storage/da_compression.rs | 2 +- 3 files changed, 29 insertions(+), 71 deletions(-) diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 25dda71277e..3ac9c1a6e3c 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -9,10 +9,7 @@ mod eviction_policy; pub mod ports; mod tables; -pub use tables::{ - RegistryKeyspace, - RegistryKeyspaceValue, -}; +pub use tables::RegistryKeyspace; use serde::{ Deserialize, @@ -56,13 +53,6 @@ mod tests { primitives::Empty, }, fuel_compression::RegistryKey, - fuel_tx::{ - input::PredicateCode, - Address, - AssetId, - ContractId, - ScriptCode, - }, tai64::Tai64, }; use proptest::prelude::*; @@ -80,57 +70,46 @@ mod tests { ] } - fn keyspace_value() -> impl Strategy { - (keyspace(), prop::array::uniform32(0..u8::MAX)).prop_map(|(keyspace, value)| { - match keyspace { - RegistryKeyspace::Address => { - RegistryKeyspaceValue::Address(Address::new(value)) - } - RegistryKeyspace::AssetId => { - RegistryKeyspaceValue::AssetId(AssetId::new(value)) - } - RegistryKeyspace::ContractId => { - RegistryKeyspaceValue::ContractId(ContractId::new(value)) - } - RegistryKeyspace::ScriptCode => { - let len = (value[0] % 32) as usize; - RegistryKeyspaceValue::ScriptCode(ScriptCode { - 
bytes: value[..len].to_vec(), - }) - } - RegistryKeyspace::PredicateCode => { - let len = (value[0] % 32) as usize; - RegistryKeyspaceValue::PredicateCode(PredicateCode { - bytes: value[..len].to_vec(), - }) - } - } - }) - } - proptest! { /// Serialization for compressed transactions is already tested in fuel-vm, /// but the rest of the block de/serialization is be tested here. #[test] fn postcard_roundtrip( - da_height in 0..u64::MAX, - prev_root in prop::array::uniform32(0..u8::MAX), - height in 0..u32::MAX, - consensus_parameters_version in 0..u32::MAX, - state_transition_bytecode_version in 0..u32::MAX, - registrations_root in prop::array::uniform32(0..u8::MAX), + da_height in 0..=u64::MAX, + prev_root in prop::array::uniform32(0..=u8::MAX), + height in 0..=u32::MAX, + consensus_parameters_version in 0..=u32::MAX, + state_transition_bytecode_version in 0..=u32::MAX, + registrations_root in prop::array::uniform32(0..=u8::MAX), registration_inputs in prop::collection::vec( - (keyspace_value(), prop::num::u16::ANY).prop_map(|(v, rk)| { + (keyspace(), prop::num::u16::ANY, prop::array::uniform32(0..=u8::MAX)).prop_map(|(ks, rk, arr)| { let k = RegistryKey::try_from(rk as u32).unwrap(); - (k, v) + (ks, k, arr) }), 0..123 ), ) { let mut registrations: PerRegistryKeyspaceMap = Default::default(); - for (key, ksv) in registration_inputs { - registrations.insert(key, ksv); + for (ks, key, arr) in registration_inputs { + let value_len_limit = (key.as_u32() % 32) as usize; + match ks { + RegistryKeyspace::Address => { + registrations.Address.insert(key, arr.into()); + } + RegistryKeyspace::AssetId => { + registrations.AssetId.insert(key, arr.into()); + } + RegistryKeyspace::ContractId => { + registrations.ContractId.insert(key, arr.into()); + } + RegistryKeyspace::ScriptCode => { + registrations.ScriptCode.insert(key, arr[..value_len_limit].to_vec().into()); + } + RegistryKeyspace::PredicateCode => { + registrations.PredicateCode.insert(key, 
arr[..value_len_limit].to_vec().into()); + } + } } let header = PartialBlockHeader { diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index b0994f2d5d1..aa3a473b1e6 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -41,14 +41,6 @@ macro_rules! tables { )* } - #[doc = "RegistryKey namespace with an associated typed value"] - #[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] - pub enum RegistryKeyspaceValue { - $( - [<$type>]($type), - )* - } - #[doc = "A value for each keyspace"] #[derive(Debug, Clone, Default)] #[allow(non_snake_case)] // Match type names exactly @@ -83,19 +75,6 @@ macro_rules! tables { $(pub [<$type>]: HashMap,)* } - impl PerRegistryKeyspaceMap { - #[cfg(test)] - pub fn insert(&mut self, key: RegistryKey, value: RegistryKeyspaceValue) { - match value { - $( - RegistryKeyspaceValue::[<$type>](value) => { - self.[<$type>].insert(key, value); - } - )* - } - } - } - #[doc = "The set of registrations for each table, as used in the compressed block header"] #[derive(Debug, Clone, PartialEq, Default, serde::Serialize, serde::Deserialize)] #[allow(non_snake_case)] // Match type names exactly diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression.rs b/crates/fuel-core/src/graphql_api/storage/da_compression.rs index 1584488dbaf..b9f18216db0 100644 --- a/crates/fuel-core/src/graphql_api/storage/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/storage/da_compression.rs @@ -63,7 +63,7 @@ macro_rules! 
temporal_registry { impl Mappable for [< DaCompressionTemporalRegistryIndex $type >] { type Key = Self::OwnedKey; - type OwnedKey = [u8; 32]; // Hash + type OwnedKey = [u8; 32]; // if the value is larger than 32 bytes, it's hashed type Value = Self::OwnedValue; type OwnedValue = RegistryKey; } From 8d5fdf20c5d55347dee3724f823d3b1479de5d07 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 26 Sep 2024 06:00:01 +0300 Subject: [PATCH 077/112] Change infallible TryFrom into From impl --- crates/compression/src/compress.rs | 3 +-- crates/compression/src/lib.rs | 2 +- crates/compression/src/tables.rs | 8 +++----- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 27d641c506e..dcff583706c 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -59,8 +59,7 @@ pub async fn compress(db: D, block: &Block) -> Result, Er changes: Default::default(), }; let transactions = target.compress_with(&mut ctx).await?; - let registrations = ctx.changes; - let registrations = RegistrationsPerTable::try_from(registrations)?; + let registrations: RegistrationsPerTable = ctx.changes.into(); // Apply changes to the db registrations.write_to_registry(&mut ctx.db)?; diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 3ac9c1a6e3c..d97e16bf949 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -127,7 +127,7 @@ mod tests { } }; let original = CompressedBlockPayloadV0 { - registrations: RegistrationsPerTable::try_from(registrations).unwrap(), + registrations: RegistrationsPerTable::from(registrations), registrations_root: registrations_root.into(), header, transactions: vec![], diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index aa3a473b1e6..7248c68ba87 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -82,17 +82,15 @@ macro_rules! 
tables { $(pub [<$type>]: Vec<(RegistryKey, $type)>,)* } - impl TryFrom for RegistrationsPerTable { - type Error = anyhow::Error; - - fn try_from(value: PerRegistryKeyspaceMap) -> Result { + impl From for RegistrationsPerTable { + fn from(value: PerRegistryKeyspaceMap) -> Self { let mut result = Self::default(); $( for (key, value) in value.[<$type>].into_iter() { result.[<$type>].push((key, value)); } )* - Ok(result) + result } } From 137aee26bff21185e01e9fc888b3319eb80ca0a1 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 26 Sep 2024 06:00:29 +0300 Subject: [PATCH 078/112] cargo sort --- crates/compression/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index a81079862fa..eae8d6b29fe 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -31,7 +31,7 @@ thiserror = { workspace = true } [dev-dependencies] proptest = { workspace = true } -tokio = { workspace = true} +tokio = { workspace = true } [features] test-helpers = [ From c5ac5db3cf427e1919e9c3f6d5e70cd5be737946 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 26 Sep 2024 06:09:59 +0300 Subject: [PATCH 079/112] Remove thiserror dependency (no_std support) --- Cargo.lock | 1 - crates/compression/Cargo.toml | 4 ++-- crates/compression/src/compress.rs | 10 +--------- crates/compression/src/decompress.rs | 30 ++++++++++------------------ crates/compression/src/tables.rs | 5 ++--- 5 files changed, 15 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3f203e34fdf..cc80d82b354 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3368,7 +3368,6 @@ dependencies = [ "proptest", "rand", "serde", - "thiserror", "tokio", ] diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index eae8d6b29fe..7c510a1f95d 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -24,14 +24,14 @@ fuel-core-types = { workspace = true, features = [ 
"da-compression", ] } paste = "1" -postcard = { version = "1.0", features = ["use-std"] } +postcard = "1.0" rand = { workspace = true, optional = true } serde = { version = "1.0", features = ["derive"] } -thiserror = { workspace = true } [dev-dependencies] proptest = { workspace = true } tokio = { workspace = true } +postcard = { version = "1.0", features = ["use-std"] } [features] test-helpers = [ diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index dcff583706c..0eedb1ff12b 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -28,19 +28,11 @@ use fuel_core_types::{ }, }; -#[derive(Debug, thiserror::Error)] -pub enum Error { - #[error("Only the next sequential block can be compressed")] - NotLatest, - #[error("Unknown compression error")] - Other(#[from] anyhow::Error), -} - pub trait CompressDb: TemporalRegistryAll + UtxoIdToPointer {} impl CompressDb for T where T: TemporalRegistryAll + UtxoIdToPointer {} /// This must be called for all new blocks in sequence, otherwise the result will be garbage. 
-pub async fn compress(db: D, block: &Block) -> Result, Error> { +pub async fn compress(db: D, block: &Block) -> anyhow::Result> { let target = block.transactions().to_vec(); let mut prepare_ctx = PrepareCtx { diff --git a/crates/compression/src/decompress.rs b/crates/compression/src/decompress.rs index a4b78f762a2..58ce31c1ba8 100644 --- a/crates/compression/src/decompress.rs +++ b/crates/compression/src/decompress.rs @@ -38,17 +38,6 @@ use fuel_core_types::{ }, }; -#[derive(Debug, thiserror::Error)] -pub enum DecompressError { - #[error("Only the next sequential block can be decompressed")] - NotLatest, - #[error("Deserialization error: {0} (possibly unknown version)")] - Postcard(#[from] postcard::Error), - /// Other errors - #[error("Unknown error: {0}")] - Other(#[from] anyhow::Error), -} - pub trait DecompressDb: TemporalRegistryAll + HistoryLookup {} impl DecompressDb for T where T: TemporalRegistryAll + HistoryLookup {} @@ -56,7 +45,7 @@ impl DecompressDb for T where T: TemporalRegistryAll + HistoryLookup {} pub async fn decompress( mut db: D, block: Vec, -) -> Result { +) -> anyhow::Result { let compressed: CompressedBlock = postcard::from_bytes(&block)?; let CompressedBlock::V0(compressed) = compressed; @@ -100,15 +89,15 @@ pub struct DecompressCtx { } impl ContextError for DecompressCtx { - type Error = DecompressError; + type Error = anyhow::Error; } impl DecompressibleBy> for UtxoId { async fn decompress_with( c: CompressedUtxoId, ctx: &DecompressCtx, - ) -> Result { - Ok(ctx.db.utxo_id(c)?) 
+ ) -> anyhow::Result { + ctx.db.utxo_id(c) } } @@ -124,7 +113,7 @@ where async fn decompress_with( c: as Compressible>::Compressed, ctx: &DecompressCtx, - ) -> Result, DecompressError> { + ) -> anyhow::Result> { let utxo_id = UtxoId::decompress_with(c.utxo_id, ctx).await?; let coin_info = ctx.db.coin(utxo_id)?; let witness_index = c.witness_index.decompress(ctx).await?; @@ -158,7 +147,7 @@ where async fn decompress_with( c: as Compressible>::Compressed, ctx: &DecompressCtx, - ) -> Result, DecompressError> { + ) -> anyhow::Result> { let msg = ctx.db.message(c.nonce)?; let witness_index = c.witness_index.decompress(ctx).await?; let predicate_gas_used = c.predicate_gas_used.decompress(ctx).await?; @@ -188,7 +177,7 @@ impl DecompressibleBy> for Mint { async fn decompress_with( c: Self::Compressed, ctx: &DecompressCtx, - ) -> Result { + ) -> anyhow::Result { Ok(Transaction::mint( Default::default(), // TODO: what should this we do with this? c.input_contract.decompress(ctx).await?, @@ -294,7 +283,8 @@ mod tests { postcard::to_stdvec(&CompressedBlockWithNewVersions::NewVersion(1234)) .unwrap(); - let result = decompress(MockDb, block).await; - assert!(matches!(result, Err(DecompressError::Postcard(_)))); + decompress(MockDb, block) + .await + .expect_err("Decompression should fail gracefully"); } } diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index 7248c68ba87..4ac43c52ed5 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -23,7 +23,6 @@ use crate::{ decompress::{ DecompressCtx, DecompressDb, - DecompressError, }, ports::{ EvictorDb, @@ -156,11 +155,11 @@ macro_rules! tables { async fn decompress_with( key: RegistryKey, ctx: &DecompressCtx, - ) -> Result { + ) -> anyhow::Result { if key == RegistryKey::DEFAULT_VALUE { return Ok(<$type>::default()); } - Ok(ctx.db.read_registry(key)?) 
+ ctx.db.read_registry(key) } } )* From 2a4c1906f462892f10da3e4d7e9df874b330870a Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 26 Sep 2024 06:12:25 +0300 Subject: [PATCH 080/112] cargo sort --- crates/compression/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 7c510a1f95d..6dca78844a2 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -29,9 +29,9 @@ rand = { workspace = true, optional = true } serde = { version = "1.0", features = ["derive"] } [dev-dependencies] +postcard = { version = "1.0", features = ["use-std"] } proptest = { workspace = true } tokio = { workspace = true } -postcard = { version = "1.0", features = ["use-std"] } [features] test-helpers = [ From c04b7dc967f10b8ef1fdf982bd7c1f666e6d2635 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 26 Sep 2024 06:36:53 +0300 Subject: [PATCH 081/112] Make cache evictor keyspace-agnostic --- crates/compression/src/compress.rs | 26 ++++--- crates/compression/src/eviction_policy.rs | 85 ++++++++--------------- crates/compression/src/lib.rs | 15 ++-- crates/compression/src/tables.rs | 65 ++++++++++------- 4 files changed, 93 insertions(+), 98 deletions(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 0eedb1ff12b..feb963a74cc 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -1,11 +1,14 @@ -use std::collections::HashSet; +use std::collections::{ + HashMap, + HashSet, +}; use crate::{ eviction_policy::CacheEvictor, ports::UtxoIdToPointer, tables::{ + CompressCtxKeyspaces, PerRegistryKeyspace, - PerRegistryKeyspaceMap, RegistrationsPerTable, TemporalRegistryAll, }, @@ -45,13 +48,10 @@ pub async fn compress(db: D, block: &Block) -> anyhow::Result CompressibleBy> for UtxoId { } } +#[derive(Debug)] +pub(crate) struct CompressCtxKeyspace { + /// Cache evictor state for this keyspace + pub 
cache_evictor: CacheEvictor, + /// Changes to the temporary registry, to be included in the compressed block header + pub changes: HashMap, +} + pub struct CompressCtx { pub db: D, - pub cache_evictor: CacheEvictor, - /// Changes to the temporary registry, to be included in the compressed block header - pub changes: PerRegistryKeyspaceMap, + pub(crate) per_keyspace: CompressCtxKeyspaces, } impl ContextError for CompressCtx { diff --git a/crates/compression/src/eviction_policy.rs b/crates/compression/src/eviction_policy.rs index b85f5f2eb85..d25936e62b2 100644 --- a/crates/compression/src/eviction_policy.rs +++ b/crates/compression/src/eviction_policy.rs @@ -1,62 +1,37 @@ use std::collections::HashSet; -use fuel_core_types::{ - fuel_compression::RegistryKey, - fuel_tx::{ - input::PredicateCode, - Address, - AssetId, - ContractId, - ScriptCode, - }, -}; - -use crate::{ - ports::EvictorDb, - tables::{ - PerRegistryKeyspace, - RegistryKeyspace, - }, -}; - -pub struct CacheEvictor { +use fuel_core_types::fuel_compression::RegistryKey; + +use crate::ports::EvictorDb; + +/// Evictor for a single keyspace +#[derive(Debug)] +pub(crate) struct CacheEvictor { + pub keyspace: std::marker::PhantomData, /// Set of keys that must not be evicted - pub keep_keys: PerRegistryKeyspace>, + pub keep_keys: HashSet, } -macro_rules! impl_evictor { - ($type:ty) => { paste::paste! { - impl CacheEvictor { - /// Get a key, evicting an old value if necessary - #[allow(non_snake_case)] // Match names of types exactly - pub fn [< next_key_ $type >] ( - &mut self, - db: &mut D, - ) -> anyhow::Result - where - D: EvictorDb<$type>, - { - // Pick first key not in the set - // TODO: use a proper algo, maybe LRU? 
- let mut key = db.read_latest()?; - - debug_assert!(self.keep_keys[RegistryKeyspace::[<$type>]].len() < 2usize.pow(24).saturating_sub(2)); - - while self.keep_keys[RegistryKeyspace::[<$type>]].contains(&key) { - key = key.next(); - } - - db.write_latest(key)?; - - self.keep_keys[RegistryKeyspace::[<$type>]].insert(key); - Ok(key) - } +impl CacheEvictor { + /// Get a key, evicting an old value if necessary + #[allow(non_snake_case)] // Match names of types exactly + pub fn next_key(&mut self, db: &mut D) -> anyhow::Result + where + D: EvictorDb, + { + // Pick first key not in the set + // TODO: use a proper algo, maybe LRU? + let mut key = db.read_latest()?; + + debug_assert!(self.keep_keys.len() < 2usize.pow(24).saturating_sub(2)); + + while self.keep_keys.contains(&key) { + key = key.next(); } - }}; -} -impl_evictor!(Address); -impl_evictor!(AssetId); -impl_evictor!(ContractId); -impl_evictor!(ScriptCode); -impl_evictor!(PredicateCode); + db.write_latest(key)?; + + self.keep_keys.insert(key); + Ok(key) + } +} diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index d97e16bf949..032534a155d 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -56,7 +56,6 @@ mod tests { tai64::Tai64, }; use proptest::prelude::*; - use tables::PerRegistryKeyspaceMap; use super::*; @@ -89,25 +88,25 @@ mod tests { 0..123 ), ) { - let mut registrations: PerRegistryKeyspaceMap = Default::default(); + let mut registrations: RegistrationsPerTable = Default::default(); for (ks, key, arr) in registration_inputs { let value_len_limit = (key.as_u32() % 32) as usize; match ks { RegistryKeyspace::Address => { - registrations.Address.insert(key, arr.into()); + registrations.Address.push((key, arr.into())); } RegistryKeyspace::AssetId => { - registrations.AssetId.insert(key, arr.into()); + registrations.AssetId.push((key, arr.into())); } RegistryKeyspace::ContractId => { - registrations.ContractId.insert(key, arr.into()); + 
registrations.ContractId.push((key, arr.into())); } RegistryKeyspace::ScriptCode => { - registrations.ScriptCode.insert(key, arr[..value_len_limit].to_vec().into()); + registrations.ScriptCode.push((key, arr[..value_len_limit].to_vec().into())); } RegistryKeyspace::PredicateCode => { - registrations.PredicateCode.insert(key, arr[..value_len_limit].to_vec().into()); + registrations.PredicateCode.push((key, arr[..value_len_limit].to_vec().into())); } } } @@ -127,7 +126,7 @@ mod tests { } }; let original = CompressedBlockPayloadV0 { - registrations: RegistrationsPerTable::from(registrations), + registrations, registrations_root: registrations_root.into(), header, transactions: vec![], diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index 4ac43c52ed5..23dca8da09e 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -1,19 +1,3 @@ -use fuel_core_types::{ - fuel_compression::{ - CompressibleBy, - DecompressibleBy, - RegistryKey, - }, - fuel_tx::{ - input::PredicateCode, - Address, - AssetId, - ContractId, - ScriptCode, - }, -}; -use std::collections::HashMap; - use crate::{ compress::{ CompressCtx, @@ -29,6 +13,21 @@ use crate::{ TemporalRegistry, }, }; +use fuel_core_types::{ + fuel_compression::{ + CompressibleBy, + DecompressibleBy, + RegistryKey, + }, + fuel_tx::{ + input::PredicateCode, + Address, + AssetId, + ContractId, + ScriptCode, + }, +}; +use std::collections::HashSet; macro_rules! tables { ($($type:ty),*$(,)?) => { paste::paste! { @@ -67,11 +66,27 @@ macro_rules! 
tables { } } - #[doc = "Key-value mapping for each keyspace"] - #[derive(Debug, Clone, Default)] + // If Rust had HKTs, we wouldn't have to do this + #[derive(Debug)] #[allow(non_snake_case)] // Match type names exactly - pub struct PerRegistryKeyspaceMap { - $(pub [<$type>]: HashMap,)* + pub(crate) struct CompressCtxKeyspaces { + $(pub [<$type>]: crate::compress::CompressCtxKeyspace<$type>,)* + } + + impl From>> for CompressCtxKeyspaces { + fn from(value: PerRegistryKeyspace>) -> Self { + Self { + $( + [<$type>]: crate::compress::CompressCtxKeyspace { + changes: Default::default(), + cache_evictor: crate::eviction_policy::CacheEvictor { + keyspace: std::marker::PhantomData, + keep_keys: value.[<$type>], + }, + }, + )* + } + } } #[doc = "The set of registrations for each table, as used in the compressed block header"] @@ -81,11 +96,11 @@ macro_rules! tables { $(pub [<$type>]: Vec<(RegistryKey, $type)>,)* } - impl From for RegistrationsPerTable { - fn from(value: PerRegistryKeyspaceMap) -> Self { + impl From for RegistrationsPerTable { + fn from(value: CompressCtxKeyspaces) -> Self { let mut result = Self::default(); $( - for (key, value) in value.[<$type>].into_iter() { + for (key, value) in value.[<$type>].changes.into_iter() { result.[<$type>].push((key, value)); } )* @@ -144,8 +159,8 @@ macro_rules! 
tables { return Ok(found); } - let key = ctx.cache_evictor.[< next_key_ $type >](&mut ctx.db)?; - let old = ctx.changes.[<$type>].insert(key, self.clone()); + let key = ctx.per_keyspace.[<$type>].cache_evictor.next_key(&mut ctx.db)?; + let old = ctx.per_keyspace.[<$type>].changes.insert(key, self.clone()); assert!(old.is_none(), "Key collision in registry substitution"); Ok(key) } From 34bfcd98d61173e19a52eb4bf554b6171989a799 Mon Sep 17 00:00:00 2001 From: Green Baneling Date: Thu, 26 Sep 2024 14:44:56 +0200 Subject: [PATCH 082/112] Minor improvements to the compression crate (#2254) --- crates/compression/Cargo.toml | 3 +- crates/compression/src/compress.rs | 99 ++-------- crates/compression/src/decompress.rs | 43 +--- crates/compression/src/eviction_policy.rs | 13 +- crates/compression/src/lib.rs | 11 +- crates/compression/src/ports.rs | 50 ++++- crates/compression/src/tables.rs | 187 ++++++++++++++---- .../src/graphql_api/da_compression.rs | 12 +- crates/types/src/blockchain/block.rs | 7 + 9 files changed, 240 insertions(+), 185 deletions(-) diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 6dca78844a2..cb771dc7afa 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -29,9 +29,10 @@ rand = { workspace = true, optional = true } serde = { version = "1.0", features = ["derive"] } [dev-dependencies] +fuel-core-compression = { path = ".", features = ["test-helpers"] } postcard = { version = "1.0", features = ["use-std"] } proptest = { workspace = true } -tokio = { workspace = true } +tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } [features] test-helpers = [ diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index feb963a74cc..293342400c3 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -1,14 +1,7 @@ -use std::collections::{ - HashMap, - HashSet, -}; - use crate::{ - eviction_policy::CacheEvictor, 
ports::UtxoIdToPointer, tables::{ - CompressCtxKeyspaces, - PerRegistryKeyspace, + PrepareCtx, RegistrationsPerTable, TemporalRegistryAll, }, @@ -17,44 +10,29 @@ use crate::{ }; use fuel_core_types::{ blockchain::block::Block, - fuel_compression::{ - CompressibleBy, - ContextError, - RegistryKey, - }, - fuel_tx::{ - Bytes32, - CompressedUtxoId, - Transaction, - TxPointer, - UtxoId, - }, + fuel_compression::CompressibleBy, + fuel_tx::Bytes32, }; pub trait CompressDb: TemporalRegistryAll + UtxoIdToPointer {} impl CompressDb for T where T: TemporalRegistryAll + UtxoIdToPointer {} /// This must be called for all new blocks in sequence, otherwise the result will be garbage. -pub async fn compress(db: D, block: &Block) -> anyhow::Result> { - let target = block.transactions().to_vec(); +pub async fn compress(mut db: D, block: &Block) -> anyhow::Result> +where + D: CompressDb, +{ + let target = block.transactions_vec(); - let mut prepare_ctx = PrepareCtx { - db, - accessed_keys: PerRegistryKeyspace::default(), - }; - let _ = - as CompressibleBy<_>>::compress_with(&target, &mut prepare_ctx) - .await?; + let mut prepare_ctx = PrepareCtx::new(&mut db); + let _ = target.compress_with(&mut prepare_ctx).await?; - let mut ctx = CompressCtx { - db: prepare_ctx.db, - per_keyspace: prepare_ctx.accessed_keys.into(), - }; + let mut ctx = prepare_ctx.into_compression_context(); let transactions = target.compress_with(&mut ctx).await?; - let registrations: RegistrationsPerTable = ctx.per_keyspace.into(); + let registrations: RegistrationsPerTable = ctx.into_registrations(); // Apply changes to the db - registrations.write_to_registry(&mut ctx.db)?; + registrations.write_to_registry(&mut db)?; // Construct the actual compacted block let compact = CompressedBlockPayloadV0 { @@ -69,54 +47,3 @@ pub async fn compress(db: D, block: &Block) -> anyhow::Result { - /// Database handle - pub db: D, - /// Keys accessed during compression. Will not be overwritten. 
- pub accessed_keys: PerRegistryKeyspace>, -} - -impl ContextError for PrepareCtx { - type Error = anyhow::Error; -} - -impl CompressibleBy> for UtxoId { - async fn compress_with( - &self, - _ctx: &mut PrepareCtx, - ) -> anyhow::Result { - Ok(CompressedUtxoId { - tx_pointer: TxPointer::default(), - output_index: 0, - }) - } -} - -#[derive(Debug)] -pub(crate) struct CompressCtxKeyspace { - /// Cache evictor state for this keyspace - pub cache_evictor: CacheEvictor, - /// Changes to the temporary registry, to be included in the compressed block header - pub changes: HashMap, -} - -pub struct CompressCtx { - pub db: D, - pub(crate) per_keyspace: CompressCtxKeyspaces, -} - -impl ContextError for CompressCtx { - type Error = anyhow::Error; -} - -impl CompressibleBy> for UtxoId { - async fn compress_with( - &self, - ctx: &mut CompressCtx, - ) -> anyhow::Result { - ctx.db.lookup(*self) - } -} diff --git a/crates/compression/src/decompress.rs b/crates/compression/src/decompress.rs index 58ce31c1ba8..7c9142620c1 100644 --- a/crates/compression/src/decompress.rs +++ b/crates/compression/src/decompress.rs @@ -4,15 +4,7 @@ use crate::{ CompressedBlock, }; use fuel_core_types::{ - blockchain::{ - block::PartialFuelBlock, - header::{ - ApplicationHeader, - ConsensusHeader, - PartialBlockHeader, - }, - primitives::Empty, - }, + blockchain::block::PartialFuelBlock, fuel_compression::{ Compressible, ContextError, @@ -42,10 +34,10 @@ pub trait DecompressDb: TemporalRegistryAll + HistoryLookup {} impl DecompressDb for T where T: TemporalRegistryAll + HistoryLookup {} /// This must be called for all decompressed blocks in sequence, otherwise the result will be garbage. 
-pub async fn decompress( - mut db: D, - block: Vec, -) -> anyhow::Result { +pub async fn decompress(mut db: D, block: Vec) -> anyhow::Result +where + D: DecompressDb + TemporalRegistryAll, +{ let compressed: CompressedBlock = postcard::from_bytes(&block)?; let CompressedBlock::V0(compressed) = compressed; @@ -62,24 +54,7 @@ pub async fn decompress( .await?; Ok(PartialFuelBlock { - header: PartialBlockHeader { - application: ApplicationHeader { - da_height: compressed.header.da_height, - consensus_parameters_version: compressed - .header - .consensus_parameters_version, - state_transition_bytecode_version: compressed - .header - .state_transition_bytecode_version, - generated: Empty, - }, - consensus: ConsensusHeader { - prev_root: *compressed.header.prev_root(), - height: *compressed.header.height(), - time: *compressed.header.time(), - generated: Empty, - }, - }, + header: compressed.header, transactions, }) } @@ -232,13 +207,13 @@ mod tests { macro_rules! mock_temporal { ($type:ty) => { impl TemporalRegistry<$type> for MockDb { - fn read_registry(&self, _key: RegistryKey) -> anyhow::Result<$type> { + fn read_registry(&self, _key: &RegistryKey) -> anyhow::Result<$type> { todo!() } fn write_registry( &mut self, - _key: RegistryKey, + _key: &RegistryKey, _value: &$type, ) -> anyhow::Result<()> { todo!() @@ -257,7 +232,7 @@ mod tests { todo!() } - fn read_latest(&mut self) -> anyhow::Result { + fn read_latest(&self) -> anyhow::Result { todo!() } } diff --git a/crates/compression/src/eviction_policy.rs b/crates/compression/src/eviction_policy.rs index d25936e62b2..06b48213e6b 100644 --- a/crates/compression/src/eviction_policy.rs +++ b/crates/compression/src/eviction_policy.rs @@ -7,14 +7,19 @@ use crate::ports::EvictorDb; /// Evictor for a single keyspace #[derive(Debug)] pub(crate) struct CacheEvictor { - pub keyspace: std::marker::PhantomData, /// Set of keys that must not be evicted - pub keep_keys: HashSet, + keep_keys: HashSet, + _keyspace_marker: 
std::marker::PhantomData, } impl CacheEvictor { - /// Get a key, evicting an old value if necessary - #[allow(non_snake_case)] // Match names of types exactly + pub fn new(keep_keys: HashSet) -> Self { + Self { + keep_keys, + _keyspace_marker: std::marker::PhantomData, + } + } + pub fn next_key(&mut self, db: &mut D) -> anyhow::Result where D: EvictorDb, diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 032534a155d..35d1b06138d 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -44,6 +44,7 @@ enum CompressedBlock { #[cfg(test)] mod tests { + use fuel_core_compression as _; use fuel_core_types::{ blockchain::{ header::{ @@ -94,19 +95,19 @@ mod tests { let value_len_limit = (key.as_u32() % 32) as usize; match ks { RegistryKeyspace::Address => { - registrations.Address.push((key, arr.into())); + registrations.address.push((key, arr.into())); } RegistryKeyspace::AssetId => { - registrations.AssetId.push((key, arr.into())); + registrations.asset_id.push((key, arr.into())); } RegistryKeyspace::ContractId => { - registrations.ContractId.push((key, arr.into())); + registrations.contract_id.push((key, arr.into())); } RegistryKeyspace::ScriptCode => { - registrations.ScriptCode.push((key, arr[..value_len_limit].to_vec().into())); + registrations.script_code.push((key, arr[..value_len_limit].to_vec().into())); } RegistryKeyspace::PredicateCode => { - registrations.PredicateCode.push((key, arr[..value_len_limit].to_vec().into())); + registrations.predicate_code.push((key, arr[..value_len_limit].to_vec().into())); } } } diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index 65b06f39a28..043a84b41aa 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -18,20 +18,46 @@ use fuel_core_types::{ /// must be committed atomically, after which block height must be incremented. pub trait TemporalRegistry { /// Reads a value from the registry at its current height. 
- fn read_registry(&self, key: RegistryKey) -> anyhow::Result; + fn read_registry(&self, key: &RegistryKey) -> anyhow::Result; /// Reads a value from the registry at its current height. - fn write_registry(&mut self, key: RegistryKey, value: &T) -> anyhow::Result<()>; + fn write_registry(&mut self, key: &RegistryKey, value: &T) -> anyhow::Result<()>; /// Lookup registry key by the value. fn registry_index_lookup(&self, value: &T) -> anyhow::Result>; } +impl TemporalRegistry for &mut D +where + D: TemporalRegistry, +{ + fn read_registry(&self, key: &RegistryKey) -> anyhow::Result { + >::read_registry(self, key) + } + + fn write_registry(&mut self, key: &RegistryKey, value: &T) -> anyhow::Result<()> { + >::write_registry(self, key, value) + } + + fn registry_index_lookup(&self, value: &T) -> anyhow::Result> { + >::registry_index_lookup(self, value) + } +} + /// Lookup for UTXO pointers used for compression. pub trait UtxoIdToPointer { fn lookup(&self, utxo_id: UtxoId) -> anyhow::Result; } +impl UtxoIdToPointer for &mut D +where + D: UtxoIdToPointer, +{ + fn lookup(&self, utxo_id: UtxoId) -> anyhow::Result { + ::lookup(self, utxo_id) + } +} + /// Lookup for history of UTXOs and messages, used for decompression. pub trait HistoryLookup { fn utxo_id(&self, c: CompressedUtxoId) -> anyhow::Result; @@ -56,10 +82,22 @@ pub struct MessageInfo { pub data: Vec, } -/// Temporal registry evictor state storage, -/// currently backed by a `DaCompressionTemporalRegistryEvictor*` -/// columns in the offchain database. +/// Evictor registry to keep track of the latest used key for the type `T`. 
pub trait EvictorDb { - fn read_latest(&mut self) -> anyhow::Result; + fn read_latest(&self) -> anyhow::Result; + fn write_latest(&mut self, key: RegistryKey) -> anyhow::Result<()>; } + +impl EvictorDb for &mut D +where + D: EvictorDb, +{ + fn read_latest(&self) -> anyhow::Result { + >::read_latest(self) + } + + fn write_latest(&mut self, key: RegistryKey) -> anyhow::Result<()> { + >::write_latest(self, key) + } +} diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs index 23dca8da09e..4e033bb67ec 100644 --- a/crates/compression/src/tables.rs +++ b/crates/compression/src/tables.rs @@ -1,13 +1,10 @@ use crate::{ - compress::{ - CompressCtx, - CompressDb, - PrepareCtx, - }, + compress::CompressDb, decompress::{ DecompressCtx, DecompressDb, }, + eviction_policy::CacheEvictor, ports::{ EvictorDb, TemporalRegistry, @@ -16,6 +13,7 @@ use crate::{ use fuel_core_types::{ fuel_compression::{ CompressibleBy, + ContextError, DecompressibleBy, RegistryKey, }, @@ -23,14 +21,103 @@ use fuel_core_types::{ input::PredicateCode, Address, AssetId, + CompressedUtxoId, ContractId, ScriptCode, + TxPointer, + UtxoId, }, }; -use std::collections::HashSet; +use std::collections::{ + HashMap, + HashSet, +}; + +/// Preparation pass through the block to collect all keys accessed during compression. +/// Returns dummy values. The resulting "compressed block" should be discarded. +pub struct PrepareCtx { + /// Database handle + db: D, + /// Keys accessed during the compression. + accessed_keys: PerRegistryKeyspace>, +} + +impl PrepareCtx { + /// Create a new PrepareCtx around the given database. + pub fn new(db: D) -> Self { + Self { + db, + accessed_keys: PerRegistryKeyspace::default(), + } + } + + /// Converts the preparation context into a [`CompressCtx`] + /// keeping accessed keys to avoid its eviction during compression. 
+ pub fn into_compression_context(self) -> CompressCtx { + CompressCtx { + db: self.db, + per_keyspace: self.accessed_keys.into(), + } + } +} + +impl ContextError for PrepareCtx { + type Error = anyhow::Error; +} + +impl CompressibleBy> for UtxoId +where + D: CompressDb, +{ + async fn compress_with( + &self, + _ctx: &mut PrepareCtx, + ) -> anyhow::Result { + Ok(CompressedUtxoId { + tx_pointer: TxPointer::default(), + output_index: 0, + }) + } +} + +#[derive(Debug)] +struct CompressCtxKeyspace { + /// Cache evictor state for this keyspace + cache_evictor: CacheEvictor, + /// Changes to the temporary registry, to be included in the compressed block header + changes: HashMap, +} + +pub struct CompressCtx { + db: D, + per_keyspace: CompressCtxKeyspaces, +} + +impl CompressCtx { + /// Converts the compression context into a [`RegistrationsPerTable`] + pub fn into_registrations(self) -> RegistrationsPerTable { + self.per_keyspace.into() + } +} + +impl ContextError for CompressCtx { + type Error = anyhow::Error; +} + +impl CompressibleBy> for UtxoId +where + D: CompressDb, +{ + async fn compress_with( + &self, + ctx: &mut CompressCtx, + ) -> anyhow::Result { + ctx.db.lookup(*self) + } +} macro_rules! tables { - ($($type:ty),*$(,)?) => { paste::paste! { + ($($ident:ty: $type:ty),*) => { paste::paste! { #[doc = "RegistryKey namespaces"] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] pub enum RegistryKeyspace { @@ -41,9 +128,8 @@ macro_rules! tables { #[doc = "A value for each keyspace"] #[derive(Debug, Clone, Default)] - #[allow(non_snake_case)] // Match type names exactly pub struct PerRegistryKeyspace { - $(pub [<$type>]: T,)* + $(pub $ident: T,)* } impl core::ops::Index for PerRegistryKeyspace { type Output = T; @@ -51,7 +137,7 @@ macro_rules! 
tables { fn index(&self, index: RegistryKeyspace) -> &Self::Output { match index { $( - RegistryKeyspace::[<$type>] => &self.[<$type>], + RegistryKeyspace::[<$type>] => &self.$ident, )* } } @@ -60,7 +146,7 @@ macro_rules! tables { fn index_mut(&mut self, index: RegistryKeyspace) -> &mut Self::Output { match index { $( - RegistryKeyspace::[<$type>] => &mut self.[<$type>], + RegistryKeyspace::[<$type>] => &mut self.$ident, )* } } @@ -68,21 +154,17 @@ macro_rules! tables { // If Rust had HKTs, we wouldn't have to do this #[derive(Debug)] - #[allow(non_snake_case)] // Match type names exactly - pub(crate) struct CompressCtxKeyspaces { - $(pub [<$type>]: crate::compress::CompressCtxKeyspace<$type>,)* + struct CompressCtxKeyspaces { + $(pub $ident: CompressCtxKeyspace<$type>,)* } impl From>> for CompressCtxKeyspaces { fn from(value: PerRegistryKeyspace>) -> Self { Self { $( - [<$type>]: crate::compress::CompressCtxKeyspace { + $ident: CompressCtxKeyspace { changes: Default::default(), - cache_evictor: crate::eviction_policy::CacheEvictor { - keyspace: std::marker::PhantomData, - keep_keys: value.[<$type>], - }, + cache_evictor: CacheEvictor::new(value.$ident), }, )* } @@ -91,39 +173,43 @@ macro_rules! 
tables { #[doc = "The set of registrations for each table, as used in the compressed block header"] #[derive(Debug, Clone, PartialEq, Default, serde::Serialize, serde::Deserialize)] - #[allow(non_snake_case)] // Match type names exactly pub struct RegistrationsPerTable { - $(pub [<$type>]: Vec<(RegistryKey, $type)>,)* + $(pub $ident: Vec<(RegistryKey, $type)>,)* } impl From for RegistrationsPerTable { fn from(value: CompressCtxKeyspaces) -> Self { let mut result = Self::default(); $( - for (key, value) in value.[<$type>].changes.into_iter() { - result.[<$type>].push((key, value)); + for (key, value) in value.$ident.changes.into_iter() { + result.$ident.push((key, value)); } )* result } } - pub trait TemporalRegistryAll: Sized $( - + TemporalRegistry<$type> - + EvictorDb<$type> - )* {} + pub trait TemporalRegistryAll + where + Self: Sized, + $(Self: TemporalRegistry<$type> + EvictorDb<$type>,)* + {} - impl TemporalRegistryAll for T where T: Sized $( - + TemporalRegistry<$type> - + EvictorDb<$type> - )* {} + impl TemporalRegistryAll for T + where + T: Sized, + $(T: TemporalRegistry<$type> + EvictorDb<$type>,)* + {} impl RegistrationsPerTable { - pub(crate) fn write_to_registry(&self, registry: &mut R) -> anyhow::Result<()> { + pub(crate) fn write_to_registry(&self, registry: &mut R) -> anyhow::Result<()> + where + R: TemporalRegistryAll + { $( - for (key, value) in self.[<$type>].iter() { - registry.write_registry(*key, value)?; + for (key, value) in self.$ident.iter() { + registry.write_registry(key, value)?; } )* @@ -132,7 +218,10 @@ macro_rules! tables { } $( - impl CompressibleBy> for $type { + impl CompressibleBy> for $type + where + D: CompressDb + { async fn compress_with( &self, ctx: &mut PrepareCtx, @@ -141,32 +230,38 @@ macro_rules! tables { return Ok(RegistryKey::ZERO); } if let Some(found) = ctx.db.registry_index_lookup(self)? 
{ - ctx.accessed_keys[RegistryKeyspace::[<$type>]].insert(found); + ctx.accessed_keys.$ident.insert(found); } Ok(RegistryKey::ZERO) } } - impl CompressibleBy> for $type { + impl CompressibleBy> for $type + where + D: CompressDb + { async fn compress_with( &self, ctx: &mut CompressCtx, ) -> anyhow::Result { - if *self == Default::default() { + if self == &Default::default() { return Ok(RegistryKey::DEFAULT_VALUE); } if let Some(found) = ctx.db.registry_index_lookup(self)? { return Ok(found); } - let key = ctx.per_keyspace.[<$type>].cache_evictor.next_key(&mut ctx.db)?; - let old = ctx.per_keyspace.[<$type>].changes.insert(key, self.clone()); + let key = ctx.per_keyspace.$ident.cache_evictor.next_key(&mut ctx.db)?; + let old = ctx.per_keyspace.$ident.changes.insert(key, self.clone()); assert!(old.is_none(), "Key collision in registry substitution"); Ok(key) } } - impl DecompressibleBy> for $type { + impl DecompressibleBy> for $type + where + D: DecompressDb + { async fn decompress_with( key: RegistryKey, ctx: &DecompressCtx, @@ -174,14 +269,20 @@ macro_rules! tables { if key == RegistryKey::DEFAULT_VALUE { return Ok(<$type>::default()); } - ctx.db.read_registry(key) + ctx.db.read_registry(&key) } } )* }}; } -tables!(Address, AssetId, ContractId, ScriptCode, PredicateCode,); +tables!( + address: Address, + asset_id: AssetId, + contract_id: ContractId, + script_code: ScriptCode, + predicate_code: PredicateCode +); // TODO: move inside the macro when this stabilizes: https://github.com/rust-lang/rust/pull/122808 #[cfg(any(test, feature = "test-helpers"))] diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index f1ceffac514..20369eea78b 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -71,25 +71,25 @@ macro_rules! 
impl_temporal_registry { { fn read_registry( &self, - key: fuel_core_types::fuel_compression::RegistryKey, + key: &fuel_core_types::fuel_compression::RegistryKey, ) -> anyhow::Result<$type> { Ok(self .db_tx .storage_as_ref::<[< DaCompressionTemporalRegistry $type >]>() - .get(&key)? + .get(key)? .ok_or(not_found!([< DaCompressionTemporalRegistry $type>]))? .into_owned()) } fn write_registry( &mut self, - key: fuel_core_types::fuel_compression::RegistryKey, + key: &fuel_core_types::fuel_compression::RegistryKey, value: &$type, ) -> anyhow::Result<()> { // Write the actual value self.db_tx .storage_as_mut::<[< DaCompressionTemporalRegistry $type >]>() - .insert(&key, &value)?; + .insert(key, value)?; let value_in_index: [u8; 32] = ($index_value_fn)(value); @@ -101,7 +101,7 @@ macro_rules! impl_temporal_registry { // Add the new value to the index self.db_tx .storage_as_mut::<[< DaCompressionTemporalRegistryIndex $type >]>() - .insert(&value_in_index, &key)?; + .insert(&value_in_index, key)?; Ok(()) } @@ -135,7 +135,7 @@ macro_rules! impl_temporal_registry { } fn read_latest( - &mut self, + &self, ) -> anyhow::Result { Ok(self .db_tx diff --git a/crates/types/src/blockchain/block.rs b/crates/types/src/blockchain/block.rs index 8380b977006..7611082f232 100644 --- a/crates/types/src/blockchain/block.rs +++ b/crates/types/src/blockchain/block.rs @@ -186,6 +186,13 @@ impl Block { } } + /// Get the executed transactions as a `Vec` type. + pub fn transactions_vec(&self) -> &Vec { + match self { + Block::V1(inner) => &inner.transactions, + } + } + /// Get the complete header. 
pub fn header(&self) -> &BlockHeader { match self { From dcac7d70e222480058e57a0b0122a7801704a65a Mon Sep 17 00:00:00 2001 From: Green Baneling Date: Thu, 26 Sep 2024 16:48:28 +0200 Subject: [PATCH 083/112] Decouple compression, decompression and registry (#2257) With this approach, we have more macro rules, but the logic is split between different modules, and we can see the hierarchy of dependency between modules(instead of everyone depending on everyone). Plus, we don't need the intermediate `CompressCtxKeyspaces` type anymore. --- crates/compression/src/compress.rs | 180 +++++++++++++++- crates/compression/src/decompress.rs | 60 +++++- crates/compression/src/lib.rs | 6 +- crates/compression/src/registry.rs | 108 ++++++++++ crates/compression/src/tables.rs | 299 --------------------------- 5 files changed, 339 insertions(+), 314 deletions(-) create mode 100644 crates/compression/src/registry.rs delete mode 100644 crates/compression/src/tables.rs diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 293342400c3..f9f1ee6fdda 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -1,7 +1,8 @@ use crate::{ + eviction_policy::CacheEvictor, ports::UtxoIdToPointer, - tables::{ - PrepareCtx, + registry::{ + PerRegistryKeyspace, RegistrationsPerTable, TemporalRegistryAll, }, @@ -10,8 +11,28 @@ use crate::{ }; use fuel_core_types::{ blockchain::block::Block, - fuel_compression::CompressibleBy, - fuel_tx::Bytes32, + fuel_compression::{ + CompressibleBy, + ContextError, + RegistryKey, + }, + fuel_tx::{ + input::PredicateCode, + Bytes32, + CompressedUtxoId, + ScriptCode, + TxPointer, + UtxoId, + }, + fuel_types::{ + Address, + AssetId, + ContractId, + }, +}; +use std::collections::{ + HashMap, + HashSet, }; pub trait CompressDb: TemporalRegistryAll + UtxoIdToPointer {} @@ -29,7 +50,7 @@ where let mut ctx = prepare_ctx.into_compression_context(); let transactions = target.compress_with(&mut ctx).await?; 
- let registrations: RegistrationsPerTable = ctx.into_registrations(); + let registrations: RegistrationsPerTable = ctx.into(); // Apply changes to the db registrations.write_to_registry(&mut db)?; @@ -47,3 +68,152 @@ where Ok(compressed) } + +/// Preparation pass through the block to collect all keys accessed during compression. +/// Returns dummy values. The resulting "compressed block" should be discarded. +pub struct PrepareCtx { + /// Database handle + db: D, + /// Keys accessed during the compression. + accessed_keys: PerRegistryKeyspace>, +} + +impl PrepareCtx { + /// Create a new PrepareCtx around the given database. + pub fn new(db: D) -> Self { + Self { + db, + accessed_keys: PerRegistryKeyspace::default(), + } + } +} + +impl ContextError for PrepareCtx { + type Error = anyhow::Error; +} + +impl CompressibleBy> for UtxoId +where + D: CompressDb, +{ + async fn compress_with( + &self, + _ctx: &mut PrepareCtx, + ) -> anyhow::Result { + Ok(CompressedUtxoId { + tx_pointer: TxPointer::default(), + output_index: 0, + }) + } +} + +#[derive(Debug)] +struct CompressCtxKeyspace { + /// Cache evictor state for this keyspace + cache_evictor: CacheEvictor, + /// Changes to the temporary registry, to be included in the compressed block header + changes: HashMap, +} + +macro_rules! compression { + ($($ident:ty: $type:ty),*) => { paste::paste! { + pub struct CompressCtx { + db: D, + $($ident: CompressCtxKeyspace<$type>,)* + } + + impl PrepareCtx { + /// Converts the preparation context into a [`CompressCtx`] + /// keeping accessed keys to avoid its eviction during compression. 
+ pub fn into_compression_context(self) -> CompressCtx { + CompressCtx { + db: self.db, + $( + $ident: CompressCtxKeyspace { + changes: Default::default(), + cache_evictor: CacheEvictor::new(self.accessed_keys.$ident.into()), + }, + )* + } + } + } + + impl From> for RegistrationsPerTable { + fn from(value: CompressCtx) -> Self { + let mut result = Self::default(); + $( + for (key, value) in value.$ident.changes.into_iter() { + result.$ident.push((key, value)); + } + )* + result + } + } + + $( + impl CompressibleBy> for $type + where + D: CompressDb + { + async fn compress_with( + &self, + ctx: &mut PrepareCtx, + ) -> anyhow::Result { + if *self == <$type>::default() { + return Ok(RegistryKey::ZERO); + } + if let Some(found) = ctx.db.registry_index_lookup(self)? { + ctx.accessed_keys.$ident.insert(found); + } + Ok(RegistryKey::ZERO) + } + } + + impl CompressibleBy> for $type + where + D: CompressDb + { + async fn compress_with( + &self, + ctx: &mut CompressCtx, + ) -> anyhow::Result { + if self == &Default::default() { + return Ok(RegistryKey::DEFAULT_VALUE); + } + if let Some(found) = ctx.db.registry_index_lookup(self)? 
{ + return Ok(found); + } + + let key = ctx.$ident.cache_evictor.next_key(&mut ctx.db)?; + let old = ctx.$ident.changes.insert(key, self.clone()); + assert!(old.is_none(), "Key collision in registry substitution"); + Ok(key) + } + } + )* + }}; +} + +compression!( + address: Address, + asset_id: AssetId, + contract_id: ContractId, + script_code: ScriptCode, + predicate_code: PredicateCode +); + +impl ContextError for CompressCtx { + type Error = anyhow::Error; +} + +impl CompressibleBy> for UtxoId +where + D: CompressDb, +{ + async fn compress_with( + &self, + ctx: &mut CompressCtx, + ) -> anyhow::Result { + ctx.db.lookup(*self) + } +} diff --git a/crates/compression/src/decompress.rs b/crates/compression/src/decompress.rs index 7c9142620c1..db6ad7267f6 100644 --- a/crates/compression/src/decompress.rs +++ b/crates/compression/src/decompress.rs @@ -1,6 +1,9 @@ use crate::{ - ports::HistoryLookup, - tables::TemporalRegistryAll, + ports::{ + HistoryLookup, + TemporalRegistry, + }, + registry::TemporalRegistryAll, CompressedBlock, }; use fuel_core_types::{ @@ -10,6 +13,7 @@ use fuel_core_types::{ ContextError, Decompress, DecompressibleBy, + RegistryKey, }, fuel_tx::{ input::{ @@ -22,12 +26,19 @@ use fuel_core_types::{ MessageSpecification, }, AsField, + PredicateCode, }, + AssetId, CompressedUtxoId, Mint, + ScriptCode, Transaction, UtxoId, }, + fuel_types::{ + Address, + ContractId, + }, }; pub trait DecompressDb: TemporalRegistryAll + HistoryLookup {} @@ -36,7 +47,7 @@ impl DecompressDb for T where T: TemporalRegistryAll + HistoryLookup {} /// This must be called for all decompressed blocks in sequence, otherwise the result will be garbage. 
pub async fn decompress(mut db: D, block: Vec) -> anyhow::Result where - D: DecompressDb + TemporalRegistryAll, + D: DecompressDb, { let compressed: CompressedBlock = postcard::from_bytes(&block)?; let CompressedBlock::V0(compressed) = compressed; @@ -60,14 +71,23 @@ where } pub struct DecompressCtx { - pub db: D, + db: D, } -impl ContextError for DecompressCtx { +impl DecompressCtx { + pub fn new(db: D) -> Self { + Self { db } + } +} + +impl ContextError for DecompressCtx { type Error = anyhow::Error; } -impl DecompressibleBy> for UtxoId { +impl DecompressibleBy> for UtxoId +where + D: HistoryLookup, +{ async fn decompress_with( c: CompressedUtxoId, ctx: &DecompressCtx, @@ -76,6 +96,29 @@ impl DecompressibleBy> for UtxoId { } } +macro_rules! decompress_impl { + ($($type:ty),*) => { paste::paste! { + $( + impl DecompressibleBy> for $type + where + D: TemporalRegistry<$type> + { + async fn decompress_with( + key: RegistryKey, + ctx: &DecompressCtx, + ) -> anyhow::Result { + if key == RegistryKey::DEFAULT_VALUE { + return Ok(<$type>::default()); + } + ctx.db.read_registry(&key) + } + } + )* + }}; +} + +decompress_impl!(AssetId, ContractId, Address, PredicateCode, ScriptCode); + impl DecompressibleBy> for Coin where D: DecompressDb, @@ -148,7 +191,10 @@ where } } -impl DecompressibleBy> for Mint { +impl DecompressibleBy> for Mint +where + D: DecompressDb, +{ async fn decompress_with( c: Self::Compressed, ctx: &DecompressCtx, diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 35d1b06138d..35e9b183e06 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -7,9 +7,9 @@ pub mod compress; pub mod decompress; mod eviction_policy; pub mod ports; -mod tables; +mod registry; -pub use tables::RegistryKeyspace; +pub use registry::RegistryKeyspace; use serde::{ Deserialize, @@ -21,7 +21,7 @@ use fuel_core_types::{ fuel_tx::CompressedTransaction, fuel_types::Bytes32, }; -use tables::RegistrationsPerTable; +use 
registry::RegistrationsPerTable; /// Compressed block, without the preceding version byte. #[derive(Clone, Serialize, Deserialize)] diff --git a/crates/compression/src/registry.rs b/crates/compression/src/registry.rs new file mode 100644 index 00000000000..819002be616 --- /dev/null +++ b/crates/compression/src/registry.rs @@ -0,0 +1,108 @@ +use crate::ports::{ + EvictorDb, + TemporalRegistry, +}; +use fuel_core_types::{ + fuel_compression::RegistryKey, + fuel_tx::{ + input::PredicateCode, + Address, + AssetId, + ContractId, + ScriptCode, + }, +}; + +macro_rules! tables { + ($($ident:ty: $type:ty),*) => { paste::paste! { + #[doc = "RegistryKey namespaces"] + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] + pub enum RegistryKeyspace { + $( + [<$type>], + )* + } + + #[doc = "A value for each keyspace"] + #[derive(Debug, Clone, Default)] + pub struct PerRegistryKeyspace { + $(pub $ident: T,)* + } + impl core::ops::Index for PerRegistryKeyspace { + type Output = T; + + fn index(&self, index: RegistryKeyspace) -> &Self::Output { + match index { + $( + RegistryKeyspace::[<$type>] => &self.$ident, + )* + } + } + } + impl core::ops::IndexMut for PerRegistryKeyspace { + fn index_mut(&mut self, index: RegistryKeyspace) -> &mut Self::Output { + match index { + $( + RegistryKeyspace::[<$type>] => &mut self.$ident, + )* + } + } + } + + #[doc = "The set of registrations for each table, as used in the compressed block header"] + #[derive(Debug, Clone, PartialEq, Default, serde::Serialize, serde::Deserialize)] + pub struct RegistrationsPerTable { + $(pub $ident: Vec<(RegistryKey, $type)>,)* + } + + pub trait TemporalRegistryAll + where + Self: Sized, + $(Self: TemporalRegistry<$type> + EvictorDb<$type>,)* + {} + + impl TemporalRegistryAll for T + where + T: Sized, + $(T: TemporalRegistry<$type> + EvictorDb<$type>,)* + {} + + + impl RegistrationsPerTable { + pub(crate) fn write_to_registry(&self, registry: &mut R) -> anyhow::Result<()> + 
where + R: TemporalRegistryAll + { + $( + for (key, value) in self.$ident.iter() { + registry.write_registry(key, value)?; + } + )* + + Ok(()) + } + } + }}; +} + +tables!( + address: Address, + asset_id: AssetId, + contract_id: ContractId, + script_code: ScriptCode, + predicate_code: PredicateCode +); + +// TODO: move inside the macro when this stabilizes: https://github.com/rust-lang/rust/pull/122808 +#[cfg(any(test, feature = "test-helpers"))] +impl rand::prelude::Distribution for rand::distributions::Standard { + fn sample(&self, rng: &mut R) -> RegistryKeyspace { + match rng.gen_range(0..5) { + 0 => RegistryKeyspace::Address, + 1 => RegistryKeyspace::AssetId, + 2 => RegistryKeyspace::ContractId, + 3 => RegistryKeyspace::ScriptCode, + _ => RegistryKeyspace::PredicateCode, + } + } +} diff --git a/crates/compression/src/tables.rs b/crates/compression/src/tables.rs deleted file mode 100644 index 4e033bb67ec..00000000000 --- a/crates/compression/src/tables.rs +++ /dev/null @@ -1,299 +0,0 @@ -use crate::{ - compress::CompressDb, - decompress::{ - DecompressCtx, - DecompressDb, - }, - eviction_policy::CacheEvictor, - ports::{ - EvictorDb, - TemporalRegistry, - }, -}; -use fuel_core_types::{ - fuel_compression::{ - CompressibleBy, - ContextError, - DecompressibleBy, - RegistryKey, - }, - fuel_tx::{ - input::PredicateCode, - Address, - AssetId, - CompressedUtxoId, - ContractId, - ScriptCode, - TxPointer, - UtxoId, - }, -}; -use std::collections::{ - HashMap, - HashSet, -}; - -/// Preparation pass through the block to collect all keys accessed during compression. -/// Returns dummy values. The resulting "compressed block" should be discarded. -pub struct PrepareCtx { - /// Database handle - db: D, - /// Keys accessed during the compression. - accessed_keys: PerRegistryKeyspace>, -} - -impl PrepareCtx { - /// Create a new PrepareCtx around the given database. 
- pub fn new(db: D) -> Self { - Self { - db, - accessed_keys: PerRegistryKeyspace::default(), - } - } - - /// Converts the preparation context into a [`CompressCtx`] - /// keeping accessed keys to avoid its eviction during compression. - pub fn into_compression_context(self) -> CompressCtx { - CompressCtx { - db: self.db, - per_keyspace: self.accessed_keys.into(), - } - } -} - -impl ContextError for PrepareCtx { - type Error = anyhow::Error; -} - -impl CompressibleBy> for UtxoId -where - D: CompressDb, -{ - async fn compress_with( - &self, - _ctx: &mut PrepareCtx, - ) -> anyhow::Result { - Ok(CompressedUtxoId { - tx_pointer: TxPointer::default(), - output_index: 0, - }) - } -} - -#[derive(Debug)] -struct CompressCtxKeyspace { - /// Cache evictor state for this keyspace - cache_evictor: CacheEvictor, - /// Changes to the temporary registry, to be included in the compressed block header - changes: HashMap, -} - -pub struct CompressCtx { - db: D, - per_keyspace: CompressCtxKeyspaces, -} - -impl CompressCtx { - /// Converts the compression context into a [`RegistrationsPerTable`] - pub fn into_registrations(self) -> RegistrationsPerTable { - self.per_keyspace.into() - } -} - -impl ContextError for CompressCtx { - type Error = anyhow::Error; -} - -impl CompressibleBy> for UtxoId -where - D: CompressDb, -{ - async fn compress_with( - &self, - ctx: &mut CompressCtx, - ) -> anyhow::Result { - ctx.db.lookup(*self) - } -} - -macro_rules! tables { - ($($ident:ty: $type:ty),*) => { paste::paste! 
{ - #[doc = "RegistryKey namespaces"] - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] - pub enum RegistryKeyspace { - $( - [<$type>], - )* - } - - #[doc = "A value for each keyspace"] - #[derive(Debug, Clone, Default)] - pub struct PerRegistryKeyspace { - $(pub $ident: T,)* - } - impl core::ops::Index for PerRegistryKeyspace { - type Output = T; - - fn index(&self, index: RegistryKeyspace) -> &Self::Output { - match index { - $( - RegistryKeyspace::[<$type>] => &self.$ident, - )* - } - } - } - impl core::ops::IndexMut for PerRegistryKeyspace { - fn index_mut(&mut self, index: RegistryKeyspace) -> &mut Self::Output { - match index { - $( - RegistryKeyspace::[<$type>] => &mut self.$ident, - )* - } - } - } - - // If Rust had HKTs, we wouldn't have to do this - #[derive(Debug)] - struct CompressCtxKeyspaces { - $(pub $ident: CompressCtxKeyspace<$type>,)* - } - - impl From>> for CompressCtxKeyspaces { - fn from(value: PerRegistryKeyspace>) -> Self { - Self { - $( - $ident: CompressCtxKeyspace { - changes: Default::default(), - cache_evictor: CacheEvictor::new(value.$ident), - }, - )* - } - } - } - - #[doc = "The set of registrations for each table, as used in the compressed block header"] - #[derive(Debug, Clone, PartialEq, Default, serde::Serialize, serde::Deserialize)] - pub struct RegistrationsPerTable { - $(pub $ident: Vec<(RegistryKey, $type)>,)* - } - - impl From for RegistrationsPerTable { - fn from(value: CompressCtxKeyspaces) -> Self { - let mut result = Self::default(); - $( - for (key, value) in value.$ident.changes.into_iter() { - result.$ident.push((key, value)); - } - )* - result - } - } - - pub trait TemporalRegistryAll - where - Self: Sized, - $(Self: TemporalRegistry<$type> + EvictorDb<$type>,)* - {} - - impl TemporalRegistryAll for T - where - T: Sized, - $(T: TemporalRegistry<$type> + EvictorDb<$type>,)* - {} - - - impl RegistrationsPerTable { - pub(crate) fn write_to_registry(&self, registry: &mut R) -> 
anyhow::Result<()> - where - R: TemporalRegistryAll - { - $( - for (key, value) in self.$ident.iter() { - registry.write_registry(key, value)?; - } - )* - - Ok(()) - } - } - - $( - impl CompressibleBy> for $type - where - D: CompressDb - { - async fn compress_with( - &self, - ctx: &mut PrepareCtx, - ) -> anyhow::Result { - if *self == <$type>::default() { - return Ok(RegistryKey::ZERO); - } - if let Some(found) = ctx.db.registry_index_lookup(self)? { - ctx.accessed_keys.$ident.insert(found); - } - Ok(RegistryKey::ZERO) - } - } - - impl CompressibleBy> for $type - where - D: CompressDb - { - async fn compress_with( - &self, - ctx: &mut CompressCtx, - ) -> anyhow::Result { - if self == &Default::default() { - return Ok(RegistryKey::DEFAULT_VALUE); - } - if let Some(found) = ctx.db.registry_index_lookup(self)? { - return Ok(found); - } - - let key = ctx.per_keyspace.$ident.cache_evictor.next_key(&mut ctx.db)?; - let old = ctx.per_keyspace.$ident.changes.insert(key, self.clone()); - assert!(old.is_none(), "Key collision in registry substitution"); - Ok(key) - } - } - - impl DecompressibleBy> for $type - where - D: DecompressDb - { - async fn decompress_with( - key: RegistryKey, - ctx: &DecompressCtx, - ) -> anyhow::Result { - if key == RegistryKey::DEFAULT_VALUE { - return Ok(<$type>::default()); - } - ctx.db.read_registry(&key) - } - } - )* - }}; -} - -tables!( - address: Address, - asset_id: AssetId, - contract_id: ContractId, - script_code: ScriptCode, - predicate_code: PredicateCode -); - -// TODO: move inside the macro when this stabilizes: https://github.com/rust-lang/rust/pull/122808 -#[cfg(any(test, feature = "test-helpers"))] -impl rand::prelude::Distribution for rand::distributions::Standard { - fn sample(&self, rng: &mut R) -> RegistryKeyspace { - match rng.gen_range(0..5) { - 0 => RegistryKeyspace::Address, - 1 => RegistryKeyspace::AssetId, - 2 => RegistryKeyspace::ContractId, - 3 => RegistryKeyspace::ScriptCode, - _ => RegistryKeyspace::PredicateCode, - } 
- } -} From eda9e77797c76d4abe0b451958be421c897a44e1 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 26 Sep 2024 18:10:16 +0300 Subject: [PATCH 084/112] Reduce DB access in evictor by caching the latest key --- Cargo.lock | 1 + crates/compression/src/compress.rs | 34 ++++++++++--------- crates/compression/src/eviction_policy.rs | 40 +++++++++++++++-------- 3 files changed, 47 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cc80d82b354..2900472fba7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3362,6 +3362,7 @@ name = "fuel-core-compression" version = "0.36.0" dependencies = [ "anyhow", + "fuel-core-compression", "fuel-core-types 0.36.0", "paste", "postcard", diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index f9f1ee6fdda..64fac395580 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -48,9 +48,9 @@ where let mut prepare_ctx = PrepareCtx::new(&mut db); let _ = target.compress_with(&mut prepare_ctx).await?; - let mut ctx = prepare_ctx.into_compression_context(); + let mut ctx = prepare_ctx.into_compression_context()?; let transactions = target.compress_with(&mut ctx).await?; - let registrations: RegistrationsPerTable = ctx.into(); + let registrations: RegistrationsPerTable = ctx.finalize()?; // Apply changes to the db registrations.write_to_registry(&mut db)?; @@ -122,31 +122,35 @@ macro_rules! compression { $($ident: CompressCtxKeyspace<$type>,)* } - impl PrepareCtx { + impl PrepareCtx where D: CompressDb { /// Converts the preparation context into a [`CompressCtx`] /// keeping accessed keys to avoid its eviction during compression. - pub fn into_compression_context(self) -> CompressCtx { - CompressCtx { - db: self.db, + /// Initializes the cache evictors from the database, which may fail. 
+ pub fn into_compression_context(mut self) -> anyhow::Result> { + Ok(CompressCtx { $( $ident: CompressCtxKeyspace { changes: Default::default(), - cache_evictor: CacheEvictor::new(self.accessed_keys.$ident.into()), + cache_evictor: CacheEvictor::new_from_db(&mut self.db, self.accessed_keys.$ident.into())?, }, )* - } + db: self.db, + }) } } - impl From> for RegistrationsPerTable { - fn from(value: CompressCtx) -> Self { - let mut result = Self::default(); + impl CompressCtx where D: CompressDb { + /// Finalizes the compression context, returning the changes to the registry. + /// Commits the cache evictor states to the database. + fn finalize(mut self) -> anyhow::Result { + let mut registrations = RegistrationsPerTable::default(); $( - for (key, value) in value.$ident.changes.into_iter() { - result.$ident.push((key, value)); + self.$ident.cache_evictor.commit(&mut self.db)?; + for (key, value) in self.$ident.changes.into_iter() { + registrations.$ident.push((key, value)); } )* - result + Ok(registrations) } } @@ -184,7 +188,7 @@ macro_rules! 
compression { return Ok(found); } - let key = ctx.$ident.cache_evictor.next_key(&mut ctx.db)?; + let key = ctx.$ident.cache_evictor.next_key(); let old = ctx.$ident.changes.insert(key, self.clone()); assert!(old.is_none(), "Key collision in registry substitution"); Ok(key) diff --git a/crates/compression/src/eviction_policy.rs b/crates/compression/src/eviction_policy.rs index 06b48213e6b..5639475d264 100644 --- a/crates/compression/src/eviction_policy.rs +++ b/crates/compression/src/eviction_policy.rs @@ -6,37 +6,51 @@ use crate::ports::EvictorDb; /// Evictor for a single keyspace #[derive(Debug)] +#[must_use = "Evictor must be committed to the database to persist state"] pub(crate) struct CacheEvictor { /// Set of keys that must not be evicted keep_keys: HashSet, + /// Next key to be used + next_key: RegistryKey, + /// Marker for the keyspace type _keyspace_marker: std::marker::PhantomData, } impl CacheEvictor { - pub fn new(keep_keys: HashSet) -> Self { - Self { + /// Create new evictor, reading state from the database + pub fn new_from_db( + db: &mut D, + keep_keys: HashSet, + ) -> anyhow::Result + where + D: EvictorDb, + { + Ok(Self { keep_keys, + next_key: db.read_latest()?, _keyspace_marker: std::marker::PhantomData, - } + }) } - pub fn next_key(&mut self, db: &mut D) -> anyhow::Result - where - D: EvictorDb, - { + pub fn next_key(&mut self) -> RegistryKey { // Pick first key not in the set // TODO: use a proper algo, maybe LRU? 
- let mut key = db.read_latest()?; debug_assert!(self.keep_keys.len() < 2usize.pow(24).saturating_sub(2)); - while self.keep_keys.contains(&key) { - key = key.next(); + while self.keep_keys.contains(&self.next_key) { + self.next_key = self.next_key.next(); } - db.write_latest(key)?; + self.keep_keys.insert(self.next_key); + self.next_key + } - self.keep_keys.insert(key); - Ok(key) + /// Commit the current state of the evictor to the database + pub fn commit(self, db: &mut D) -> anyhow::Result<()> + where + D: EvictorDb, + { + db.write_latest(self.next_key) } } From 331531d18723b8372b07969900c3b683f0e1d9bd Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 26 Sep 2024 18:13:18 +0300 Subject: [PATCH 085/112] Convert panic into an error --- crates/fuel-core/src/graphql_api/da_compression.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index 20369eea78b..4a281d0356a 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -185,6 +185,6 @@ where _ => {} } } - panic!("UtxoId not found in the block events"); + anyhow::bail!("UtxoId not found in the block events"); } } From ee3e844626238e54e200ddc88965e232168c90d8 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Fri, 27 Sep 2024 06:19:21 +0300 Subject: [PATCH 086/112] Replace the old index values correctly --- .../src/graphql_api/da_compression.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index 4a281d0356a..4832ee8ddce 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -87,21 +87,23 @@ macro_rules! 
impl_temporal_registry { value: &$type, ) -> anyhow::Result<()> { // Write the actual value - self.db_tx + let old_value = self.db_tx .storage_as_mut::<[< DaCompressionTemporalRegistry $type >]>() - .insert(key, value)?; - - let value_in_index: [u8; 32] = ($index_value_fn)(value); + .replace(key, value)?; // Remove the overwritten value from index, if any - self.db_tx - .storage_as_mut::<[< DaCompressionTemporalRegistryIndex $type >]>() - .remove(&value_in_index)?; + if let Some(old_value) = old_value { + let old_value_in_index: [u8; 32] = ($index_value_fn)(&old_value); + self.db_tx + .storage_as_mut::<[< DaCompressionTemporalRegistryIndex $type >]>() + .remove(&old_value_in_index)?; + } // Add the new value to the index + let new_value_in_index: [u8; 32] = ($index_value_fn)(value); self.db_tx .storage_as_mut::<[< DaCompressionTemporalRegistryIndex $type >]>() - .insert(&value_in_index, key)?; + .insert(&new_value_in_index, key)?; Ok(()) } From b5edb0d51bbc5669ecc6660d73e2a1d05703e4b8 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Fri, 27 Sep 2024 06:22:57 +0300 Subject: [PATCH 087/112] Rename EvictorDb methods --- crates/compression/src/decompress.rs | 7 +++++-- crates/compression/src/eviction_policy.rs | 4 ++-- crates/compression/src/ports.rs | 13 ++++++------- crates/fuel-core/src/graphql_api/da_compression.rs | 4 ++-- 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/crates/compression/src/decompress.rs b/crates/compression/src/decompress.rs index db6ad7267f6..5d50eb7c6fe 100644 --- a/crates/compression/src/decompress.rs +++ b/crates/compression/src/decompress.rs @@ -274,11 +274,14 @@ mod tests { } impl EvictorDb<$type> for MockDb { - fn write_latest(&mut self, _key: RegistryKey) -> anyhow::Result<()> { + fn set_latest_assigned_key( + &mut self, + _key: RegistryKey, + ) -> anyhow::Result<()> { todo!() } - fn read_latest(&self) -> anyhow::Result { + fn get_latest_assigned_key(&self) -> anyhow::Result { todo!() } } diff --git 
a/crates/compression/src/eviction_policy.rs b/crates/compression/src/eviction_policy.rs index 5639475d264..3ef2ac0d60c 100644 --- a/crates/compression/src/eviction_policy.rs +++ b/crates/compression/src/eviction_policy.rs @@ -27,7 +27,7 @@ impl CacheEvictor { { Ok(Self { keep_keys, - next_key: db.read_latest()?, + next_key: db.get_latest_assigned_key()?, _keyspace_marker: std::marker::PhantomData, }) } @@ -51,6 +51,6 @@ impl CacheEvictor { where D: EvictorDb, { - db.write_latest(self.next_key) + db.set_latest_assigned_key(self.next_key) } } diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index 043a84b41aa..dd161235eef 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -84,20 +84,19 @@ pub struct MessageInfo { /// Evictor registry to keep track of the latest used key for the type `T`. pub trait EvictorDb { - fn read_latest(&self) -> anyhow::Result; - - fn write_latest(&mut self, key: RegistryKey) -> anyhow::Result<()>; + fn get_latest_assigned_key(&self) -> anyhow::Result; + fn set_latest_assigned_key(&mut self, key: RegistryKey) -> anyhow::Result<()>; } impl EvictorDb for &mut D where D: EvictorDb, { - fn read_latest(&self) -> anyhow::Result { - >::read_latest(self) + fn get_latest_assigned_key(&self) -> anyhow::Result { + >::get_latest_assigned_key(self) } - fn write_latest(&mut self, key: RegistryKey) -> anyhow::Result<()> { - >::write_latest(self, key) + fn set_latest_assigned_key(&mut self, key: RegistryKey) -> anyhow::Result<()> { + >::set_latest_assigned_key(self, key) } } diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index 4832ee8ddce..debcc88bbb3 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -126,7 +126,7 @@ macro_rules! 
impl_temporal_registry { where Tx: OffChainDatabaseTransaction, { - fn write_latest( + fn set_latest_assigned_key( &mut self, key: fuel_core_types::fuel_compression::RegistryKey, ) -> anyhow::Result<()> { @@ -136,7 +136,7 @@ macro_rules! impl_temporal_registry { Ok(()) } - fn read_latest( + fn get_latest_assigned_key( &self, ) -> anyhow::Result { Ok(self From 9b4cbb0d341e5d536dbfacf621310ed5138532a6 Mon Sep 17 00:00:00 2001 From: Green Baneling Date: Fri, 27 Sep 2024 05:56:21 +0200 Subject: [PATCH 088/112] Minimized the number of the tables for metadata and reverse index (#2259) - Minimized the number of tables for metadata and reverse index since we don't need control over this information. - Use explicitly the `VersionedCompressedBlock` type as the input type for compression and decompression. It is up to the `fuel-core` decide how to represent the final compressed block(postcard or something else). - Added initialization of the `next_key`. - Use a more performant codec for each table of the type instead of a Postcard codec. 
--------- Co-authored-by: Hannes Karppila <2204863+Dentosal@users.noreply.github.com> --- Cargo.lock | 3 +- crates/compression/Cargo.toml | 3 +- crates/compression/src/compress.rs | 12 +- crates/compression/src/decompress.rs | 25 ++-- crates/compression/src/eviction_policy.rs | 9 +- crates/compression/src/lib.rs | 27 ++-- crates/compression/src/ports.rs | 4 +- crates/compression/src/registry.rs | 12 +- crates/fuel-core/Cargo.toml | 1 - .../src/graphql_api/da_compression.rs | 65 ++++---- crates/fuel-core/src/graphql_api/ports.rs | 28 +--- crates/fuel-core/src/graphql_api/storage.rs | 36 ++--- .../src/graphql_api/storage/da_compression.rs | 141 ++++++++++-------- .../storage/da_compression/metadata_key.rs | 34 +++++ .../da_compression/predicate_code_codec.rs | 28 ++++ .../storage/da_compression/reverse_key.rs | 82 ++++++++++ .../da_compression/script_code_codec.rs | 28 ++++ .../service/adapters/graphql_api/off_chain.rs | 30 ++-- crates/storage/src/lib.rs | 8 +- crates/types/src/blockchain/header.rs | 4 +- 20 files changed, 375 insertions(+), 205 deletions(-) create mode 100644 crates/fuel-core/src/graphql_api/storage/da_compression/metadata_key.rs create mode 100644 crates/fuel-core/src/graphql_api/storage/da_compression/predicate_code_codec.rs create mode 100644 crates/fuel-core/src/graphql_api/storage/da_compression/reverse_key.rs create mode 100644 crates/fuel-core/src/graphql_api/storage/da_compression/script_code_codec.rs diff --git a/Cargo.lock b/Cargo.lock index 2900472fba7..14bed586801 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3206,7 +3206,6 @@ dependencies = [ "rocksdb", "serde", "serde_json", - "sha2 0.10.8", "strum 0.25.0", "strum_macros 0.25.3", "tempfile", @@ -3369,6 +3368,8 @@ dependencies = [ "proptest", "rand", "serde", + "strum 0.25.0", + "strum_macros 0.25.3", "tokio", ] diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index cb771dc7afa..79390c351c4 100644 --- a/crates/compression/Cargo.toml +++ 
b/crates/compression/Cargo.toml @@ -24,9 +24,10 @@ fuel-core-types = { workspace = true, features = [ "da-compression", ] } paste = "1" -postcard = "1.0" rand = { workspace = true, optional = true } serde = { version = "1.0", features = ["derive"] } +strum = { workspace = true } +strum_macros = { workspace = true } [dev-dependencies] fuel-core-compression = { path = ".", features = ["test-helpers"] } diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 64fac395580..d375e1dc57d 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -6,8 +6,8 @@ use crate::{ RegistrationsPerTable, TemporalRegistryAll, }, - CompressedBlock, CompressedBlockPayloadV0, + VersionedCompressedBlock, }; use fuel_core_types::{ blockchain::block::Block, @@ -39,7 +39,10 @@ pub trait CompressDb: TemporalRegistryAll + UtxoIdToPointer {} impl CompressDb for T where T: TemporalRegistryAll + UtxoIdToPointer {} /// This must be called for all new blocks in sequence, otherwise the result will be garbage. -pub async fn compress(mut db: D, block: &Block) -> anyhow::Result> +pub async fn compress( + mut db: D, + block: &Block, +) -> anyhow::Result where D: CompressDb, { @@ -63,10 +66,7 @@ where transactions, }; - let compressed = postcard::to_allocvec(&CompressedBlock::V0(compact)) - .expect("Serialization cannot fail"); - - Ok(compressed) + Ok(VersionedCompressedBlock::V0(compact)) } /// Preparation pass through the block to collect all keys accessed during compression. 
diff --git a/crates/compression/src/decompress.rs b/crates/compression/src/decompress.rs index 5d50eb7c6fe..47eca16f117 100644 --- a/crates/compression/src/decompress.rs +++ b/crates/compression/src/decompress.rs @@ -4,7 +4,7 @@ use crate::{ TemporalRegistry, }, registry::TemporalRegistryAll, - CompressedBlock, + VersionedCompressedBlock, }; use fuel_core_types::{ blockchain::block::PartialFuelBlock, @@ -45,12 +45,14 @@ pub trait DecompressDb: TemporalRegistryAll + HistoryLookup {} impl DecompressDb for T where T: TemporalRegistryAll + HistoryLookup {} /// This must be called for all decompressed blocks in sequence, otherwise the result will be garbage. -pub async fn decompress(mut db: D, block: Vec) -> anyhow::Result +pub async fn decompress( + mut db: D, + block: VersionedCompressedBlock, +) -> anyhow::Result where D: DecompressDb, { - let compressed: CompressedBlock = postcard::from_bytes(&block)?; - let CompressedBlock::V0(compressed) = compressed; + let VersionedCompressedBlock::V0(compressed) = block; // TODO: merkle root verification: https://github.com/FuelLabs/fuel-core/issues/2232 @@ -281,7 +283,7 @@ mod tests { todo!() } - fn get_latest_assigned_key(&self) -> anyhow::Result { + fn get_latest_assigned_key(&self) -> anyhow::Result> { todo!() } } @@ -303,12 +305,17 @@ mod tests { Unknown, } - let block = + // Given + let bad_block = postcard::to_stdvec(&CompressedBlockWithNewVersions::NewVersion(1234)) .unwrap(); - decompress(MockDb, block) - .await - .expect_err("Decompression should fail gracefully"); + // When + let result: Result = + postcard::from_bytes(&bad_block); + + // Then + let _ = + result.expect_err("should fail to deserialize because of unknown version"); } } diff --git a/crates/compression/src/eviction_policy.rs b/crates/compression/src/eviction_policy.rs index 3ef2ac0d60c..6343de83d8c 100644 --- a/crates/compression/src/eviction_policy.rs +++ b/crates/compression/src/eviction_policy.rs @@ -25,9 +25,16 @@ impl CacheEvictor { where D: 
EvictorDb, { + let latest_key = db.get_latest_assigned_key()?; + let next_key = if let Some(latest_key) = latest_key { + latest_key.next() + } else { + RegistryKey::ZERO + }; + Ok(Self { keep_keys, - next_key: db.get_latest_assigned_key()?, + next_key, _keyspace_marker: std::marker::PhantomData, }) } diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 35e9b183e06..42c13882899 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -11,11 +11,6 @@ mod registry; pub use registry::RegistryKeyspace; -use serde::{ - Deserialize, - Serialize, -}; - use fuel_core_types::{ blockchain::header::PartialBlockHeader, fuel_tx::CompressedTransaction, @@ -24,24 +19,30 @@ use fuel_core_types::{ use registry::RegistrationsPerTable; /// Compressed block, without the preceding version byte. -#[derive(Clone, Serialize, Deserialize)] -struct CompressedBlockPayloadV0 { +#[derive(Debug, Default, Clone, PartialEq, serde::Serialize, serde::Deserialize)] +pub struct CompressedBlockPayloadV0 { /// Temporal registry insertions - registrations: RegistrationsPerTable, + pub registrations: RegistrationsPerTable, /// Merkle root of the temporal registry state - registrations_root: Bytes32, + pub registrations_root: Bytes32, /// Compressed block header - header: PartialBlockHeader, + pub header: PartialBlockHeader, /// Compressed transactions - transactions: Vec, + pub transactions: Vec, } /// Versioned compressed block. 
-#[derive(Clone, Serialize, Deserialize)] -enum CompressedBlock { +#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)] +pub enum VersionedCompressedBlock { V0(CompressedBlockPayloadV0), } +impl Default for VersionedCompressedBlock { + fn default() -> Self { + Self::V0(Default::default()) + } +} + #[cfg(test)] mod tests { use fuel_core_compression as _; diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index dd161235eef..6490ed6af34 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -84,7 +84,7 @@ pub struct MessageInfo { /// Evictor registry to keep track of the latest used key for the type `T`. pub trait EvictorDb { - fn get_latest_assigned_key(&self) -> anyhow::Result; + fn get_latest_assigned_key(&self) -> anyhow::Result>; fn set_latest_assigned_key(&mut self, key: RegistryKey) -> anyhow::Result<()>; } @@ -92,7 +92,7 @@ impl EvictorDb for &mut D where D: EvictorDb, { - fn get_latest_assigned_key(&self) -> anyhow::Result { + fn get_latest_assigned_key(&self) -> anyhow::Result> { >::get_latest_assigned_key(self) } diff --git a/crates/compression/src/registry.rs b/crates/compression/src/registry.rs index 819002be616..a00dc8bc998 100644 --- a/crates/compression/src/registry.rs +++ b/crates/compression/src/registry.rs @@ -16,7 +16,7 @@ use fuel_core_types::{ macro_rules! tables { ($($ident:ty: $type:ty),*) => { paste::paste! { #[doc = "RegistryKey namespaces"] - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize, strum_macros::EnumCount)] pub enum RegistryKeyspace { $( [<$type>], @@ -24,7 +24,7 @@ macro_rules! tables { } #[doc = "A value for each keyspace"] - #[derive(Debug, Clone, Default)] + #[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct PerRegistryKeyspace { $(pub $ident: T,)* } @@ -50,7 +50,7 @@ macro_rules! 
tables { } #[doc = "The set of registrations for each table, as used in the compressed block header"] - #[derive(Debug, Clone, PartialEq, Default, serde::Serialize, serde::Deserialize)] + #[derive(Debug, Clone, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] pub struct RegistrationsPerTable { $(pub $ident: Vec<(RegistryKey, $type)>,)* } @@ -97,12 +97,14 @@ tables!( #[cfg(any(test, feature = "test-helpers"))] impl rand::prelude::Distribution for rand::distributions::Standard { fn sample(&self, rng: &mut R) -> RegistryKeyspace { - match rng.gen_range(0..5) { + use strum::EnumCount; + match rng.gen_range(0..RegistryKeyspace::COUNT) { 0 => RegistryKeyspace::Address, 1 => RegistryKeyspace::AssetId, 2 => RegistryKeyspace::ContractId, 3 => RegistryKeyspace::ScriptCode, - _ => RegistryKeyspace::PredicateCode, + 4 => RegistryKeyspace::PredicateCode, + _ => unreachable!("New keyspace is added but not supported here"), } } } diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index ac1626659aa..b21dc94bb8e 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -54,7 +54,6 @@ rocksdb = { version = "0.21", default-features = false, features = [ ], optional = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true, features = ["raw_value"] } -sha2 = "0.10.8" strum = { workspace = true, features = ["derive"] } strum_macros = { workspace = true } tempfile = { workspace = true, optional = true } diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index debcc88bbb3..3f40ea2080d 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -1,6 +1,9 @@ -use crate::{ - fuel_core_graphql_api::ports::worker::OffChainDatabaseTransaction, - graphql_api::storage::da_compression::*, +use crate::fuel_core_graphql_api::{ + ports::worker::OffChainDatabaseTransaction, + 
storage::da_compression::{ + metadata_key::MetadataKey, + *, + }, }; use fuel_core_compression::{ compress::compress, @@ -27,10 +30,6 @@ use fuel_core_types::{ services::executor::Event, }; use futures::FutureExt; -use sha2::{ - Digest, - Sha256, -}; /// Performs DA compression for a block and stores it in the database. pub fn da_compress_block( @@ -64,7 +63,7 @@ struct CompressTx<'a, Tx> { } macro_rules! impl_temporal_registry { - ($type:ty, $index_value_fn:expr) => { paste::paste! { + ($type:ty) => { paste::paste! { impl<'a, Tx> TemporalRegistry<$type> for CompressTx<'a, Tx> where Tx: OffChainDatabaseTransaction, @@ -93,17 +92,17 @@ macro_rules! impl_temporal_registry { // Remove the overwritten value from index, if any if let Some(old_value) = old_value { - let old_value_in_index: [u8; 32] = ($index_value_fn)(&old_value); + let old_reverse_key = old_value.into(); self.db_tx - .storage_as_mut::<[< DaCompressionTemporalRegistryIndex $type >]>() - .remove(&old_value_in_index)?; + .storage_as_mut::() + .remove(&old_reverse_key)?; } // Add the new value to the index - let new_value_in_index: [u8; 32] = ($index_value_fn)(value); + let reverse_key = old_value.into(); self.db_tx - .storage_as_mut::<[< DaCompressionTemporalRegistryIndex $type >]>() - .insert(&new_value_in_index, key)?; + .storage_as_mut::() + .insert(&reverse_key, key)?; Ok(()) } @@ -113,11 +112,11 @@ macro_rules! impl_temporal_registry { value: &$type, ) -> anyhow::Result> { - let value_in_index: [u8; 32] = ($index_value_fn)(value); + let reverse_key = value.into(); Ok(self .db_tx - .storage_as_ref::<[< DaCompressionTemporalRegistryIndex $type >]>() - .get(&value_in_index)? + .storage_as_ref::() + .get(&reverse_key)? .map(|v| v.into_owned())) } } @@ -131,39 +130,31 @@ macro_rules! 
impl_temporal_registry { key: fuel_core_types::fuel_compression::RegistryKey, ) -> anyhow::Result<()> { self.db_tx - .storage_as_mut::<[< DaCompressionTemporalRegistryEvictor $type >]>() - .insert(&(), &key)?; + .storage_as_mut::() + .insert(&MetadataKey::$type, &key)?; Ok(()) } fn get_latest_assigned_key( &self, - ) -> anyhow::Result { + ) -> anyhow::Result> { Ok(self .db_tx - .storage_as_ref::<[< DaCompressionTemporalRegistryEvictor $type >]>() - .get(&())? - .ok_or(not_found!([< DaCompressionTemporalRegistryEvictor $type >]))? - .into_owned()) + .storage_as_ref::() + .get(&MetadataKey::$type)? + .map(|v| v.into_owned()) + ) } } }}; } -impl_temporal_registry!(Address, |v: &Address| **v); -impl_temporal_registry!(AssetId, |v: &AssetId| **v); -impl_temporal_registry!(ContractId, |v: &ContractId| **v); -impl_temporal_registry!(ScriptCode, |v: &ScriptCode| { - let mut hasher = Sha256::new(); - hasher.update(&v.bytes); - hasher.finalize().into() -}); -impl_temporal_registry!(PredicateCode, |v: &PredicateCode| { - let mut hasher = Sha256::new(); - hasher.update(&v.bytes); - hasher.finalize().into() -}); +impl_temporal_registry!(Address); +impl_temporal_registry!(AssetId); +impl_temporal_registry!(ContractId); +impl_temporal_registry!(ScriptCode); +impl_temporal_registry!(PredicateCode); impl<'a, Tx> UtxoIdToPointer for CompressTx<'a, Tx> where diff --git a/crates/fuel-core/src/graphql_api/ports.rs b/crates/fuel-core/src/graphql_api/ports.rs index 6b3da08384c..3edfddeea37 100644 --- a/crates/fuel-core/src/graphql_api/ports.rs +++ b/crates/fuel-core/src/graphql_api/ports.rs @@ -322,7 +322,8 @@ pub mod worker { fn transaction(&mut self) -> Self::Transaction<'_>; } - pub trait OffChainDatabaseTransaction: StorageMutate + pub trait OffChainDatabaseTransaction: + StorageMutate + StorageMutate + StorageMutate + StorageMutate @@ -333,29 +334,12 @@ pub mod worker { + StorageMutate + StorageMutate + StorageMutate - + StorageMutate - + StorageMutate + StorageMutate - + 
StorageMutate - + StorageMutate + StorageMutate - + StorageMutate - + StorageMutate< - DaCompressionTemporalRegistryEvictorContractId, - Error = StorageError, - > + StorageMutate - + StorageMutate - + StorageMutate< - DaCompressionTemporalRegistryEvictorScriptCode, - Error = StorageError, - > + StorageMutate - + StorageMutate< - DaCompressionTemporalRegistryIndexPredicateCode, - Error = StorageError, - > + StorageMutate< - DaCompressionTemporalRegistryEvictorPredicateCode, - Error = StorageError, - > + + StorageMutate + + StorageMutate + + StorageMutate + + StorageMutate { fn record_tx_id_owner( &mut self, diff --git a/crates/fuel-core/src/graphql_api/storage.rs b/crates/fuel-core/src/graphql_api/storage.rs index b1f8648949b..d06af3aef86 100644 --- a/crates/fuel-core/src/graphql_api/storage.rs +++ b/crates/fuel-core/src/graphql_api/storage.rs @@ -97,38 +97,20 @@ pub enum Column { /// DA compression and postcard serialized blocks. /// See [`DaCompressedBlocks`](da_compression::DaCompressedBlocks) DaCompressedBlocks = 14, - /// DA compression metadata. 
- DaCompressionMetadata = 15, + /// See [`DaCompressionTemporalRegistryIndex`](da_compression::DaCompressionTemporalRegistryIndex) + DaCompressionTemporalRegistryIndex = 15, + /// See [`DaCompressionTemporalRegistryMetadata`](da_compression::DaCompressionTemporalRegistryMetadata) + DaCompressionTemporalRegistryMetadata = 16, /// See [`DaCompressionTemporalRegistryAddress`](da_compression::DaCompressionTemporalRegistryAddress) - DaCompressionTemporalRegistryAddress = 16, - /// See [`DaCompressionTemporalRegistryIndexAddress`](da_compression::DaCompressionTemporalRegistryIndexAddress) - DaCompressionTemporalRegistryIndexAddress = 17, - /// See [`DaCompressionTemporalRegistryEvictorAddress`](da_compression::DaCompressionTemporalRegistryEvictorAddress) - DaCompressionTemporalRegistryEvictorAddress = 18, + DaCompressionTemporalRegistryAddress = 17, /// See [`DaCompressionTemporalRegistryAssetId`](da_compression::DaCompressionTemporalRegistryAssetId) - DaCompressionTemporalRegistryAssetId = 19, - /// See [`DaCompressionTemporalRegistryIndexAssetId`](da_compression::DaCompressionTemporalRegistryIndexAssetId) - DaCompressionTemporalRegistryIndexAssetId = 20, - /// See [`DaCompressionTemporalRegistryEvictorAssetId`](da_compression::DaCompressionTemporalRegistryEvictorAssetId) - DaCompressionTemporalRegistryEvictorAssetId = 21, + DaCompressionTemporalRegistryAssetId = 18, /// See [`DaCompressionTemporalRegistryContractId`](da_compression::DaCompressionTemporalRegistryContractId) - DaCompressionTemporalRegistryContractId = 22, - /// See [`DaCompressionTemporalRegistryIndexContractId`](da_compression::DaCompressionTemporalRegistryIndexContractId) - DaCompressionTemporalRegistryIndexContractId = 23, - /// See [`DaCompressionTemporalRegistryEvictorContractId`](da_compression::DaCompressionTemporalRegistryEvictorContractId) - DaCompressionTemporalRegistryEvictorContractId = 24, + DaCompressionTemporalRegistryContractId = 19, /// See 
[`DaCompressionTemporalRegistryScriptCode`](da_compression::DaCompressionTemporalRegistryScriptCode) - DaCompressionTemporalRegistryScriptCode = 25, - /// See [`DaCompressionTemporalRegistryIndexScriptCode`](da_compression::DaCompressionTemporalRegistryIndexScriptCode) - DaCompressionTemporalRegistryIndexScriptCode = 26, - /// See [`DaCompressionTemporalRegistryEvictorScriptCode`](da_compression::DaCompressionTemporalRegistryEvictorScriptCode) - DaCompressionTemporalRegistryEvictorScriptCode = 27, + DaCompressionTemporalRegistryScriptCode = 20, /// See [`DaCompressionTemporalRegistryPredicateCode`](da_compression::DaCompressionTemporalRegistryPredicateCode) - DaCompressionTemporalRegistryPredicateCode = 28, - /// See [`DaCompressionTemporalRegistryIndexPredicateCode`](da_compression::DaCompressionTemporalRegistryIndexPredicateCode) - DaCompressionTemporalRegistryIndexPredicateCode = 29, - /// See [`DaCompressionTemporalRegistryEvictorPredicateCode`](da_compression::DaCompressionTemporalRegistryEvictorPredicateCode) - DaCompressionTemporalRegistryEvictorPredicateCode = 30, + DaCompressionTemporalRegistryPredicateCode = 21, } impl Column { diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression.rs b/crates/fuel-core/src/graphql_api/storage/da_compression.rs index b9f18216db0..87831de2e77 100644 --- a/crates/fuel-core/src/graphql_api/storage/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/storage/da_compression.rs @@ -1,3 +1,10 @@ +use crate::fuel_core_graphql_api::storage::da_compression::{ + metadata_key::MetadataKey, + predicate_code_codec::PredicateCodeCodec, + reverse_key::ReverseKey, + script_code_codec::ScriptCodeCodec, +}; +use fuel_core_compression::VersionedCompressedBlock; use fuel_core_storage::{ blueprint::plain::Plain, codec::{ @@ -20,17 +27,23 @@ use fuel_core_types::{ fuel_types::BlockHeight, }; +pub mod metadata_key; +pub mod predicate_code_codec; +pub mod reverse_key; +pub mod script_code_codec; + +/// The table for the 
compressed blocks sent to DA. pub struct DaCompressedBlocks; impl Mappable for DaCompressedBlocks { type Key = Self::OwnedKey; type OwnedKey = BlockHeight; type Value = Self::OwnedValue; - type OwnedValue = Vec; + type OwnedValue = VersionedCompressedBlock; } impl TableWithBlueprint for DaCompressedBlocks { - type Blueprint = Plain, Raw>; + type Blueprint = Plain, Postcard>; type Column = super::Column; fn column() -> Self::Column { @@ -38,63 +51,67 @@ impl TableWithBlueprint for DaCompressedBlocks { } } -macro_rules! temporal_registry { - ($type:ty) => { - paste::paste! { - pub struct [< DaCompressionTemporalRegistry $type >]; +/// Mapping from the type to the register key in the temporal registry. +pub struct DaCompressionTemporalRegistryIndex; - impl Mappable for [< DaCompressionTemporalRegistry $type >] { - type Key = Self::OwnedKey; - type OwnedKey = RegistryKey; - type Value = Self::OwnedValue; - type OwnedValue = $type; - } +impl Mappable for DaCompressionTemporalRegistryIndex { + type Key = Self::OwnedKey; + type OwnedKey = ReverseKey; + type Value = Self::OwnedValue; + type OwnedValue = RegistryKey; +} - impl TableWithBlueprint for [< DaCompressionTemporalRegistry $type >] { - type Blueprint = Plain; - type Column = super::Column; +impl TableWithBlueprint for DaCompressionTemporalRegistryIndex { + // TODO: Use Raw codec for value instead of Postcard + type Blueprint = Plain; + type Column = super::Column; - fn column() -> Self::Column { - Self::Column::[< DaCompressionTemporalRegistry $type >] - } - } + fn column() -> Self::Column { + Self::Column::DaCompressionTemporalRegistryIndex + } +} - pub struct [< DaCompressionTemporalRegistryIndex $type >]; +/// This table is used to hold "next key to evict" for each keyspace. +/// In the future we'll likely switch to use LRU or something, in which +/// case this table can be repurposed. 
+pub struct DaCompressionTemporalRegistryMetadata; - impl Mappable for [< DaCompressionTemporalRegistryIndex $type >] { - type Key = Self::OwnedKey; - type OwnedKey = [u8; 32]; // if the value is larger than 32 bytes, it's hashed - type Value = Self::OwnedValue; - type OwnedValue = RegistryKey; - } +impl Mappable for DaCompressionTemporalRegistryMetadata { + type Key = Self::OwnedKey; + type OwnedKey = MetadataKey; + type Value = Self::OwnedValue; + type OwnedValue = RegistryKey; +} - impl TableWithBlueprint for [< DaCompressionTemporalRegistryIndex $type >] { - type Blueprint = Plain; - type Column = super::Column; +impl TableWithBlueprint for DaCompressionTemporalRegistryMetadata { + // TODO: Use Raw codec for value instead of Postcard + type Blueprint = Plain; + type Column = super::Column; - fn column() -> Self::Column { - Self::Column::[< DaCompressionTemporalRegistryIndex $type >] - } - } + fn column() -> Self::Column { + Self::Column::DaCompressionTemporalRegistryMetadata + } +} - /// This table is used to hold "next key to evict" for each keyspace. - /// In the future we'll likely switch to use LRU or something, in which - /// case this table can be repurposed. - pub struct [< DaCompressionTemporalRegistryEvictor $type >]; +macro_rules! temporal_registry { + ($type:ty, $code:ty) => { + paste::paste! 
{ + pub struct [< DaCompressionTemporalRegistry $type >]; - impl Mappable for [< DaCompressionTemporalRegistryEvictor $type >] { + impl Mappable for [< DaCompressionTemporalRegistry $type >] { type Key = Self::OwnedKey; - type OwnedKey = (); + type OwnedKey = RegistryKey; type Value = Self::OwnedValue; - type OwnedValue = RegistryKey; + type OwnedValue = $type; } - impl TableWithBlueprint for [< DaCompressionTemporalRegistryEvictor $type >] { - type Blueprint = Plain; + impl TableWithBlueprint for [< DaCompressionTemporalRegistry $type >] { + // TODO: Use Raw codec for value instead of Postcard + type Blueprint = Plain; type Column = super::Column; fn column() -> Self::Column { - Self::Column::[< DaCompressionTemporalRegistryEvictor $type >] + Self::Column::[< DaCompressionTemporalRegistry $type >] } } @@ -107,34 +124,34 @@ macro_rules! temporal_registry { <[< DaCompressionTemporalRegistry $type >] as Mappable>::Value::default(), tests::generate_key ); - - #[cfg(test)] - fuel_core_storage::basic_storage_tests!( - [< DaCompressionTemporalRegistryIndex $type >], - [0u8; 32], - RegistryKey::ZERO - ); - - #[cfg(test)] - fuel_core_storage::basic_storage_tests!( - [< DaCompressionTemporalRegistryEvictor $type >], - (), - RegistryKey::ZERO - ); } }; } -temporal_registry!(Address); -temporal_registry!(AssetId); -temporal_registry!(ContractId); -temporal_registry!(ScriptCode); -temporal_registry!(PredicateCode); +temporal_registry!(Address, Raw); +temporal_registry!(AssetId, Raw); +temporal_registry!(ContractId, Raw); +temporal_registry!(ScriptCode, ScriptCodeCodec); +temporal_registry!(PredicateCode, PredicateCodeCodec); #[cfg(test)] mod tests { use super::*; + #[cfg(test)] + fuel_core_storage::basic_storage_tests!( + DaCompressionTemporalRegistryIndex, + ReverseKey::Address(Address::zeroed()), + RegistryKey::ZERO + ); + + #[cfg(test)] + fuel_core_storage::basic_storage_tests!( + DaCompressionTemporalRegistryMetadata, + MetadataKey::Address, + RegistryKey::ZERO + ); + 
fuel_core_storage::basic_storage_tests!( DaCompressedBlocks, ::Key::default(), diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression/metadata_key.rs b/crates/fuel-core/src/graphql_api/storage/da_compression/metadata_key.rs new file mode 100644 index 00000000000..2b356eb0eeb --- /dev/null +++ b/crates/fuel-core/src/graphql_api/storage/da_compression/metadata_key.rs @@ -0,0 +1,34 @@ +/// The metadata key used by `DaCompressionTemporalRegistryMetadata` table to +/// store progress of the evictor. +#[derive( + Debug, + Clone, + Copy, + PartialEq, + Eq, + serde::Serialize, + serde::Deserialize, + strum::EnumCount, +)] +pub enum MetadataKey { + Address, + AssetId, + ContractId, + ScriptCode, + PredicateCode, +} + +#[cfg(feature = "test-helpers")] +impl rand::distributions::Distribution for rand::distributions::Standard { + fn sample(&self, rng: &mut R) -> MetadataKey { + use strum::EnumCount; + match rng.next_u32() as usize % MetadataKey::COUNT { + 0 => MetadataKey::Address, + 1 => MetadataKey::AssetId, + 2 => MetadataKey::ContractId, + 3 => MetadataKey::ScriptCode, + 4 => MetadataKey::PredicateCode, + _ => unreachable!("New metadata key is added but not supported here"), + } + } +} diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression/predicate_code_codec.rs b/crates/fuel-core/src/graphql_api/storage/da_compression/predicate_code_codec.rs new file mode 100644 index 00000000000..6c165c09f3a --- /dev/null +++ b/crates/fuel-core/src/graphql_api/storage/da_compression/predicate_code_codec.rs @@ -0,0 +1,28 @@ +use fuel_core_storage::codec::{ + Decode, + Encode, +}; +use fuel_core_types::fuel_tx::input::PredicateCode; +use std::{ + borrow::Cow, + ops::Deref, +}; + +// TODO: Remove this codec when the `PredicateCode` implements +// `AsRef<[u8]>` and `TryFrom<[u8]>` and use `Raw` codec instead. 
+ +pub struct PredicateCodeCodec; + +impl Encode for PredicateCodeCodec { + type Encoder<'a> = Cow<'a, [u8]>; + + fn encode(t: &PredicateCode) -> Self::Encoder<'_> { + Cow::Borrowed(t.deref()) + } +} + +impl Decode for PredicateCodeCodec { + fn decode(bytes: &[u8]) -> anyhow::Result { + Ok(bytes.to_vec().into()) + } +} diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression/reverse_key.rs b/crates/fuel-core/src/graphql_api/storage/da_compression/reverse_key.rs new file mode 100644 index 00000000000..3c24d3a817f --- /dev/null +++ b/crates/fuel-core/src/graphql_api/storage/da_compression/reverse_key.rs @@ -0,0 +1,82 @@ +use fuel_core_types::{ + fuel_tx::{ + input::PredicateCode, + ScriptCode, + }, + fuel_types::{ + Address, + AssetId, + Bytes32, + ContractId, + }, +}; +use std::ops::Deref; + +#[derive( + Debug, + Clone, + Copy, + PartialEq, + Eq, + serde::Serialize, + serde::Deserialize, + strum::EnumCount, +)] +/// The reverse key for the temporal registry index. +/// By this key we can find the registry key from the temporal registry. +pub enum ReverseKey { + Address(Address), + AssetId(AssetId), + ContractId(ContractId), + /// Hash of the script code. + ScriptCode(Bytes32), + /// Hash of the predicate code. 
+ PredicateCode(Bytes32), +} + +impl From<&Address> for ReverseKey { + fn from(address: &Address) -> Self { + Self::Address(*address) + } +} + +impl From<&AssetId> for ReverseKey { + fn from(asset_id: &AssetId) -> Self { + Self::AssetId(*asset_id) + } +} + +impl From<&ContractId> for ReverseKey { + fn from(contract_id: &ContractId) -> Self { + Self::ContractId(*contract_id) + } +} + +impl From<&ScriptCode> for ReverseKey { + fn from(script_code: &ScriptCode) -> Self { + let hash = fuel_core_types::fuel_crypto::Hasher::hash(script_code.deref()); + ReverseKey::ScriptCode(hash) + } +} + +impl From<&PredicateCode> for ReverseKey { + fn from(predicate_code: &PredicateCode) -> Self { + let hash = fuel_core_types::fuel_crypto::Hasher::hash(predicate_code.deref()); + ReverseKey::PredicateCode(hash) + } +} + +#[cfg(feature = "test-helpers")] +impl rand::distributions::Distribution for rand::distributions::Standard { + fn sample(&self, rng: &mut R) -> ReverseKey { + use strum::EnumCount; + match rng.next_u32() as usize % ReverseKey::COUNT { + 0 => ReverseKey::Address(Address::default()), + 1 => ReverseKey::AssetId(AssetId::default()), + 2 => ReverseKey::ContractId(ContractId::default()), + 3 => ReverseKey::ScriptCode(Bytes32::default()), + 4 => ReverseKey::PredicateCode(Bytes32::default()), + _ => unreachable!("New reverse key is added but not supported here"), + } + } +} diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression/script_code_codec.rs b/crates/fuel-core/src/graphql_api/storage/da_compression/script_code_codec.rs new file mode 100644 index 00000000000..a4d6c8d1ac3 --- /dev/null +++ b/crates/fuel-core/src/graphql_api/storage/da_compression/script_code_codec.rs @@ -0,0 +1,28 @@ +use fuel_core_storage::codec::{ + Decode, + Encode, +}; +use fuel_core_types::fuel_tx::ScriptCode; +use std::{ + borrow::Cow, + ops::Deref, +}; + +// TODO: Remove this codec when the `ScriptCode` implements +// `AsRef<[u8]>` and `TryFrom<[u8]>` and use `Raw` codec instead. 
+ +pub struct ScriptCodeCodec; + +impl Encode for ScriptCodeCodec { + type Encoder<'a> = Cow<'a, [u8]>; + + fn encode(t: &ScriptCode) -> Self::Encoder<'_> { + Cow::Borrowed(t.deref()) + } +} + +impl Decode for ScriptCodeCodec { + fn decode(bytes: &[u8]) -> anyhow::Result { + Ok(bytes.to_vec().into()) + } +} diff --git a/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs b/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs index 9006a2a0cd3..092c83da438 100644 --- a/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs @@ -11,27 +11,29 @@ use crate::{ }, storage::{ contracts::ContractsInfo, + da_compression::DaCompressedBlocks, relayed_transactions::RelayedTransactionStatuses, transactions::OwnedTransactionIndexCursor, }, }, - graphql_api::storage::{ - da_compression::DaCompressedBlocks, - old::{ - OldFuelBlockConsensus, - OldFuelBlocks, - OldTransactions, - }, + graphql_api::storage::old::{ + OldFuelBlockConsensus, + OldFuelBlocks, + OldTransactions, }, }; use fuel_core_storage::{ + blueprint::BlueprintInspect, + codec::Encode, iter::{ BoxedIter, IntoBoxedIter, IterDirection, IteratorOverTable, }, + kv_store::KeyValueInspect, not_found, + structured_storage::TableWithBlueprint, transactional::{ IntoTransaction, StorageTransaction, @@ -73,10 +75,16 @@ impl OffChainDatabase for OffChainIterableKeyValueView { } fn da_compressed_block(&self, height: &BlockHeight) -> StorageResult> { - self.storage_as_ref::() - .get(height)? - .ok_or_else(|| not_found!("DaCompressedBlock")) - .map(std::borrow::Cow::into_owned) + let column = ::column(); + let encoder = + <::Blueprint as BlueprintInspect< + DaCompressedBlocks, + Self, + >>::KeyCodec::encode(height); + + self.get(encoder.as_ref(), column)? 
+ .ok_or_else(|| not_found!(DaCompressedBlocks)) + .map(|value| value.as_ref().clone()) } fn tx_status(&self, tx_id: &TxId) -> StorageResult { diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index ee971992a06..8fd5a0a9b80 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -70,7 +70,7 @@ pub enum Error { #[display(fmt = "error occurred in the underlying datastore `{_0:?}`")] DatabaseError(Box), /// This error should be created with `not_found` macro. - #[display(fmt = "resource of type `{_0}` was not found at the: {_1}")] + #[display(fmt = "resource was not found in table `{_0}` at the: {_1}")] NotFound(&'static str, &'static str), // TODO: Do we need this type at all? /// Unknown or not expected(by architecture) error. @@ -194,7 +194,7 @@ macro_rules! not_found { }; ($ty: path) => { $crate::Error::NotFound( - ::core::any::type_name::<<$ty as $crate::Mappable>::OwnedValue>(), + ::core::any::type_name::<$ty>(), concat!(file!(), ":", line!()), ) }; @@ -209,12 +209,12 @@ mod test { #[rustfmt::skip] assert_eq!( format!("{}", not_found!("BlockId")), - format!("resource of type `BlockId` was not found at the: {}:{}", file!(), line!() - 1) + format!("resource was not found in table `BlockId` at the: {}:{}", file!(), line!() - 1) ); #[rustfmt::skip] assert_eq!( format!("{}", not_found!(Coins)), - format!("resource of type `fuel_core_types::entities::coins::coin::CompressedCoin` was not found at the: {}:{}", file!(), line!() - 1) + format!("resource was not found in table `fuel_core_storage::tables::Coins` at the: {}:{}", file!(), line!() - 1) ); } } diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index 4575c9631b2..e487502c2bc 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -150,7 +150,7 @@ impl BlockHeader { #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 
-#[cfg_attr(any(test, feature = "test-helpers"), derive(Default))] +#[derive(Default)] /// A partially complete fuel block header that does not /// have any generated fields because it has not been executed yet. pub struct PartialBlockHeader { @@ -188,7 +188,6 @@ pub struct ApplicationHeader { pub generated: Generated, } -#[cfg(any(test, feature = "test-helpers"))] impl Default for ApplicationHeader where Generated: Default, @@ -543,7 +542,6 @@ impl ConsensusHeader { } } -#[cfg(any(test, feature = "test-helpers"))] impl Default for ConsensusHeader where T: Default, From 40a1a3dc2e4408168d7ec30a4ed4aab5b914ee7b Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Fri, 27 Sep 2024 07:09:18 +0300 Subject: [PATCH 089/112] Check if the change is already in the current compression batch --- crates/compression/src/compress.rs | 8 ++++++++ crates/fuel-core/src/graphql_api/da_compression.rs | 4 ++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index d375e1dc57d..6c36ba59f66 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -113,6 +113,8 @@ struct CompressCtxKeyspace { cache_evictor: CacheEvictor, /// Changes to the temporary registry, to be included in the compressed block header changes: HashMap, + /// Reverse lookup into changes + changes_lookup: HashMap, } macro_rules! compression { @@ -131,6 +133,7 @@ macro_rules! compression { $( $ident: CompressCtxKeyspace { changes: Default::default(), + changes_lookup: Default::default(), cache_evictor: CacheEvictor::new_from_db(&mut self.db, self.accessed_keys.$ident.into())?, }, )* @@ -187,10 +190,15 @@ macro_rules! compression { if let Some(found) = ctx.db.registry_index_lookup(self)? 
{ return Ok(found); } + if let Some(found) = ctx.$ident.changes_lookup.get(self) { + return Ok(*found); + } let key = ctx.$ident.cache_evictor.next_key(); let old = ctx.$ident.changes.insert(key, self.clone()); + let old_rev = ctx.$ident.changes_lookup.insert(self.clone(), key); assert!(old.is_none(), "Key collision in registry substitution"); + assert!(old_rev.is_none(), "Key collision in registry substitution"); Ok(key) } } diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index 3f40ea2080d..d249d7b522d 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -92,14 +92,14 @@ macro_rules! impl_temporal_registry { // Remove the overwritten value from index, if any if let Some(old_value) = old_value { - let old_reverse_key = old_value.into(); + let old_reverse_key = (&old_value).into(); self.db_tx .storage_as_mut::() .remove(&old_reverse_key)?; } // Add the new value to the index - let reverse_key = old_value.into(); + let reverse_key = value.into(); self.db_tx .storage_as_mut::() .insert(&reverse_key, key)?; From 1e70c771aab437eb3a7fbd69f7381c5cb0f0a6f7 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Fri, 27 Sep 2024 07:17:23 +0300 Subject: [PATCH 090/112] Make paste a workspace dependency --- Cargo.toml | 1 + crates/storage/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 73f866e5e51..70f80853433 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -131,6 +131,7 @@ test-strategy = "0.3" parquet = { version = "49.0", default-features = false } rayon = "1.10.0" bytes = "1.5.0" +paste = "1.0" pretty_assertions = "1.4.0" proptest = "1.1" pin-project-lite = "0.2" diff --git a/crates/storage/Cargo.toml b/crates/storage/Cargo.toml index fbb71e24327..8750f3ae8b0 100644 --- a/crates/storage/Cargo.toml +++ b/crates/storage/Cargo.toml @@ -28,7 +28,7 @@ impl-tools = "0.10" itertools = { 
workspace = true, features = ["use_alloc"] } mockall = { workspace = true, optional = true } num_enum = { workspace = true } -paste = "1" +paste = { workspace = true } postcard = { workspace = true, features = ["alloc"] } primitive-types = { workspace = true, default-features = false } rand = { workspace = true, optional = true } From 4ac18b396338b1d89419e0c76908e78c8ff72118 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 30 Sep 2024 08:20:49 +0300 Subject: [PATCH 091/112] Use same workspace dep "paste" for all crates --- crates/compression/Cargo.toml | 2 +- crates/fuel-core/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/compression/Cargo.toml b/crates/compression/Cargo.toml index 79390c351c4..40e092d1e00 100644 --- a/crates/compression/Cargo.toml +++ b/crates/compression/Cargo.toml @@ -23,7 +23,7 @@ fuel-core-types = { workspace = true, features = [ "serde", "da-compression", ] } -paste = "1" +paste = { workspace = true } rand = { workspace = true, optional = true } serde = { version = "1.0", features = ["derive"] } strum = { workspace = true } diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index b21dc94bb8e..42cd3f51cb7 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -45,7 +45,7 @@ hyper = { workspace = true } indicatif = { workspace = true, default-features = true } itertools = { workspace = true } num_cpus = { version = "1.16.0", optional = true } -paste = "1" +paste = { workspace = true } postcard = { workspace = true, optional = true } rand = { workspace = true } rocksdb = { version = "0.21", default-features = false, features = [ From 051089f4b0540b85daa22141218c1a6e12f558cb Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 30 Sep 2024 10:23:35 +0300 Subject: [PATCH 092/112] Add retention time parameter, move the feature behind a cli flag --- Cargo.lock | 1 + bin/fuel-core/Cargo.toml | 1 + bin/fuel-core/src/cli/run.rs | 21 ++++++- 
crates/compression/src/compress.rs | 56 ++++++++++++------- crates/compression/src/config.rs | 33 +++++++++++ crates/compression/src/decompress.rs | 33 ++++++++--- crates/compression/src/lib.rs | 2 + crates/compression/src/ports.rs | 27 +++++++-- crates/compression/src/registry.rs | 7 +-- .../src/graphql_api/da_compression.rs | 35 +++++++++++- crates/fuel-core/src/graphql_api/ports.rs | 3 +- crates/fuel-core/src/graphql_api/storage.rs | 16 +++--- .../src/graphql_api/storage/da_compression.rs | 42 +++++++++++--- .../{metadata_key.rs => evictor_cache.rs} | 2 +- .../storage/da_compression/timestamps.rs | 46 +++++++++++++++ .../src/graphql_api/worker_service.rs | 19 ++++++- .../src/graphql_api/worker_service/tests.rs | 1 + crates/fuel-core/src/service/config.rs | 7 ++- crates/fuel-core/src/service/sub_services.rs | 1 + 19 files changed, 293 insertions(+), 60 deletions(-) create mode 100644 crates/compression/src/config.rs rename crates/fuel-core/src/graphql_api/storage/da_compression/{metadata_key.rs => evictor_cache.rs} (91%) create mode 100644 crates/fuel-core/src/graphql_api/storage/da_compression/timestamps.rs diff --git a/Cargo.lock b/Cargo.lock index 14bed586801..2f8dc91b098 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3275,6 +3275,7 @@ dependencies = [ "dotenvy", "fuel-core", "fuel-core-chain-config", + "fuel-core-compression", "fuel-core-poa", "fuel-core-storage", "fuel-core-types 0.36.0", diff --git a/bin/fuel-core/Cargo.toml b/bin/fuel-core/Cargo.toml index a15848bb37e..aa111e7637c 100644 --- a/bin/fuel-core/Cargo.toml +++ b/bin/fuel-core/Cargo.toml @@ -27,6 +27,7 @@ dirs = "4.0" dotenvy = { version = "0.15", optional = true } fuel-core = { workspace = true, features = ["wasm-executor"] } fuel-core-chain-config = { workspace = true } +fuel-core-compression = { workspace = true } fuel-core-poa = { workspace = true } fuel-core-types = { workspace = true, features = ["std"] } hex = { workspace = true } diff --git a/bin/fuel-core/src/cli/run.rs 
b/bin/fuel-core/src/cli/run.rs index 44be9d57a98..d930ed01276 100644 --- a/bin/fuel-core/src/cli/run.rs +++ b/bin/fuel-core/src/cli/run.rs @@ -20,7 +20,10 @@ use fuel_core::{ CombinedDatabase, CombinedDatabaseConfig, }, - fuel_core_graphql_api::ServiceConfig as GraphQLConfig, + fuel_core_graphql_api::{ + worker_service::DaCompressionConfig, + ServiceConfig as GraphQLConfig, + }, producer::Config as ProducerConfig, service::{ config::Trigger, @@ -190,6 +193,11 @@ pub struct Command { #[cfg(feature = "aws-kms")] pub consensus_aws_kms: Option, + /// If given, the node will produce and store da-compressed blocks + /// with the given retention time. + #[arg(long = "da-compression", env)] + pub da_compression: Option, + /// A new block is produced instantly when transactions are available. #[clap(flatten)] pub poa_trigger: PoATriggerArgs, @@ -272,6 +280,7 @@ impl Command { consensus_key, #[cfg(feature = "aws-kms")] consensus_aws_kms, + da_compression, poa_trigger, predefined_blocks_path, coinbase_recipient, @@ -418,6 +427,15 @@ impl Command { let block_importer = fuel_core::service::config::fuel_core_importer::Config::new(); + let da_compression = match da_compression { + Some(retention) => { + DaCompressionConfig::Enabled(fuel_core_compression::Config { + temporal_registry_retention: retention.into(), + }) + } + None => DaCompressionConfig::Disabled, + }; + let TxPoolArgs { tx_pool_ttl, tx_max_number, @@ -475,6 +493,7 @@ impl Command { min_gas_price, gas_price_threshold_percent, block_importer, + da_compression, #[cfg(feature = "relayer")] relayer: relayer_cfg, #[cfg(feature = "p2p")] diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 6c36ba59f66..31d8bffe47e 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -1,6 +1,11 @@ use crate::{ + config::Config, eviction_policy::CacheEvictor, - ports::UtxoIdToPointer, + ports::{ + EvictorDb, + TemporalRegistry, + UtxoIdToPointer, + }, registry::{ 
PerRegistryKeyspace, RegistrationsPerTable, @@ -9,6 +14,7 @@ use crate::{ CompressedBlockPayloadV0, VersionedCompressedBlock, }; +use anyhow::Context; use fuel_core_types::{ blockchain::block::Block, fuel_compression::{ @@ -29,6 +35,7 @@ use fuel_core_types::{ AssetId, ContractId, }, + tai64::Tai64, }; use std::collections::{ HashMap, @@ -40,6 +47,7 @@ impl CompressDb for T where T: TemporalRegistryAll + UtxoIdToPointer {} /// This must be called for all new blocks in sequence, otherwise the result will be garbage. pub async fn compress( + config: Config, mut db: D, block: &Block, ) -> anyhow::Result @@ -48,7 +56,12 @@ where { let target = block.transactions_vec(); - let mut prepare_ctx = PrepareCtx::new(&mut db); + let mut prepare_ctx = PrepareCtx { + config, + timestamp: block.header().time(), + db: &mut db, + accessed_keys: Default::default(), + }; let _ = target.compress_with(&mut prepare_ctx).await?; let mut ctx = prepare_ctx.into_compression_context()?; @@ -56,7 +69,7 @@ where let registrations: RegistrationsPerTable = ctx.finalize()?; // Apply changes to the db - registrations.write_to_registry(&mut db)?; + registrations.write_to_registry(&mut db, block.header().consensus().time)?; // Construct the actual compacted block let compact = CompressedBlockPayloadV0 { @@ -72,20 +85,13 @@ where /// Preparation pass through the block to collect all keys accessed during compression. /// Returns dummy values. The resulting "compressed block" should be discarded. pub struct PrepareCtx { + pub config: Config, + /// Current timestamp + pub timestamp: Tai64, /// Database handle - db: D, + pub db: D, /// Keys accessed during the compression. - accessed_keys: PerRegistryKeyspace>, -} - -impl PrepareCtx { - /// Create a new PrepareCtx around the given database. 
- pub fn new(db: D) -> Self { - Self { - db, - accessed_keys: PerRegistryKeyspace::default(), - } - } + pub accessed_keys: PerRegistryKeyspace>, } impl ContextError for PrepareCtx { @@ -120,6 +126,8 @@ struct CompressCtxKeyspace { macro_rules! compression { ($($ident:ty: $type:ty),*) => { paste::paste! { pub struct CompressCtx { + config: Config, + timestamp: Tai64, db: D, $($ident: CompressCtxKeyspace<$type>,)* } @@ -137,6 +145,8 @@ macro_rules! compression { cache_evictor: CacheEvictor::new_from_db(&mut self.db, self.accessed_keys.$ident.into())?, }, )* + config: self.config, + timestamp: self.timestamp, db: self.db, }) } @@ -160,7 +170,7 @@ macro_rules! compression { $( impl CompressibleBy> for $type where - D: CompressDb + D: TemporalRegistry<$type> + EvictorDb<$type> { async fn compress_with( &self, @@ -170,7 +180,11 @@ macro_rules! compression { return Ok(RegistryKey::ZERO); } if let Some(found) = ctx.db.registry_index_lookup(self)? { - ctx.accessed_keys.$ident.insert(found); + let key_timestamp = ctx.db.read_timestamp(&found) + .context("Database invariant violated: no timestamp stored but key found")?; + if ctx.config.is_timestamp_accessible(ctx.timestamp, key_timestamp)? { + ctx.accessed_keys.$ident.insert(found); + } } Ok(RegistryKey::ZERO) } @@ -178,7 +192,7 @@ macro_rules! compression { impl CompressibleBy> for $type where - D: CompressDb + D: TemporalRegistry<$type> + EvictorDb<$type> { async fn compress_with( &self, @@ -188,7 +202,11 @@ macro_rules! compression { return Ok(RegistryKey::DEFAULT_VALUE); } if let Some(found) = ctx.db.registry_index_lookup(self)? { - return Ok(found); + let key_timestamp = ctx.db.read_timestamp(&found) + .context("Database invariant violated: no timestamp stored but key found")?; + if ctx.config.is_timestamp_accessible(ctx.timestamp, key_timestamp)? 
{ + return Ok(found); + } } if let Some(found) = ctx.$ident.changes_lookup.get(self) { return Ok(*found); diff --git a/crates/compression/src/config.rs b/crates/compression/src/config.rs new file mode 100644 index 00000000000..9ad669a7f53 --- /dev/null +++ b/crates/compression/src/config.rs @@ -0,0 +1,33 @@ +use core::time::Duration; + +use fuel_core_types::tai64::{ + Tai64, + Tai64N, +}; + +#[derive(Debug, Clone, Copy)] +pub struct Config { + /// How long entries in the temporal registry are valid. + /// After this time has passed, the entry is considered stale and must not be used. + /// If the value is needed again, it must be re-registered. + pub temporal_registry_retention: Duration, +} + +impl Config { + /// Given timestamp of the currnet block and a key in an older block, + /// is the key is still accessible? + /// Returns error if the arguments are not valid block timestamps, + /// or if the block is older than the key. + pub fn is_timestamp_accessible( + &self, + block_timestamp: Tai64, + key_timestamp: Tai64, + ) -> anyhow::Result { + let block = Tai64N(block_timestamp, 0); + let key = Tai64N(key_timestamp, 0); + let duration = block + .duration_since(&key) + .map_err(|_| anyhow::anyhow!("Invalid timestamp ordering"))?; + Ok(duration <= self.temporal_registry_retention) + } +} diff --git a/crates/compression/src/decompress.rs b/crates/compression/src/decompress.rs index 47eca16f117..c3baa909824 100644 --- a/crates/compression/src/decompress.rs +++ b/crates/compression/src/decompress.rs @@ -1,4 +1,5 @@ use crate::{ + config::Config, ports::{ HistoryLookup, TemporalRegistry, @@ -39,6 +40,7 @@ use fuel_core_types::{ Address, ContractId, }, + tai64::Tai64, }; pub trait DecompressDb: TemporalRegistryAll + HistoryLookup {} @@ -46,6 +48,7 @@ impl DecompressDb for T where T: TemporalRegistryAll + HistoryLookup {} /// This must be called for all decompressed blocks in sequence, otherwise the result will be garbage. 
pub async fn decompress( + config: Config, mut db: D, block: VersionedCompressedBlock, ) -> anyhow::Result @@ -56,9 +59,15 @@ where // TODO: merkle root verification: https://github.com/FuelLabs/fuel-core/issues/2232 - compressed.registrations.write_to_registry(&mut db)?; + compressed + .registrations + .write_to_registry(&mut db, compressed.header.consensus.time)?; - let ctx = DecompressCtx { db }; + let ctx = DecompressCtx { + config, + timestamp: compressed.header.consensus.time, + db, + }; let transactions = as DecompressibleBy<_>>::decompress_with( compressed.transactions, @@ -73,13 +82,10 @@ where } pub struct DecompressCtx { - db: D, -} - -impl DecompressCtx { - pub fn new(db: D) -> Self { - Self { db } - } + pub config: Config, + /// Timestamp of the block being decompressed + pub timestamp: Tai64, + pub db: D, } impl ContextError for DecompressCtx { @@ -112,6 +118,10 @@ macro_rules! decompress_impl { if key == RegistryKey::DEFAULT_VALUE { return Ok(<$type>::default()); } + let key_timestamp = ctx.db.read_timestamp(&key)?; + if !ctx.config.is_timestamp_accessible(ctx.timestamp, key_timestamp)? 
{ + anyhow::bail!("Timestamp not accessible"); + } ctx.db.read_registry(&key) } } @@ -259,10 +269,15 @@ mod tests { todo!() } + fn read_timestamp(&self, _key: &RegistryKey) -> anyhow::Result { + todo!() + } + fn write_registry( &mut self, _key: &RegistryKey, _value: &$type, + _timestamp: Tai64, ) -> anyhow::Result<()> { todo!() } diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 42c13882899..3556257f65f 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -4,11 +4,13 @@ #![deny(warnings)] pub mod compress; +pub mod config; pub mod decompress; mod eviction_policy; pub mod ports; mod registry; +pub use config::Config; pub use registry::RegistryKeyspace; use fuel_core_types::{ diff --git a/crates/compression/src/ports.rs b/crates/compression/src/ports.rs index 6490ed6af34..50b2acd6fe8 100644 --- a/crates/compression/src/ports.rs +++ b/crates/compression/src/ports.rs @@ -10,6 +10,7 @@ use fuel_core_types::{ Word, }, fuel_types::Nonce, + tai64::Tai64, }; /// Rolling cache for compression. @@ -20,8 +21,17 @@ pub trait TemporalRegistry { /// Reads a value from the registry at its current height. fn read_registry(&self, key: &RegistryKey) -> anyhow::Result; - /// Reads a value from the registry at its current height. - fn write_registry(&mut self, key: &RegistryKey, value: &T) -> anyhow::Result<()>; + /// Reads timestamp of the value from the registry. + fn read_timestamp(&self, key: &RegistryKey) -> anyhow::Result; + + /// Writes a value from to the registry. The timestamp is the time of the block, + /// and it is used for key retention. + fn write_registry( + &mut self, + key: &RegistryKey, + value: &T, + timestamp: Tai64, + ) -> anyhow::Result<()>; /// Lookup registry key by the value. 
fn registry_index_lookup(&self, value: &T) -> anyhow::Result>; @@ -35,8 +45,17 @@ where >::read_registry(self, key) } - fn write_registry(&mut self, key: &RegistryKey, value: &T) -> anyhow::Result<()> { - >::write_registry(self, key, value) + fn read_timestamp(&self, key: &RegistryKey) -> anyhow::Result { + >::read_timestamp(self, key) + } + + fn write_registry( + &mut self, + key: &RegistryKey, + value: &T, + timestamp: Tai64, + ) -> anyhow::Result<()> { + >::write_registry(self, key, value, timestamp) } fn registry_index_lookup(&self, value: &T) -> anyhow::Result> { diff --git a/crates/compression/src/registry.rs b/crates/compression/src/registry.rs index a00dc8bc998..5e46aea4874 100644 --- a/crates/compression/src/registry.rs +++ b/crates/compression/src/registry.rs @@ -11,6 +11,7 @@ use fuel_core_types::{ ContractId, ScriptCode, }, + tai64::Tai64, }; macro_rules! tables { @@ -57,25 +58,23 @@ macro_rules! tables { pub trait TemporalRegistryAll where - Self: Sized, $(Self: TemporalRegistry<$type> + EvictorDb<$type>,)* {} impl TemporalRegistryAll for T where - T: Sized, $(T: TemporalRegistry<$type> + EvictorDb<$type>,)* {} impl RegistrationsPerTable { - pub(crate) fn write_to_registry(&self, registry: &mut R) -> anyhow::Result<()> + pub(crate) fn write_to_registry(&self, registry: &mut R, timestamp: Tai64) -> anyhow::Result<()> where R: TemporalRegistryAll { $( for (key, value) in self.$ident.iter() { - registry.write_registry(key, value)?; + registry.write_registry(key, value, timestamp)?; } )* diff --git a/crates/fuel-core/src/graphql_api/da_compression.rs b/crates/fuel-core/src/graphql_api/da_compression.rs index d249d7b522d..e9d11d1c22e 100644 --- a/crates/fuel-core/src/graphql_api/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/da_compression.rs @@ -1,12 +1,17 @@ use crate::fuel_core_graphql_api::{ ports::worker::OffChainDatabaseTransaction, storage::da_compression::{ - metadata_key::MetadataKey, + evictor_cache::MetadataKey, + timestamps::{ + 
TimestampKey, + TimestampKeyspace, + }, *, }, }; use fuel_core_compression::{ compress::compress, + config::Config, ports::{ EvictorDb, TemporalRegistry, @@ -28,11 +33,13 @@ use fuel_core_types::{ ScriptCode, }, services::executor::Event, + tai64::Tai64, }; use futures::FutureExt; /// Performs DA compression for a block and stores it in the database. pub fn da_compress_block( + config: Config, block: &Block, block_events: &[Event], db_tx: &mut T, @@ -41,6 +48,7 @@ where T: OffChainDatabaseTransaction, { let compressed = compress( + config, CompressTx { db_tx, block_events, @@ -80,10 +88,26 @@ macro_rules! impl_temporal_registry { .into_owned()) } + fn read_timestamp( + &self, + key: &fuel_core_types::fuel_compression::RegistryKey, + ) -> anyhow::Result { + Ok(self + .db_tx + .storage_as_ref::<[< DaCompressionTemporalRegistryTimestamps >]>() + .get(&TimestampKey { + keyspace: TimestampKeyspace::$type, + key: *key, + })? + .ok_or(not_found!(DaCompressionTemporalRegistryTimestamps))? + .into_owned()) + } + fn write_registry( &mut self, key: &fuel_core_types::fuel_compression::RegistryKey, value: &$type, + timestamp: Tai64, ) -> anyhow::Result<()> { // Write the actual value let old_value = self.db_tx @@ -104,6 +128,11 @@ macro_rules! impl_temporal_registry { .storage_as_mut::() .insert(&reverse_key, key)?; + // Update the timestamp + self.db_tx + .storage_as_mut::() + .insert(&TimestampKey { keyspace: TimestampKeyspace::$type, key: *key }, ×tamp)?; + Ok(()) } @@ -130,7 +159,7 @@ macro_rules! impl_temporal_registry { key: fuel_core_types::fuel_compression::RegistryKey, ) -> anyhow::Result<()> { self.db_tx - .storage_as_mut::() + .storage_as_mut::() .insert(&MetadataKey::$type, &key)?; Ok(()) } @@ -140,7 +169,7 @@ macro_rules! impl_temporal_registry { ) -> anyhow::Result> { Ok(self .db_tx - .storage_as_ref::() + .storage_as_ref::() .get(&MetadataKey::$type)? 
.map(|v| v.into_owned()) ) diff --git a/crates/fuel-core/src/graphql_api/ports.rs b/crates/fuel-core/src/graphql_api/ports.rs index 3edfddeea37..e9a9e5255b4 100644 --- a/crates/fuel-core/src/graphql_api/ports.rs +++ b/crates/fuel-core/src/graphql_api/ports.rs @@ -339,7 +339,8 @@ pub mod worker { + StorageMutate + StorageMutate + StorageMutate - + StorageMutate + + StorageMutate + + StorageMutate { fn record_tx_id_owner( &mut self, diff --git a/crates/fuel-core/src/graphql_api/storage.rs b/crates/fuel-core/src/graphql_api/storage.rs index d06af3aef86..8f8cfcd1f19 100644 --- a/crates/fuel-core/src/graphql_api/storage.rs +++ b/crates/fuel-core/src/graphql_api/storage.rs @@ -99,18 +99,20 @@ pub enum Column { DaCompressedBlocks = 14, /// See [`DaCompressionTemporalRegistryIndex`](da_compression::DaCompressionTemporalRegistryIndex) DaCompressionTemporalRegistryIndex = 15, - /// See [`DaCompressionTemporalRegistryMetadata`](da_compression::DaCompressionTemporalRegistryMetadata) - DaCompressionTemporalRegistryMetadata = 16, + /// See [`DaCompressionTemporalRegistryTimestamps`](da_compression::DaCompressionTemporalRegistryTimestamps) + DaCompressionTemporalRegistryTimestamps = 16, + /// See [`DaCompressionTemporalRegistryEvictorCache`](da_compression::DaCompressionTemporalRegistryEvictorCache) + DaCompressionTemporalRegistryEvictorCache = 17, /// See [`DaCompressionTemporalRegistryAddress`](da_compression::DaCompressionTemporalRegistryAddress) - DaCompressionTemporalRegistryAddress = 17, + DaCompressionTemporalRegistryAddress = 18, /// See [`DaCompressionTemporalRegistryAssetId`](da_compression::DaCompressionTemporalRegistryAssetId) - DaCompressionTemporalRegistryAssetId = 18, + DaCompressionTemporalRegistryAssetId = 19, /// See [`DaCompressionTemporalRegistryContractId`](da_compression::DaCompressionTemporalRegistryContractId) - DaCompressionTemporalRegistryContractId = 19, + DaCompressionTemporalRegistryContractId = 20, /// See 
[`DaCompressionTemporalRegistryScriptCode`](da_compression::DaCompressionTemporalRegistryScriptCode) - DaCompressionTemporalRegistryScriptCode = 20, + DaCompressionTemporalRegistryScriptCode = 21, /// See [`DaCompressionTemporalRegistryPredicateCode`](da_compression::DaCompressionTemporalRegistryPredicateCode) - DaCompressionTemporalRegistryPredicateCode = 21, + DaCompressionTemporalRegistryPredicateCode = 22, } impl Column { diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression.rs b/crates/fuel-core/src/graphql_api/storage/da_compression.rs index 87831de2e77..5f7812fc8de 100644 --- a/crates/fuel-core/src/graphql_api/storage/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/storage/da_compression.rs @@ -1,8 +1,9 @@ -use crate::fuel_core_graphql_api::storage::da_compression::{ - metadata_key::MetadataKey, +use self::{ + evictor_cache::MetadataKey, predicate_code_codec::PredicateCodeCodec, reverse_key::ReverseKey, script_code_codec::ScriptCodeCodec, + timestamps::TimestampKey, }; use fuel_core_compression::VersionedCompressedBlock; use fuel_core_storage::{ @@ -25,12 +26,14 @@ use fuel_core_types::{ ScriptCode, }, fuel_types::BlockHeight, + tai64::Tai64, }; -pub mod metadata_key; +pub mod evictor_cache; pub mod predicate_code_codec; pub mod reverse_key; pub mod script_code_codec; +pub mod timestamps; /// The table for the compressed blocks sent to DA. pub struct DaCompressedBlocks; @@ -51,7 +54,7 @@ impl TableWithBlueprint for DaCompressedBlocks { } } -/// Mapping from the type to the register key in the temporal registry. +/// Mapping from the type to the registry key in the temporal registry. pub struct DaCompressionTemporalRegistryIndex; impl Mappable for DaCompressionTemporalRegistryIndex { @@ -71,25 +74,46 @@ impl TableWithBlueprint for DaCompressionTemporalRegistryIndex { } } +/// This table keeps track of last written timestamp for each key, +/// so that we can keep track of expiration. 
+pub struct DaCompressionTemporalRegistryTimestamps; + +impl Mappable for DaCompressionTemporalRegistryTimestamps { + type Key = Self::OwnedKey; + type OwnedKey = TimestampKey; + type Value = Self::OwnedValue; + type OwnedValue = Tai64; +} + +impl TableWithBlueprint for DaCompressionTemporalRegistryTimestamps { + // TODO: Use Raw codec for value instead of Postcard + type Blueprint = Plain; + type Column = super::Column; + + fn column() -> Self::Column { + Self::Column::DaCompressionTemporalRegistryTimestamps + } +} + /// This table is used to hold "next key to evict" for each keyspace. /// In the future we'll likely switch to use LRU or something, in which /// case this table can be repurposed. -pub struct DaCompressionTemporalRegistryMetadata; +pub struct DaCompressionTemporalRegistryEvictorCache; -impl Mappable for DaCompressionTemporalRegistryMetadata { +impl Mappable for DaCompressionTemporalRegistryEvictorCache { type Key = Self::OwnedKey; type OwnedKey = MetadataKey; type Value = Self::OwnedValue; type OwnedValue = RegistryKey; } -impl TableWithBlueprint for DaCompressionTemporalRegistryMetadata { +impl TableWithBlueprint for DaCompressionTemporalRegistryEvictorCache { // TODO: Use Raw codec for value instead of Postcard type Blueprint = Plain; type Column = super::Column; fn column() -> Self::Column { - Self::Column::DaCompressionTemporalRegistryMetadata + Self::Column::DaCompressionTemporalRegistryEvictorCache } } @@ -147,7 +171,7 @@ mod tests { #[cfg(test)] fuel_core_storage::basic_storage_tests!( - DaCompressionTemporalRegistryMetadata, + DaCompressionTemporalRegistryEvictorCache, MetadataKey::Address, RegistryKey::ZERO ); diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression/metadata_key.rs b/crates/fuel-core/src/graphql_api/storage/da_compression/evictor_cache.rs similarity index 91% rename from crates/fuel-core/src/graphql_api/storage/da_compression/metadata_key.rs rename to 
crates/fuel-core/src/graphql_api/storage/da_compression/evictor_cache.rs index 2b356eb0eeb..870d02722f6 100644 --- a/crates/fuel-core/src/graphql_api/storage/da_compression/metadata_key.rs +++ b/crates/fuel-core/src/graphql_api/storage/da_compression/evictor_cache.rs @@ -1,4 +1,4 @@ -/// The metadata key used by `DaCompressionTemporalRegistryMetadata` table to +/// The metadata key used by `DaCompressionTemporalRegistryEvictorCache` table to /// store progress of the evictor. #[derive( Debug, diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression/timestamps.rs b/crates/fuel-core/src/graphql_api/storage/da_compression/timestamps.rs new file mode 100644 index 00000000000..923ebcd2f2a --- /dev/null +++ b/crates/fuel-core/src/graphql_api/storage/da_compression/timestamps.rs @@ -0,0 +1,46 @@ +use fuel_core_types::fuel_compression::RegistryKey; + +/// The metadata key used by `DaCompressionTemporalRegistryTimsetamps` table to +/// keep track of when each key was last updated. +#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub struct TimestampKey { + /// The column where the key is stored. + pub keyspace: TimestampKeyspace, + /// The key itself. 
+ pub key: RegistryKey, +} + +#[derive( + Debug, + Clone, + Copy, + PartialEq, + Eq, + serde::Serialize, + serde::Deserialize, + strum::EnumCount, +)] +pub enum TimestampKeyspace { + Address, + AssetId, + ContractId, + ScriptCode, + PredicateCode, +} + +#[cfg(feature = "test-helpers")] +impl rand::distributions::Distribution + for rand::distributions::Standard +{ + fn sample(&self, rng: &mut R) -> TimestampKeyspace { + use strum::EnumCount; + match rng.next_u32() as usize % TimestampKeyspace::COUNT { + 0 => TimestampKeyspace::Address, + 1 => TimestampKeyspace::AssetId, + 2 => TimestampKeyspace::ContractId, + 3 => TimestampKeyspace::ScriptCode, + 4 => TimestampKeyspace::PredicateCode, + _ => unreachable!("New metadata key is added but not supported here"), + } + } +} diff --git a/crates/fuel-core/src/graphql_api/worker_service.rs b/crates/fuel-core/src/graphql_api/worker_service.rs index 98aeffb105f..a1736aeefc7 100644 --- a/crates/fuel-core/src/graphql_api/worker_service.rs +++ b/crates/fuel-core/src/graphql_api/worker_service.rs @@ -98,9 +98,16 @@ use std::{ #[cfg(test)] mod tests; +#[derive(Debug, Clone)] +pub enum DaCompressionConfig { + Disabled, + Enabled(fuel_core_compression::config::Config), +} + /// The initialization task recovers the state of the GraphQL service database on startup. 
pub struct InitializeTask { chain_id: ChainId, + da_compression_config: DaCompressionConfig, continue_on_error: bool, tx_pool: TxPool, blocks_events: BoxStream, @@ -116,6 +123,7 @@ pub struct Task { block_importer: BoxStream, database: D, chain_id: ChainId, + da_compression_config: DaCompressionConfig, continue_on_error: bool, } @@ -151,7 +159,12 @@ where &mut transaction, )?; - da_compress_block(block, &result.events, &mut transaction)?; + match self.da_compression_config { + DaCompressionConfig::Disabled => {} + DaCompressionConfig::Enabled(config) => { + da_compress_block(config, block, &result.events, &mut transaction)?; + } + } transaction.commit()?; @@ -462,6 +475,7 @@ where let InitializeTask { chain_id, + da_compression_config, tx_pool, block_importer, blocks_events, @@ -475,6 +489,7 @@ where block_importer: blocks_events, database: off_chain_database, chain_id, + da_compression_config, continue_on_error, }; @@ -586,6 +601,7 @@ pub fn new_service( on_chain_database: OnChain, off_chain_database: OffChain, chain_id: ChainId, + da_compression_config: DaCompressionConfig, continue_on_error: bool, ) -> ServiceRunner> where @@ -601,6 +617,7 @@ where on_chain_database, off_chain_database, chain_id, + da_compression_config, continue_on_error, }) } diff --git a/crates/fuel-core/src/graphql_api/worker_service/tests.rs b/crates/fuel-core/src/graphql_api/worker_service/tests.rs index b6eef2c7826..8b9ad758975 100644 --- a/crates/fuel-core/src/graphql_api/worker_service/tests.rs +++ b/crates/fuel-core/src/graphql_api/worker_service/tests.rs @@ -81,6 +81,7 @@ fn worker_task_with_block_importer_and_db( block_importer, database, chain_id, + da_compression_config: DaCompressionConfig::Disabled, continue_on_error: false, } } diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index 092909929a1..bd04f40a0d5 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -31,7 +31,10 @@ use 
fuel_core_types::blockchain::header::StateTransitionBytecodeVersion; use crate::{ combined_database::CombinedDatabaseConfig, - graphql_api::ServiceConfig as GraphQLConfig, + graphql_api::{ + worker_service::DaCompressionConfig, + ServiceConfig as GraphQLConfig, + }, }; #[derive(Clone, Debug)] @@ -57,6 +60,7 @@ pub struct Config { pub gas_price_change_percent: u64, pub min_gas_price: u64, pub gas_price_threshold_percent: u64, + pub da_compression: DaCompressionConfig, pub block_importer: fuel_core_importer::Config, #[cfg(feature = "relayer")] pub relayer: Option, @@ -157,6 +161,7 @@ impl Config { block_producer: fuel_core_producer::Config { ..Default::default() }, + da_compression: DaCompressionConfig::Disabled, starting_gas_price, gas_price_change_percent, min_gas_price, diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index ea3e62a9cd6..3e4a208aead 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -300,6 +300,7 @@ pub fn init_sub_services( database.on_chain().clone(), database.off_chain().clone(), chain_id, + config.da_compression.clone(), config.continue_on_error, ); From b27d6f4b38d3f6aea23647b10b88743735cce581 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 30 Sep 2024 10:28:28 +0300 Subject: [PATCH 093/112] Remove registrations_root from compressed blocks v0 --- crates/compression/src/compress.rs | 2 -- crates/compression/src/lib.rs | 7 ------- 2 files changed, 9 deletions(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 31d8bffe47e..133236298fc 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -24,7 +24,6 @@ use fuel_core_types::{ }, fuel_tx::{ input::PredicateCode, - Bytes32, CompressedUtxoId, ScriptCode, TxPointer, @@ -74,7 +73,6 @@ where // Construct the actual compacted block let compact = CompressedBlockPayloadV0 { registrations, - 
registrations_root: Bytes32::default(), /* TODO: https://github.com/FuelLabs/fuel-core/issues/2232 */ header: block.header().into(), transactions, }; diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 3556257f65f..4b22d2e8700 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -16,7 +16,6 @@ pub use registry::RegistryKeyspace; use fuel_core_types::{ blockchain::header::PartialBlockHeader, fuel_tx::CompressedTransaction, - fuel_types::Bytes32, }; use registry::RegistrationsPerTable; @@ -25,8 +24,6 @@ use registry::RegistrationsPerTable; pub struct CompressedBlockPayloadV0 { /// Temporal registry insertions pub registrations: RegistrationsPerTable, - /// Merkle root of the temporal registry state - pub registrations_root: Bytes32, /// Compressed block header pub header: PartialBlockHeader, /// Compressed transactions @@ -83,7 +80,6 @@ mod tests { height in 0..=u32::MAX, consensus_parameters_version in 0..=u32::MAX, state_transition_bytecode_version in 0..=u32::MAX, - registrations_root in prop::array::uniform32(0..=u8::MAX), registration_inputs in prop::collection::vec( (keyspace(), prop::num::u16::ANY, prop::array::uniform32(0..=u8::MAX)).prop_map(|(ks, rk, arr)| { let k = RegistryKey::try_from(rk as u32).unwrap(); @@ -131,7 +127,6 @@ mod tests { }; let original = CompressedBlockPayloadV0 { registrations, - registrations_root: registrations_root.into(), header, transactions: vec![], }; @@ -142,13 +137,11 @@ mod tests { let CompressedBlockPayloadV0 { registrations, - registrations_root, header, transactions, } = decompressed; assert_eq!(registrations, original.registrations); - assert_eq!(registrations_root, original.registrations_root); assert_eq!(header.da_height, da_height.into()); assert_eq!(*header.prev_root(), prev_root.into()); From ba0ca71f20145cd4c0171a6300bd77db6db1e8f6 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 30 Sep 2024 10:56:58 +0300 Subject: [PATCH 094/112] Add 
basic_storage_tests for DaCompressionTemporalRegistryTimestamps --- .../src/graphql_api/storage/da_compression.rs | 10 ++++++++++ .../graphql_api/storage/da_compression/timestamps.rs | 11 +++++++++++ 2 files changed, 21 insertions(+) diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression.rs b/crates/fuel-core/src/graphql_api/storage/da_compression.rs index 5f7812fc8de..54b21073ed6 100644 --- a/crates/fuel-core/src/graphql_api/storage/da_compression.rs +++ b/crates/fuel-core/src/graphql_api/storage/da_compression.rs @@ -169,6 +169,16 @@ mod tests { RegistryKey::ZERO ); + #[cfg(test)] + fuel_core_storage::basic_storage_tests!( + DaCompressionTemporalRegistryTimestamps, + TimestampKey { + keyspace: timestamps::TimestampKeyspace::Address, + key: RegistryKey::ZERO + }, + Tai64::UNIX_EPOCH + ); + #[cfg(test)] fuel_core_storage::basic_storage_tests!( DaCompressionTemporalRegistryEvictorCache, diff --git a/crates/fuel-core/src/graphql_api/storage/da_compression/timestamps.rs b/crates/fuel-core/src/graphql_api/storage/da_compression/timestamps.rs index 923ebcd2f2a..dc8f016f50b 100644 --- a/crates/fuel-core/src/graphql_api/storage/da_compression/timestamps.rs +++ b/crates/fuel-core/src/graphql_api/storage/da_compression/timestamps.rs @@ -28,6 +28,17 @@ pub enum TimestampKeyspace { PredicateCode, } +#[cfg(feature = "test-helpers")] +impl rand::distributions::Distribution for rand::distributions::Standard { + #![allow(clippy::arithmetic_side_effects)] // Test-only code, and also safe + fn sample(&self, rng: &mut R) -> TimestampKey { + TimestampKey { + keyspace: rng.gen(), + key: RegistryKey::try_from(rng.gen_range(0..2u32.pow(24) - 2)).unwrap(), + } + } +} + #[cfg(feature = "test-helpers")] impl rand::distributions::Distribution for rand::distributions::Standard From c8ebcb25a1c6284fa21d77ea3d54b222352def6c Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 30 Sep 2024 11:04:42 +0300 Subject: [PATCH 095/112] Update tests to match config changes --- 
Cargo.lock | 1 + tests/Cargo.toml | 1 + tests/tests/da_compression.rs | 24 +++++++++++++----------- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f8dc91b098..3176e199038 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3699,6 +3699,7 @@ dependencies = [ "fuel-core-benches", "fuel-core-bin", "fuel-core-client", + "fuel-core-compression", "fuel-core-executor", "fuel-core-gas-price-service", "fuel-core-p2p", diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 2d60439028f..071e76b5ef1 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -34,6 +34,7 @@ fuel-core = { path = "../crates/fuel-core", default-features = false, features = fuel-core-benches = { path = "../benches" } fuel-core-bin = { path = "../bin/fuel-core", features = ["parquet", "p2p"] } fuel-core-client = { path = "../crates/client", features = ["test-helpers"] } +fuel-core-compression = { path = "../crates/compression" } fuel-core-executor = { workspace = true } fuel-core-gas-price-service = { path = "../crates/services/gas_price_service" } fuel-core-p2p = { path = "../crates/services/p2p", features = [ diff --git a/tests/tests/da_compression.rs b/tests/tests/da_compression.rs index 2400659e974..9e8450e9f20 100644 --- a/tests/tests/da_compression.rs +++ b/tests/tests/da_compression.rs @@ -1,5 +1,7 @@ +use core::time::Duration; use fuel_core::{ combined_database::CombinedDatabase, + fuel_core_graphql_api::worker_service::DaCompressionConfig, p2p_test_helpers::*, service::{ Config, @@ -32,6 +34,9 @@ async fn can_fetch_da_compressed_block_from_graphql() { let db = CombinedDatabase::default(); let mut config = Config::local_node(); config.consensus_signer = SignMode::Key(Secret::new(poa_secret.into())); + config.da_compression = DaCompressionConfig::Enabled(fuel_core_compression::Config { + temporal_registry_retention: Duration::from_secs(3600), + }); let srv = FuelService::from_combined_database(db.clone(), config) .await .unwrap(); @@ -60,6 +65,12 @@ async 
fn da_compressed_blocks_are_available_from_non_block_producing_nodes() { // Create a producer and a validator that share the same key pair. let secret = SecretKey::random(&mut rng); let pub_key = Input::owner(&secret.public_key()); + + let mut config = Config::local_node(); + config.da_compression = DaCompressionConfig::Enabled(fuel_core_compression::Config { + temporal_registry_retention: Duration::from_secs(3600), + }); + let Nodes { mut producers, mut validators, @@ -70,14 +81,13 @@ async fn da_compressed_blocks_are_available_from_non_block_producing_nodes() { ProducerSetup::new(secret).with_txs(1).with_name("Alice"), )], [Some(ValidatorSetup::new(pub_key).with_name("Bob"))], - None, + Some(config), ) .await; let mut producer = producers.pop().unwrap(); let mut validator = validators.pop().unwrap(); - let p_client = FuelClient::from(producer.node.shared.graph_ql.bound_address); let v_client = FuelClient::from(validator.node.shared.graph_ql.bound_address); // Insert some txs @@ -87,17 +97,9 @@ async fn da_compressed_blocks_are_available_from_non_block_producing_nodes() { let block_height = 1u32.into(); - let p_block = p_client - .da_compressed_block(block_height) - .await - .unwrap() - .expect("Compressed block not available from producer"); - - let v_block = v_client + let _ = v_client .da_compressed_block(block_height) .await .unwrap() .expect("Compressed block not available from validator"); - - assert!(p_block == v_block); } From 2264fbd12a33ff80d7034b25c75e0811d710fd45 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 30 Sep 2024 11:06:58 +0300 Subject: [PATCH 096/112] Remove unnecessary changes from Cargo.lock --- Cargo.lock | 62 ++++++++++++++++++++++++++++++------------------------ 1 file changed, 34 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3176e199038..79f114ad772 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -169,9 +169,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.88" +version = "1.0.89" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e1496f8fb1fbf272686b8d37f523dab3e4a7443300055e74cdaa449f3114356" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arbitrary" @@ -740,9 +740,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "1.43.0" +version = "1.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f7cb482caa5444d445c94417b9c74e49a849beb09ede4f2f4c3c15f8157387" +checksum = "c6550445e0913c9383375f4a5a2f550817567a19a178107fce1e1afd767f802a" dependencies = [ "aws-credential-types", "aws-runtime", @@ -762,9 +762,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.42.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27bf24cd0d389daa923e974b0e7c38daf308fc21e963c049f57980235017175e" +checksum = "70a9d27ed1c12b1140c47daf1bc541606c43fdafd918c4797d520db0043ceef2" dependencies = [ "aws-credential-types", "aws-runtime", @@ -784,9 +784,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.43.0" +version = "1.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b43b3220f1c46ac0e9dcc0a97d94b93305dacb36d1dd393996300c6b9b74364" +checksum = "44514a6ca967686cde1e2a1b81df6ef1883d0e3e570da8d8bc5c491dcb6fc29b" dependencies = [ "aws-credential-types", "aws-runtime", @@ -806,9 +806,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.42.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1c46924fb1add65bba55636e12812cae2febf68c0f37361766f627ddcca91ce" +checksum = "cd7a4d279762a35b9df97209f6808b95d4fe78547fe2316b4d200a0283960c5a" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1330,9 +1330,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.18" +version = "1.1.21" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -2935,7 +2935,7 @@ dependencies = [ "tokio", "tracing", "walkdir", - "yansi", + "yansi 0.5.1", ] [[package]] @@ -4751,9 +4751,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -6449,9 +6449,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ea5043e58958ee56f3e15a90aee535795cd7dfd319846288d93c5b57d85cbe" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" @@ -7017,12 +7017,12 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" dependencies = [ "diff", - "yansi", + "yansi 1.0.1", ] [[package]] @@ -8593,9 +8593,9 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "12.11.0" +version = "12.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1db5ac243c7d7f8439eb3b8f0357888b37cf3732957e91383b0ad61756374e" +checksum = "9fdf97c441f18a4f92425b896a4ec7a27e03631a0b1047ec4e34e9916a9a167e" dependencies = [ "debugid", "memmap2", @@ -8605,9 +8605,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" 
-version = "12.11.0" +version = "12.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea26e430c27d4a8a5dea4c4b81440606c7c1a415bd611451ef6af8c81416afc3" +checksum = "bc8ece6b129e97e53d1fbb3f61d33a6a9e5369b11d01228c068094d6d134eaea" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -9087,9 +9087,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ "indexmap 2.5.0", "serde", @@ -9326,9 +9326,9 @@ checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] @@ -10216,6 +10216,12 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "yansi" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + [[package]] name = "yasna" version = "0.5.2" From 2c929cc0a6ce3ea839fc2ab17b11721f4034c2b3 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Mon, 30 Sep 2024 11:08:10 +0300 Subject: [PATCH 097/112] Fix typo --- crates/compression/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/compression/src/config.rs b/crates/compression/src/config.rs index 9ad669a7f53..cc111119c59 100644 --- a/crates/compression/src/config.rs +++ 
b/crates/compression/src/config.rs @@ -14,7 +14,7 @@ pub struct Config { } impl Config { - /// Given timestamp of the currnet block and a key in an older block, + /// Given timestamp of the current block and a key in an older block, /// is the key is still accessible? /// Returns error if the arguments are not valid block timestamps, /// or if the block is older than the key. From 9437df16c9db6a9dd615a21b38dda06a1f0cf049 Mon Sep 17 00:00:00 2001 From: Hannes Karppila <2204863+Dentosal@users.noreply.github.com> Date: Tue, 1 Oct 2024 00:34:49 +0300 Subject: [PATCH 098/112] Fix typo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Rafał Chabowski <88321181+rafal-ch@users.noreply.github.com> --- crates/compression/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/compression/README.md b/crates/compression/README.md index 685a5d2a3ae..0768e6a1287 100644 --- a/crates/compression/README.md +++ b/crates/compression/README.md @@ -6,7 +6,7 @@ Each compressed block begins with a version field, so that it's possible to chan ## Temporal registry -This crate provides offchain registries for different types such as `AssetId`, `ContractId`, scripts, and predicates. Each registry is a key-value store with three-byte key. The registires are essentially compression caches. The three byte key allows cache size of 16 million values before reregistering the older values. +This crate provides offchain registries for different types such as `AssetId`, `ContractId`, scripts, and predicates. Each registry is a key-value store with three-byte key. The registries are essentially compression caches. The three byte key allows cache size of 16 million values before reregistering the older values. 
The registries allow replacing repeated objects with their respective keys, so if an object is used multiple times in a short interval (couple of months, maybe), then the full value From 9c888f87f011e0d0b5a76abbf8838680c0de6e07 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 1 Oct 2024 08:23:47 +0300 Subject: [PATCH 099/112] Move write_to_registry inside CompressCtx::finalize --- crates/compression/src/compress.rs | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 133236298fc..9b7ee2786f4 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -67,17 +67,11 @@ where let transactions = target.compress_with(&mut ctx).await?; let registrations: RegistrationsPerTable = ctx.finalize()?; - // Apply changes to the db - registrations.write_to_registry(&mut db, block.header().consensus().time)?; - - // Construct the actual compacted block - let compact = CompressedBlockPayloadV0 { + Ok(VersionedCompressedBlock::V0(CompressedBlockPayloadV0 { registrations, header: block.header().into(), transactions, - }; - - Ok(VersionedCompressedBlock::V0(compact)) + })) } /// Preparation pass through the block to collect all keys accessed during compression. @@ -152,7 +146,7 @@ macro_rules! compression { impl CompressCtx where D: CompressDb { /// Finalizes the compression context, returning the changes to the registry. - /// Commits the cache evictor states to the database. + /// Commits the registrations and cache evictor states to the database. fn finalize(mut self) -> anyhow::Result { let mut registrations = RegistrationsPerTable::default(); $( @@ -161,6 +155,7 @@ macro_rules! 
compression { registrations.$ident.push((key, value)); } )* + registrations.write_to_registry(&mut self.db, self.timestamp)?; Ok(registrations) } } From 1764850e9607432ae96925cf83a59864e4eb3333 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 1 Oct 2024 08:29:30 +0300 Subject: [PATCH 100/112] Fix Cargo.lock after merge issue --- Cargo.lock | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3d1c60c1002..a3e8f9b0b2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3977,18 +3977,11 @@ dependencies = [ "bitflags 2.6.0", "derivative", "derive_more", -<<<<<<< HEAD - "fuel-asm 0.57.0", - "fuel-compression", - "fuel-crypto 0.57.0", - "fuel-merkle 0.57.0", - "fuel-types 0.57.0", -======= "fuel-asm 0.57.1", + "fuel-compression", "fuel-crypto 0.57.1", "fuel-merkle 0.57.1", "fuel-types 0.57.1", ->>>>>>> master "hashbrown 0.14.5", "itertools 0.10.5", "postcard", From a3d85a0c66529dd4ac31e56a57da129358411207 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 1 Oct 2024 12:08:43 +0300 Subject: [PATCH 101/112] More Cargo.lock updates --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a3e8f9b0b2f..4eac7ef2d8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3151,12 +3151,12 @@ dependencies = [ [[package]] name = "fuel-compression" -version = "0.57.0" +version = "0.57.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ddebdc0c7440995bd89ae62bc5bbc7196ffd8dcff8cad78ffc55d5b2744e4d8" +checksum = "83d62c1ba6352f7bbcf1f2abcdd1c1092c7f5155653d75243b915c4e9ae500b4" dependencies = [ - "fuel-derive 0.57.0", - "fuel-types 0.57.0", + "fuel-derive 0.57.1", + "fuel-types 0.57.1", "serde", ] From d4526598c0bd4c5bae011b66db2924447c171c10 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Tue, 1 Oct 2024 12:08:57 +0300 Subject: [PATCH 102/112] Remove some unnecessary pub specifiers --- crates/compression/src/compress.rs | 10 +++++----- 1 
file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 9b7ee2786f4..8ae383dc262 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -76,14 +76,14 @@ where /// Preparation pass through the block to collect all keys accessed during compression. /// Returns dummy values. The resulting "compressed block" should be discarded. -pub struct PrepareCtx { - pub config: Config, +struct PrepareCtx { + config: Config, /// Current timestamp - pub timestamp: Tai64, + timestamp: Tai64, /// Database handle - pub db: D, + db: D, /// Keys accessed during the compression. - pub accessed_keys: PerRegistryKeyspace>, + accessed_keys: PerRegistryKeyspace>, } impl ContextError for PrepareCtx { From 0967f631978d39ea2f83a21dd69b2ed3177d13e8 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Wed, 2 Oct 2024 17:04:03 +0300 Subject: [PATCH 103/112] Address review comments --- crates/compression/src/compress.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 8ae383dc262..6fd596fb136 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -44,7 +44,9 @@ use std::collections::{ pub trait CompressDb: TemporalRegistryAll + UtxoIdToPointer {} impl CompressDb for T where T: TemporalRegistryAll + UtxoIdToPointer {} -/// This must be called for all new blocks in sequence, otherwise the result will be garbage. +/// This must be called for all new blocks in sequence, otherwise the result will be garbage, since +/// the registry is valid for only the current block height. On any other height you could be +/// referring to keys that have already been overwritten, or have not been written to yet. pub async fn compress( config: Config, mut db: D, @@ -194,6 +196,9 @@ macro_rules! 
compression { if self == &Default::default() { return Ok(RegistryKey::DEFAULT_VALUE); } + if let Some(found) = ctx.$ident.changes_lookup.get(self) { + return Ok(*found); + } if let Some(found) = ctx.db.registry_index_lookup(self)? { let key_timestamp = ctx.db.read_timestamp(&found) .context("Database invariant violated: no timestamp stored but key found")?; @@ -201,9 +206,6 @@ macro_rules! compression { return Ok(found); } } - if let Some(found) = ctx.$ident.changes_lookup.get(self) { - return Ok(*found); - } let key = ctx.$ident.cache_evictor.next_key(); let old = ctx.$ident.changes.insert(key, self.clone()); From f92b49779acabad61852b39d33047d975423ac42 Mon Sep 17 00:00:00 2001 From: Hannes Karppila <2204863+Dentosal@users.noreply.github.com> Date: Wed, 2 Oct 2024 17:06:03 +0300 Subject: [PATCH 104/112] Update crates/compression/src/lib.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Rafał Chabowski <88321181+rafal-ch@users.noreply.github.com> --- crates/compression/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/compression/src/lib.rs b/crates/compression/src/lib.rs index 4b22d2e8700..bd4b0fdcbba 100644 --- a/crates/compression/src/lib.rs +++ b/crates/compression/src/lib.rs @@ -72,7 +72,7 @@ mod tests { proptest! { /// Serialization for compressed transactions is already tested in fuel-vm, - /// but the rest of the block de/serialization is be tested here. + /// but the rest of the block de/serialization is tested here. 
#[test] fn postcard_roundtrip( da_height in 0..=u64::MAX, From 448dcc0b61e0f3f1f9620a36a060048c99d09b4d Mon Sep 17 00:00:00 2001 From: Hannes Karppila <2204863+Dentosal@users.noreply.github.com> Date: Wed, 2 Oct 2024 17:08:20 +0300 Subject: [PATCH 105/112] Update crates/compression/src/compress.rs Co-authored-by: Green Baneling --- crates/compression/src/compress.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 6fd596fb136..e188d8450b2 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -211,7 +211,7 @@ macro_rules! compression { let old = ctx.$ident.changes.insert(key, self.clone()); let old_rev = ctx.$ident.changes_lookup.insert(self.clone(), key); assert!(old.is_none(), "Key collision in registry substitution"); - assert!(old_rev.is_none(), "Key collision in registry substitution"); + debug_assert!(old_rev.is_none(), "Key collision in registry substitution"); Ok(key) } } From f01176b6f93ef180aa2ff293c32d1f7e6aa08dec Mon Sep 17 00:00:00 2001 From: Hannes Karppila <2204863+Dentosal@users.noreply.github.com> Date: Wed, 2 Oct 2024 17:08:43 +0300 Subject: [PATCH 106/112] Update crates/compression/src/compress.rs Co-authored-by: Green Baneling --- crates/compression/src/compress.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index e188d8450b2..e66da123cda 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -210,7 +210,7 @@ macro_rules! 
compression { let key = ctx.$ident.cache_evictor.next_key(); let old = ctx.$ident.changes.insert(key, self.clone()); let old_rev = ctx.$ident.changes_lookup.insert(self.clone(), key); - assert!(old.is_none(), "Key collision in registry substitution"); + debug_assert!(old.is_none(), "Key collision in registry substitution"); debug_assert!(old_rev.is_none(), "Key collision in registry substitution"); Ok(key) } From 8787971aa53bd341a013080aa4079499d8d5641a Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 3 Oct 2024 15:18:00 +0300 Subject: [PATCH 107/112] Reduce db lookups in PrepareCtx --- crates/compression/src/compress.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index e66da123cda..00afa40d036 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -175,10 +175,12 @@ macro_rules! compression { return Ok(RegistryKey::ZERO); } if let Some(found) = ctx.db.registry_index_lookup(self)? { - let key_timestamp = ctx.db.read_timestamp(&found) - .context("Database invariant violated: no timestamp stored but key found")?; - if ctx.config.is_timestamp_accessible(ctx.timestamp, key_timestamp)? { - ctx.accessed_keys.$ident.insert(found); + if !ctx.accessed_keys.$ident.contains(&found) { + let key_timestamp = ctx.db.read_timestamp(&found) + .context("Database invariant violated: no timestamp stored but key found")?; + if ctx.config.is_timestamp_accessible(ctx.timestamp, key_timestamp)? 
{ + ctx.accessed_keys.$ident.insert(found); + } } } Ok(RegistryKey::ZERO) From cf03b7fa86e80ff6bdd730db841de9623dd778c9 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 3 Oct 2024 16:33:05 +0300 Subject: [PATCH 108/112] Make integration tests slightly more accurate --- Cargo.lock | 1 + tests/Cargo.toml | 1 + tests/tests/da_compression.rs | 36 ++++++++++++++++++++++++----------- 3 files changed, 27 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0141cad28d3..b22d61919ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3716,6 +3716,7 @@ dependencies = [ "insta", "itertools 0.12.1", "k256", + "postcard", "pretty_assertions", "primitive-types", "proptest", diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 071e76b5ef1..57f0fe20d9b 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -57,6 +57,7 @@ hyper = { workspace = true, features = ["server"] } insta = { workspace = true } itertools = { workspace = true } k256 = { version = "0.13.3", features = ["ecdsa-core"] } +postcard = { workspace = true } primitive-types = { workspace = true, default-features = false } rand = { workspace = true } reqwest = { workspace = true } diff --git a/tests/tests/da_compression.rs b/tests/tests/da_compression.rs index 9e8450e9f20..7bc3c4e03de 100644 --- a/tests/tests/da_compression.rs +++ b/tests/tests/da_compression.rs @@ -12,12 +12,18 @@ use fuel_core_client::client::{ types::TransactionStatus, FuelClient, }; +use fuel_core_compression::VersionedCompressedBlock; use fuel_core_poa::signer::SignMode; use fuel_core_types::{ + fuel_asm::{ + op, + RegId, + }, fuel_crypto::SecretKey, fuel_tx::{ + GasCosts, Input, - Transaction, + TransactionBuilder, }, secrecy::Secret, }; @@ -34,28 +40,35 @@ async fn can_fetch_da_compressed_block_from_graphql() { let db = CombinedDatabase::default(); let mut config = Config::local_node(); config.consensus_signer = SignMode::Key(Secret::new(poa_secret.into())); - config.da_compression =
DaCompressionConfig::Enabled(fuel_core_compression::Config { + let compression_config = fuel_core_compression::Config { temporal_registry_retention: Duration::from_secs(3600), - }); + }; + config.da_compression = DaCompressionConfig::Enabled(compression_config); let srv = FuelService::from_combined_database(db.clone(), config) .await .unwrap(); let client = FuelClient::from(srv.bound_address); - let status = client - .submit_and_await_commit(&Transaction::default_test_tx()) - .await - .unwrap(); + let tx = + TransactionBuilder::script([op::ret(RegId::ONE)].into_iter().collect(), vec![]) + .max_fee_limit(0) + .script_gas_limit(1_000_000) + .with_gas_costs(GasCosts::free()) + .add_random_fee_input() + .finalize_as_transaction(); + + let status = client.submit_and_await_commit(&tx).await.unwrap(); let block_height = match status { TransactionStatus::Success { block_height, .. } => block_height, - _ => { - panic!("unexpected result") + other => { + panic!("unexpected result {other:?}") } }; let block = client.da_compressed_block(block_height).await.unwrap(); - assert!(block.is_some()); + let block = block.expect("Unable to get compressed block"); + let _: VersionedCompressedBlock = postcard::from_bytes(&block).unwrap(); } #[tokio::test(flavor = "multi_thread")] @@ -97,9 +110,10 @@ async fn da_compressed_blocks_are_available_from_non_block_producing_nodes() { let block_height = 1u32.into(); - let _ = v_client + let block = v_client .da_compressed_block(block_height) .await .unwrap() .expect("Compressed block not available from validator"); + let _: VersionedCompressedBlock = postcard::from_bytes(&block).unwrap(); } From 4061e8cdaf0895b3fa642d61adc7943a3bfdef80 Mon Sep 17 00:00:00 2001 From: Hannes Karppila Date: Thu, 3 Oct 2024 16:34:07 +0300 Subject: [PATCH 109/112] Split TemporalRegistryAll to get correct bounds for decompression --- crates/compression/src/compress.rs | 5 +++-- crates/compression/src/decompress.rs | 12 ++++++------ crates/compression/src/registry.rs | 
14 ++++++++++++-- 3 files changed, 21 insertions(+), 10 deletions(-) diff --git a/crates/compression/src/compress.rs b/crates/compression/src/compress.rs index 00afa40d036..0ad14e39f55 100644 --- a/crates/compression/src/compress.rs +++ b/crates/compression/src/compress.rs @@ -7,6 +7,7 @@ use crate::{ UtxoIdToPointer, }, registry::{ + EvictorDbAll, PerRegistryKeyspace, RegistrationsPerTable, TemporalRegistryAll, @@ -41,8 +42,8 @@ use std::collections::{ HashSet, }; -pub trait CompressDb: TemporalRegistryAll + UtxoIdToPointer {} -impl CompressDb for T where T: TemporalRegistryAll + UtxoIdToPointer {} +pub trait CompressDb: TemporalRegistryAll + EvictorDbAll + UtxoIdToPointer {} +impl CompressDb for T where T: TemporalRegistryAll + EvictorDbAll + UtxoIdToPointer {} /// This must be called for all new blocks in sequence, otherwise the result will be garbage, since /// the registry is valid for only the current block height. On any other height you could be diff --git a/crates/compression/src/decompress.rs b/crates/compression/src/decompress.rs index c3baa909824..15565ce8433 100644 --- a/crates/compression/src/decompress.rs +++ b/crates/compression/src/decompress.rs @@ -266,11 +266,11 @@ mod tests { ($type:ty) => { impl TemporalRegistry<$type> for MockDb { fn read_registry(&self, _key: &RegistryKey) -> anyhow::Result<$type> { - todo!() + unimplemented!() } fn read_timestamp(&self, _key: &RegistryKey) -> anyhow::Result { - todo!() + unimplemented!() } fn write_registry( @@ -279,14 +279,14 @@ mod tests { _value: &$type, _timestamp: Tai64, ) -> anyhow::Result<()> { - todo!() + unimplemented!() } fn registry_index_lookup( &self, _value: &$type, ) -> anyhow::Result> { - todo!() + unimplemented!() } } @@ -295,11 +295,11 @@ mod tests { &mut self, _key: RegistryKey, ) -> anyhow::Result<()> { - todo!() + unimplemented!() } fn get_latest_assigned_key(&self) -> anyhow::Result> { - todo!() + unimplemented!() } } }; diff --git a/crates/compression/src/registry.rs 
b/crates/compression/src/registry.rs index 5e46aea4874..0bf1e3a5967 100644 --- a/crates/compression/src/registry.rs +++ b/crates/compression/src/registry.rs @@ -58,12 +58,22 @@ macro_rules! tables { pub trait TemporalRegistryAll where - $(Self: TemporalRegistry<$type> + EvictorDb<$type>,)* + $(Self: TemporalRegistry<$type>,)* {} impl TemporalRegistryAll for T where - $(T: TemporalRegistry<$type> + EvictorDb<$type>,)* + $(T: TemporalRegistry<$type>,)* + {} + + pub trait EvictorDbAll + where + $(Self: EvictorDb<$type>,)* + {} + + impl EvictorDbAll for T + where + $(T: EvictorDb<$type>,)* {} From 466904629f8f953409d745be2d5d27c901e24577 Mon Sep 17 00:00:00 2001 From: green Date: Fri, 4 Oct 2024 21:56:36 +0200 Subject: [PATCH 110/112] Wait for the off chain database to process all blocks --- crates/fuel-core/src/p2p_test_helpers.rs | 41 +++++++++++++++--------- tests/tests/da_compression.rs | 3 +- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/crates/fuel-core/src/p2p_test_helpers.rs b/crates/fuel-core/src/p2p_test_helpers.rs index f72b4c348b0..a716b739bb9 100644 --- a/crates/fuel-core/src/p2p_test_helpers.rs +++ b/crates/fuel-core/src/p2p_test_helpers.rs @@ -6,7 +6,11 @@ use crate::{ CoinConfigGenerator, }, combined_database::CombinedDatabase, - database::Database, + database::{ + database_description::off_chain::OffChain, + Database, + }, + fuel_core_graphql_api::storage::transactions::TransactionStatuses, p2p::Multiaddr, service::{ Config, @@ -33,7 +37,6 @@ use fuel_core_poa::{ Trigger, }; use fuel_core_storage::{ - tables::Transactions, transactional::AtomicView, StorageAsRef, }; @@ -59,7 +62,6 @@ use fuel_core_types::{ services::p2p::GossipsubMessageAcceptance, }; use futures::StreamExt; -use itertools::Itertools; use rand::{ rngs::StdRng, SeedableRng, @@ -491,24 +493,27 @@ impl Node { /// Wait for the node to reach consistency with the given transactions. pub async fn consistency(&mut self, txs: &HashMap) { - let Self { db, .. 
} = self; - let mut blocks = self.node.shared.block_importer.block_stream(); - while !not_found_txs(db, txs).is_empty() { + let db = self.node.shared.database.off_chain(); + loop { + let not_found = not_found_txs(db, txs); + + if not_found.is_empty() { + break; + } + + let tx_id = not_found[0]; + let mut wait_transaction = + self.node.transaction_status_change(tx_id).unwrap(); + tokio::select! { - result = blocks.next() => { - result.unwrap(); + result = wait_transaction.next() => { + let _ = result.unwrap(); } _ = self.node.await_shutdown() => { panic!("Got a stop signal") } } } - - let count = db - .all_transactions(None, None) - .filter_ok(|tx| tx.is_script()) - .count(); - assert_eq!(count, txs.len()); } /// Wait for the node to reach consistency with the given transactions within 10 seconds. @@ -570,13 +575,17 @@ impl Node { } fn not_found_txs<'iter>( - db: &'iter Database, + db: &'iter Database, txs: &'iter HashMap, ) -> Vec { let mut not_found = vec![]; txs.iter().for_each(|(id, tx)| { assert_eq!(id, &tx.id(&Default::default())); - if !db.storage::().contains_key(id).unwrap() { + let found = db + .storage::() + .contains_key(id) + .unwrap(); + if !found { not_found.push(*id); } }); diff --git a/tests/tests/da_compression.rs b/tests/tests/da_compression.rs index 7bc3c4e03de..1aadd50728e 100644 --- a/tests/tests/da_compression.rs +++ b/tests/tests/da_compression.rs @@ -98,14 +98,13 @@ async fn da_compressed_blocks_are_available_from_non_block_producing_nodes() { ) .await; - let mut producer = producers.pop().unwrap(); + let producer = producers.pop().unwrap(); let mut validator = validators.pop().unwrap(); let v_client = FuelClient::from(validator.node.shared.graph_ql.bound_address); // Insert some txs let expected = producer.insert_txs().await; - producer.consistency_10s(&expected).await; validator.consistency_20s(&expected).await; let block_height = 1u32.into(); From 997b41aaa681ecc874090f3618bc8454b1174962 Mon Sep 17 00:00:00 2001 From: green Date: Fri, 4 
Oct 2024 22:34:57 +0200 Subject: [PATCH 111/112] Avoid active loop --- crates/fuel-core/src/p2p_test_helpers.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/crates/fuel-core/src/p2p_test_helpers.rs b/crates/fuel-core/src/p2p_test_helpers.rs index a716b739bb9..8303122eb45 100644 --- a/crates/fuel-core/src/p2p_test_helpers.rs +++ b/crates/fuel-core/src/p2p_test_helpers.rs @@ -12,6 +12,7 @@ use crate::{ }, fuel_core_graphql_api::storage::transactions::TransactionStatuses, p2p::Multiaddr, + schema::tx::types::TransactionStatus, service::{ Config, FuelService, @@ -505,12 +506,19 @@ impl Node { let mut wait_transaction = self.node.transaction_status_change(tx_id).unwrap(); - tokio::select! { - result = wait_transaction.next() => { - let _ = result.unwrap(); - } - _ = self.node.await_shutdown() => { - panic!("Got a stop signal") + loop { + tokio::select! { + result = wait_transaction.next() => { + let status = result.unwrap().unwrap(); + + if matches!(status, TransactionStatus::Failed { .. }) + || matches!(status, TransactionStatus::Success { .. }) { + break + } + } + _ = self.node.await_shutdown() => { + panic!("Got a stop signal") + } } } } From 05d811db900dfcfd975687e17c2d3a43ef65518a Mon Sep 17 00:00:00 2001 From: green Date: Sat, 5 Oct 2024 00:32:02 +0200 Subject: [PATCH 112/112] Remove flakiness from `test_peer_info` test --- tests/tests/node_info.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/tests/tests/node_info.rs b/tests/tests/node_info.rs index f84b767e4df..4dda549bb86 100644 --- a/tests/tests/node_info.rs +++ b/tests/tests/node_info.rs @@ -92,8 +92,18 @@ async fn test_peer_info() { // This is just a mock of what we should be able to do with GQL API. 
let client = producer.node.bound_address; let client = FuelClient::from(client); - let peers = client.connected_peers_info().await.unwrap(); - assert_eq!(peers.len(), 2); + let mut peers; + + // It takes some time before all validators are connected. + loop { + peers = client.connected_peers_info().await.unwrap(); + + if peers.len() == 2 { + break; + } + tokio::time::sleep(Duration::from_secs(1)).await; + } + let info = peers .iter() .find(|info| info.id.to_string() == validator_peer_id.to_base58())