From 298f1b1153ff616c41d9ff76135a240c3339ff2c Mon Sep 17 00:00:00 2001
From: Alon Lukatch
Date: Wed, 25 Dec 2024 15:09:18 +0200
Subject: [PATCH] refactor(papyrus_storage): fix CR comments

---
 crates/papyrus_storage/src/body/events.rs | 17 +++++++++--------
 crates/papyrus_storage/src/lib.rs         |  4 ++--
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/crates/papyrus_storage/src/body/events.rs b/crates/papyrus_storage/src/body/events.rs
index a9a9ee4edd..df9292ad22 100644
--- a/crates/papyrus_storage/src/body/events.rs
+++ b/crates/papyrus_storage/src/body/events.rs
@@ -381,16 +381,17 @@ type AddressToTransactionIndexTableCursor<'txn> = DbCursor<
 type TransactionMetadataTableCursor<'txn> =
     DbCursor<'txn, RO, TransactionIndex, VersionZeroWrapper, SimpleTable>;
 
-/// interface for updating the events in the storage.
+/// Interface for updating the events in the storage.
 pub trait EventStorageWriter
 where
     Self: Sized,
 {
     /// Appends the events of an entire block to the storage.
+    // To enforce that no commit happens after a failure, we consume and return Self on success.
     fn append_events(
         self,
         block_number: BlockNumber,
-        block_events: Vec<Vec<Event>>,
+        block_events: &[&[Event]],
     ) -> StorageResult<Self>;
 }
 
@@ -398,7 +399,7 @@ impl EventStorageWriter for StorageTxn<'_, RW> {
     fn append_events(
         self,
         block_number: BlockNumber,
-        block_events: Vec<Vec<Event>>,
+        block_events: &[&[Event]],
     ) -> StorageResult<Self> {
         let markers_table = self.open_table(&self.tables.markers)?;
         update_marker(&self.txn, &markers_table, block_number)?;
@@ -408,22 +409,22 @@ impl EventStorageWriter for StorageTxn<'_, RW> {
         let address_to_transaction_index =
             self.open_table(&self.tables.address_to_transaction_index)?;
 
-        for (index, transaction_events) in block_events.iter().enumerate() {
+        for (index, &transaction_events) in block_events.iter().enumerate() {
             let transaction_index =
                 TransactionIndex(block_number, TransactionOffsetInBlock(index));
-            let event_offset = self.file_handlers.append_events(&transaction_events.clone());
+            let event_offset = self.file_handlers.append_events(transaction_events);
             events_table.append(&self.txn, &transaction_index, &event_offset)?;
-            for even in transaction_events {
+            for event in transaction_events {
                 address_to_transaction_index.insert(
                     &self.txn,
-                    &(even.from_address, transaction_index),
+                    &(event.from_address, transaction_index),
                     &NoValue,
                 )?;
             }
             if index == block_events.len() - 1 {
                 file_offset_table.upsert(
                     &self.txn,
-                    &OffsetKind::Events,
+                    &OffsetKind::Event,
                     &event_offset.next_offset(),
                 )?;
             }
diff --git a/crates/papyrus_storage/src/lib.rs b/crates/papyrus_storage/src/lib.rs
index a12c94bca1..62e4fe3736 100644
--- a/crates/papyrus_storage/src/lib.rs
+++ b/crates/papyrus_storage/src/lib.rs
@@ -721,8 +721,8 @@ impl FileHandlers {
     }
 
     // Appends an event to the corresponding file and returns its location.
-    fn append_events(&self, events: &Vec<Event>) -> LocationInFile {
-        self.clone().event.append(events)
+    fn append_events(&self, events: &[Event]) -> LocationInFile {
+        self.clone().event.append(&events.to_vec())
     }
 
     // TODO(dan): Consider 1. flushing only the relevant files, 2. flushing concurrently.
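
For readers who want the two ideas behind this change in isolation, here is a minimal, self-contained Rust sketch of (a) taking borrowed slices (&[&[Event]]) instead of an owned Vec<Vec<Event>> and (b) consuming the writer and returning Self so nothing can be committed after a failed append. ToyWriter, ToyEvent, and ToyError are illustrative stand-ins, not papyrus_storage types.

// Self-contained sketch of the consume-and-return-Self pattern and the
// borrowed-slice signature used in the patch above. ToyWriter, ToyEvent and
// ToyError are illustrative stand-ins, not papyrus_storage types.

#[derive(Debug)]
struct ToyError;

#[derive(Debug)]
struct ToyEvent {
    from_address: u64,
}

struct ToyWriter {
    appended_rows: usize,
}

impl ToyWriter {
    // Consuming `self` means a failed call drops the writer, so no commit can
    // follow a failure; on success the writer is handed back to the caller.
    fn append_events(mut self, block_events: &[&[ToyEvent]]) -> Result<Self, ToyError> {
        for (index, &transaction_events) in block_events.iter().enumerate() {
            // Borrowed slices: no need to clone the per-transaction event vectors.
            for event in transaction_events {
                let _ = (index, event.from_address);
                self.appended_rows += 1;
            }
        }
        Ok(self)
    }

    fn commit(self) -> Result<(), ToyError> {
        println!("committing {} event rows", self.appended_rows);
        Ok(())
    }
}

fn main() -> Result<(), ToyError> {
    let tx0 = vec![ToyEvent { from_address: 1 }, ToyEvent { from_address: 2 }];
    let tx1: Vec<ToyEvent> = Vec::new();
    // An owned Vec<Vec<ToyEvent>> can be passed as &[&[ToyEvent]] by borrowing each inner Vec.
    let block_events: Vec<&[ToyEvent]> = vec![tx0.as_slice(), tx1.as_slice()];

    let writer = ToyWriter { appended_rows: 0 };
    // `?` short-circuits and drops the writer on error; only the success path can commit.
    let writer = writer.append_events(&block_events)?;
    writer.commit()
}

Since both &[Event] and &[&[Event]] are plain borrows, callers holding a Vec (or nested Vecs) can pass them without cloning, which is what removing the transaction_events.clone() call in the patch relies on.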