Deprecate old epoch flags and delete dead code (#18294)
All of these flags have been released for months, so the old code should
not be needed anywhere at this point.
mystenmark authored Jun 18, 2024
1 parent ac30285 commit 62b5953
Showing 5 changed files with 29 additions and 215 deletions.
18 changes: 0 additions & 18 deletions crates/sui-core/src/authority/authority_per_epoch_store.rs
@@ -774,13 +774,6 @@ impl AuthorityPerEpochStore {
info!("authenticator_state disabled");
}

let is_validator = committee.authority_index(&name).is_some();
if is_validator {
assert!(epoch_start_configuration
.flags()
.contains(&EpochFlag::InMemoryCheckpointRoots));
}

let mut jwk_aggregator = JwkAggregator::new(committee.clone());

for ((authority, id, jwk), _) in tables.pending_jwks.unbounded_iter().seek_to_first() {
@@ -1426,17 +1419,6 @@ impl AuthorityPerEpochStore {
.collect())
}

pub fn per_epoch_finalized_txns_enabled(&self) -> bool {
self.epoch_start_configuration
.flags()
.contains(&EpochFlag::PerEpochFinalizedTransactions)
}

pub fn object_lock_split_tables_enabled(&self) -> bool {
self.epoch_start_configuration
.object_lock_split_tables_enabled()
}

// For each id in objects_to_init, return the next version for that id as recorded in the
// next_shared_object_versions table.
//
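The two helpers removed above (per_epoch_finalized_txns_enabled and object_lock_split_tables_enabled) followed the usual epoch-flag rollout pattern: a new behaviour ships gated behind an EpochFlag, and once every epoch in production carries the flag, call sites drop the check and the helper can be deleted (as the checkpoint executor change further below does). A minimal sketch of that pattern, with hypothetical names rather than the Sui APIs:

// Minimal sketch of a flag-gated rollout; EpochStore and its fields are
// stand-ins, not the Sui types.
struct EpochStore {
    per_epoch_finalized_txns: bool, // stand-in for an EpochFlag lookup
}

impl EpochStore {
    // During rollout: callers branch on the helper.
    fn finalize_gated(&self, digests: &[u64]) {
        if self.per_epoch_finalized_txns {
            self.insert_finalized(digests);
        }
    }

    // After cleanup: the behaviour is unconditional and the helper disappears.
    fn finalize(&self, digests: &[u64]) {
        self.insert_finalized(digests);
    }

    fn insert_finalized(&self, digests: &[u64]) {
        println!("recording {} finalized transactions", digests.len());
    }
}

fn main() {
    let store = EpochStore { per_epoch_finalized_txns: true };
    store.finalize_gated(&[1, 2, 3]); // pre-cleanup call site
    store.finalize(&[4, 5, 6]);       // post-cleanup call site
}
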
171 changes: 9 additions & 162 deletions crates/sui-core/src/authority/authority_store.rs
@@ -1,7 +1,6 @@
// Copyright (c) Mysten Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

use std::cmp::Ordering;
use std::ops::Not;
use std::sync::Arc;
use std::{iter, mem, thread};
@@ -990,109 +989,7 @@ impl AuthorityStore {
Ok(())
}

pub(crate) async fn acquire_transaction_locks(
&self,
epoch_store: &AuthorityPerEpochStore,
owned_input_objects: &[ObjectRef],
transaction: VerifiedSignedTransaction,
) -> SuiResult {
let tx_digest = *transaction.digest();
if epoch_store.object_lock_split_tables_enabled() {
self.acquire_transaction_locks_v2(epoch_store, owned_input_objects, transaction)
.await
} else {
self.acquire_transaction_locks_v1(epoch_store, owned_input_objects, tx_digest)
.await
}
}

/// Acquires a lock for a transaction on the given objects if they have all been initialized previously
async fn acquire_transaction_locks_v1(
&self,
epoch_store: &AuthorityPerEpochStore,
owned_input_objects: &[ObjectRef],
tx_digest: TransactionDigest,
) -> SuiResult {
let epoch = epoch_store.epoch();
// Other writers may be attempting to acquire locks on the same objects, so a mutex is
// required.
// TODO: replace with optimistic db_transactions (i.e. set lock to tx if none)
let _mutexes = self.acquire_locks(owned_input_objects).await;

trace!(?owned_input_objects, "acquire_locks");
let mut locks_to_write = Vec::new();

let locks = self
.perpetual_tables
.live_owned_object_markers
.multi_get(owned_input_objects)?;

for ((i, lock), obj_ref) in locks.into_iter().enumerate().zip(owned_input_objects) {
// The object / version must exist, and therefore lock initialized.
if lock.is_none() {
let latest_lock = self.get_latest_live_version_for_object_id(obj_ref.0)?;
fp_bail!(UserInputError::ObjectVersionUnavailableForConsumption {
provided_obj_ref: *obj_ref,
current_version: latest_lock.1
}
.into());
}
// Safe to unwrap as it is checked above
let lock = lock.unwrap().map(|l| l.migrate().into_inner());

if let Some(LockDetailsDeprecated {
epoch: previous_epoch,
tx_digest: previous_tx_digest,
}) = &lock
{
fp_ensure!(
&epoch >= previous_epoch,
SuiError::ObjectLockedAtFutureEpoch {
obj_refs: owned_input_objects.to_vec(),
locked_epoch: *previous_epoch,
new_epoch: epoch,
locked_by_tx: *previous_tx_digest,
}
);
// Lock already set to different transaction from the same epoch.
// If the lock is set in a previous epoch, it's ok to override it.
if previous_epoch == &epoch && previous_tx_digest != &tx_digest {
// TODO: add metrics here
info!(prev_tx_digest = ?previous_tx_digest,
cur_tx_digest = ?tx_digest,
"Cannot acquire lock: conflicting transaction!");
return Err(SuiError::ObjectLockConflict {
obj_ref: *obj_ref,
pending_transaction: *previous_tx_digest,
});
}
if &epoch == previous_epoch {
// Exactly the same epoch and same transaction, nothing to lock here.
continue;
} else {
info!(prev_epoch =? previous_epoch, cur_epoch =? epoch, "Overriding an old lock from previous epoch");
// Fall through and override the old lock.
}
}
let obj_ref = owned_input_objects[i];
let lock_details = LockDetailsDeprecated { epoch, tx_digest };
locks_to_write.push((obj_ref, Some(lock_details.into())));
}

if !locks_to_write.is_empty() {
trace!(?locks_to_write, "Writing locks");
let mut batch = self.perpetual_tables.live_owned_object_markers.batch();
batch.insert_batch(
&self.perpetual_tables.live_owned_object_markers,
locks_to_write,
)?;
batch.write()?;
}

Ok(())
}

async fn acquire_transaction_locks_v2(
pub async fn acquire_transaction_locks(
&self,
epoch_store: &AuthorityPerEpochStore,
owned_input_objects: &[ObjectRef],
@@ -1182,18 +1079,6 @@ impl AuthorityStore {
&self,
obj_ref: ObjectRef,
epoch_store: &AuthorityPerEpochStore,
) -> SuiLockResult {
if epoch_store.object_lock_split_tables_enabled() {
self.get_lock_v2(obj_ref, epoch_store)
} else {
self.get_lock_v1(obj_ref, epoch_store.epoch())
}
}

fn get_lock_v2(
&self,
obj_ref: ObjectRef,
epoch_store: &AuthorityPerEpochStore,
) -> SuiLockResult {
if self
.perpetual_tables
@@ -1221,43 +1106,6 @@
}
}

fn get_lock_v1(&self, obj_ref: ObjectRef, epoch_id: EpochId) -> SuiLockResult {
Ok(
if let Some(lock_info) = self
.perpetual_tables
.live_owned_object_markers
.get(&obj_ref)?
{
match lock_info {
Some(lock_info) => {
let lock_info = lock_info.migrate().into_inner();
match Ord::cmp(&lock_info.epoch, &epoch_id) {
// If the object was locked in a previous epoch, we can say that it's
// no longer locked and is considered as just Initialized.
Ordering::Less => ObjectLockStatus::Initialized,
Ordering::Equal => ObjectLockStatus::LockedToTx {
locked_by_tx: lock_info,
},
Ordering::Greater => {
return Err(SuiError::ObjectLockedAtFutureEpoch {
obj_refs: vec![obj_ref],
locked_epoch: lock_info.epoch,
new_epoch: epoch_id,
locked_by_tx: lock_info.tx_digest,
});
}
}
}
None => ObjectLockStatus::Initialized,
}
} else {
ObjectLockStatus::LockedAtDifferentVersion {
locked_ref: self.get_latest_live_version_for_object_id(obj_ref.0)?,
}
},
)
}

/// Returns UserInputError::ObjectNotFound if no lock records found for this object.
pub(crate) fn get_latest_live_version_for_object_id(
&self,
@@ -1333,27 +1181,26 @@ impl AuthorityStore {
) -> SuiResult {
trace!(?objects, "initialize_locks");

let locks = live_object_marker_table.multi_get(objects)?;
let live_object_markers = live_object_marker_table.multi_get(objects)?;

if !is_force_reset {
// If any locks exist and are not None, return errors for them
// Note that if epoch_store.object_lock_split_tables_enabled() is true, we don't
// check if there is a pre-existing lock. this is because initializing the live
// If any live_object_markers exist and are not None, return errors for them
// Note we don't check if there is a pre-existing lock. this is because initializing the live
// object marker will not overwrite the lock and cause the validator to equivocate.
let existing_locks: Vec<ObjectRef> = locks
let existing_live_object_markers: Vec<ObjectRef> = live_object_markers
.iter()
.zip(objects)
.filter_map(|(lock_opt, objref)| {
lock_opt.clone().flatten().map(|_tx_digest| *objref)
})
.collect();
if !existing_locks.is_empty() {
if !existing_live_object_markers.is_empty() {
info!(
?existing_locks,
"Cannot initialize locks because some exist already"
?existing_live_object_markers,
"Cannot initialize live_object_markers because some exist already"
);
return Err(SuiError::ObjectLockAlreadyInitialized {
refs: existing_locks,
refs: existing_live_object_markers,
});
}
}
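With the v1 path gone, acquire_transaction_locks keeps only the split-table behaviour, but the rule it enforces is unchanged: an owned object version may be locked to at most one transaction at a time, and a second transaction that tries to lock it is rejected so the validator cannot equivocate. A minimal sketch of that rule with stand-in types (the real code also holds per-object mutexes and writes through a batch, which is omitted here):

use std::collections::HashMap;

// Stand-ins for the Sui types; not the real definitions.
type ObjectRef = (u64, u64); // (object id, version)
type TxDigest = [u8; 32];

#[derive(Default)]
struct LockTable {
    // None = object version is live but not locked to any transaction yet.
    locks: HashMap<ObjectRef, Option<TxDigest>>,
}

impl LockTable {
    fn acquire(&mut self, inputs: &[ObjectRef], tx: TxDigest) -> Result<(), String> {
        // First pass: validate every input before writing anything, so a
        // conflict on one object does not leave the others half-locked.
        for obj in inputs {
            match self.locks.get(obj) {
                None => return Err(format!("object {:?} is not live at this version", obj)),
                Some(Some(prev)) if *prev != tx => {
                    return Err(format!("object {:?} is already locked by another tx", obj))
                }
                _ => {} // unlocked, or already locked to this same transaction
            }
        }
        // Second pass: record the lock for each input.
        for obj in inputs {
            self.locks.insert(*obj, Some(tx));
        }
        Ok(())
    }
}

fn main() {
    let mut table = LockTable::default();
    table.locks.insert((1, 1), None); // object 1 at version 1 is live and unlocked
    assert!(table.acquire(&[(1, 1)], [0u8; 32]).is_ok());
    assert!(table.acquire(&[(1, 1)], [9u8; 32]).is_err()); // conflicting tx rejected
}
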
32 changes: 17 additions & 15 deletions crates/sui-core/src/authority/epoch_start_configuration.rs
@@ -43,9 +43,13 @@ pub trait EpochStartConfigTrait {

#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub enum EpochFlag {
InMemoryCheckpointRoots,
PerEpochFinalizedTransactions,
ObjectLockSplitTables,
// The deprecated flags have all been in production for long enough that
// we have deleted the old code paths they were guarding.
// We retain them here in order not to break deserialization.
_InMemoryCheckpointRootsDeprecated,
_PerEpochFinalizedTransactionsDeprecated,
_ObjectLockSplitTablesDeprecated,

WritebackCacheEnabled,
StateAccumulatorV2Enabled,
}
@@ -64,11 +68,7 @@ impl EpochFlag {
cache_config: &ExecutionCacheConfig,
enable_state_accumulator_v2: bool,
) -> Vec<Self> {
let mut new_flags = vec![
EpochFlag::InMemoryCheckpointRoots,
EpochFlag::PerEpochFinalizedTransactions,
EpochFlag::ObjectLockSplitTables,
];
let mut new_flags = vec![];

if matches!(
choose_execution_cache(cache_config),
@@ -89,9 +89,15 @@ impl fmt::Display for EpochFlag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Important - implementation should return low cardinality values because this is used as metric key
match self {
EpochFlag::InMemoryCheckpointRoots => write!(f, "InMemoryCheckpointRoots"),
EpochFlag::PerEpochFinalizedTransactions => write!(f, "PerEpochFinalizedTransactions"),
EpochFlag::ObjectLockSplitTables => write!(f, "ObjectLockSplitTables"),
EpochFlag::_InMemoryCheckpointRootsDeprecated => {
write!(f, "InMemoryCheckpointRoots (DEPRECATED)")
}
EpochFlag::_PerEpochFinalizedTransactionsDeprecated => {
write!(f, "PerEpochFinalizedTransactions (DEPRECATED)")
}
EpochFlag::_ObjectLockSplitTablesDeprecated => {
write!(f, "ObjectLockSplitTables (DEPRECATED)")
}
EpochFlag::WritebackCacheEnabled => write!(f, "WritebackCacheEnabled"),
EpochFlag::StateAccumulatorV2Enabled => write!(f, "StateAccumulatorV2Enabled"),
}
@@ -169,10 +175,6 @@ impl EpochStartConfiguration {
pub fn epoch_start_timestamp_ms(&self) -> CheckpointTimestamp {
self.epoch_start_state().epoch_start_timestamp_ms()
}

pub fn object_lock_split_tables_enabled(&self) -> bool {
self.flags().contains(&EpochFlag::ObjectLockSplitTables)
}
}

#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
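The "not to break deserialization" comment above is why the three retired flags are renamed in place rather than deleted: with index-based binary encodings, a serde enum variant is identified by its position, so removing early variants would change how bytes already persisted for later variants decode. A minimal, self-contained illustration of the hazard (using serde plus bincode 1.x as an assumed stand-in encoding; the encoding Sui actually uses for stored epoch state is not shown in this diff):

use serde::{Deserialize, Serialize};

// The enum as it looked when values were first written to disk.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
enum FlagV1 {
    OldFeatureA, // index 0
    OldFeatureB, // index 1
    NewFeature,  // index 2
}

// Deleting OldFeatureA/OldFeatureB would shift NewFeature to index 0, so old
// bytes would decode as the wrong variant (or fail). Renaming the variants
// keeps the indices stable while signalling that nothing reads them anymore.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
enum FlagV2 {
    _OldFeatureADeprecated,
    _OldFeatureBDeprecated,
    NewFeature,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let bytes = bincode::serialize(&FlagV1::NewFeature)?;
    let decoded: FlagV2 = bincode::deserialize(&bytes)?;
    assert_eq!(decoded, FlagV2::NewFeature); // indices still line up
    Ok(())
}

Keeping the deprecated variants at their original positions lets previously written EpochStartConfiguration records decode exactly as before, while the leading underscore marks them as unused.
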
5 changes: 2 additions & 3 deletions crates/sui-core/src/checkpoints/checkpoint_executor/mod.rs
@@ -1311,9 +1311,8 @@ async fn finalize_checkpoint(
data_ingestion_dir: Option<PathBuf>,
) -> SuiResult<Accumulator> {
debug!("finalizing checkpoint");
if epoch_store.per_epoch_finalized_txns_enabled() {
epoch_store.insert_finalized_transactions(tx_digests, checkpoint.sequence_number)?;
}
epoch_store.insert_finalized_transactions(tx_digests, checkpoint.sequence_number)?;

// TODO remove once we no longer need to support this table for read RPC
state
.get_checkpoint_cache()
18 changes: 1 addition & 17 deletions crates/sui-e2e-tests/tests/reconfiguration_tests.rs
@@ -6,10 +6,9 @@ use rand::rngs::OsRng;
use std::collections::{BTreeSet, HashSet};
use std::sync::Arc;
use std::time::Duration;
use sui_core::authority::epoch_start_configuration::EpochFlag;
use sui_core::consensus_adapter::position_submit_certificate;
use sui_json_rpc_types::SuiTransactionBlockEffectsAPI;
use sui_macros::{register_fail_point_arg, sim_test};
use sui_macros::sim_test;
use sui_node::SuiNodeHandle;
use sui_protocol_config::ProtocolConfig;
use sui_swarm_config::genesis_config::{ValidatorGenesisConfig, ValidatorGenesisConfigBuilder};
@@ -301,21 +300,6 @@ async fn do_test_passive_reconfig() {
// Test for syncing a node to an authority that already has many txes.
#[sim_test]
async fn test_expired_locks() {
do_test_lock_table_upgrade().await
}

#[sim_test]
async fn test_expired_locks_with_lock_table_upgrade() {
register_fail_point_arg("initial_epoch_flags", || {
Some(vec![
EpochFlag::InMemoryCheckpointRoots,
EpochFlag::PerEpochFinalizedTransactions,
])
});
do_test_lock_table_upgrade().await
}

async fn do_test_lock_table_upgrade() {
let test_cluster = TestClusterBuilder::new()
.with_epoch_duration_ms(10000)
.build()