From 972ae0e18ee8fe2af979c637dde9050e01decee3 Mon Sep 17 00:00:00 2001 From: siddharthteli12 <111498619+siddharthteli12@users.noreply.github.com> Date: Fri, 15 Sep 2023 23:54:10 +0530 Subject: [PATCH] Clippy & cleanup (#421) --- pallets/automation-price/src/lib.rs | 67 +++--- pallets/automation-time/rpc/src/lib.rs | 10 +- .../automation-time/src/autocompounding.rs | 4 +- pallets/automation-time/src/fees.rs | 12 +- pallets/automation-time/src/lib.rs | 89 +++---- pallets/automation-time/src/mock.rs | 29 +-- pallets/automation-time/src/tests.rs | 223 ++++++++---------- pallets/automation-time/src/types.rs | 6 +- pallets/valve/src/lib.rs | 2 +- pallets/valve/src/mock.rs | 7 +- pallets/vesting/src/lib.rs | 2 +- pallets/vesting/src/mock.rs | 7 +- pallets/vesting/src/tests.rs | 13 +- pallets/xcmp-handler/rpc/src/lib.rs | 2 +- pallets/xcmp-handler/src/lib.rs | 16 +- pallets/xcmp-handler/src/mock.rs | 11 +- pallets/xcmp-handler/src/tests.rs | 25 +- runtime/neumann/src/lib.rs | 4 +- runtime/neumann/src/xcm_config.rs | 2 +- runtime/oak/src/lib.rs | 4 +- runtime/oak/src/xcm_config.rs | 2 +- runtime/turing/src/lib.rs | 4 +- runtime/turing/src/xcm_config.rs | 2 +- 23 files changed, 237 insertions(+), 306 deletions(-) diff --git a/pallets/automation-price/src/lib.rs b/pallets/automation-price/src/lib.rs index a7968ef38..909389b7a 100644 --- a/pallets/automation-price/src/lib.rs +++ b/pallets/automation-price/src/lib.rs @@ -304,7 +304,7 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(_: T::BlockNumber) -> Weight { - if Self::is_shutdown() == true { + if Self::is_shutdown() { return T::DbWeight::get().reads(1u64) } @@ -344,8 +344,7 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; let fee = >::saturated_from(1_000_000_000_000u64); - T::FeeHandler::can_pay_fee(&who, fee.clone()) - .map_err(|_| Error::::InsufficientBalance)?; + T::FeeHandler::can_pay_fee(&who, fee).map_err(|_| Error::::InsufficientBalance)?; Self::validate_and_schedule_task( who.clone(), provided_id, @@ -355,9 +354,9 @@ pub mod pallet { recipient, amount, )?; - T::FeeHandler::withdraw_fee(&who, fee.clone()) + T::FeeHandler::withdraw_fee(&who, fee) .map_err(|_| Error::::LiquidityRestrictions)?; - Ok(().into()) + Ok(()) } /// Initialize an asset @@ -419,7 +418,7 @@ pub mod pallet { 0, )?; } - Ok(().into()) + Ok(()) } /// Post asset update @@ -447,8 +446,7 @@ pub mod pallet { } } let fee = >::saturated_from(1_000_000_000_000u64); - T::FeeHandler::can_pay_fee(&who.clone(), fee.clone()) - .map_err(|_| Error::::InsufficientBalance)?; + T::FeeHandler::can_pay_fee(&who, fee).map_err(|_| Error::::InsufficientBalance)?; if let Some(asset_target_price) = Self::get_asset_baseline_price(asset.clone()) { let last_asset_price: AssetPrice = match Self::get_asset_price(asset.clone()) { None => Err(Error::::AssetNotSupported)?, @@ -483,13 +481,13 @@ pub mod pallet { )?; } AssetPrices::::insert(asset.clone(), value); - T::FeeHandler::withdraw_fee(&who, fee.clone()) + T::FeeHandler::withdraw_fee(&who, fee) .map_err(|_| Error::::LiquidityRestrictions)?; Self::deposit_event(Event::AssetUpdated { asset }); } else { Err(Error::::AssetNotSupported)? } - Ok(().into()) + Ok(()) } /// Delete an asset @@ -518,7 +516,7 @@ pub mod pallet { } else { Err(Error::::AssetNotSupported)? 
} - Ok(().into()) + Ok(()) } } @@ -562,7 +560,7 @@ pub mod pallet { }; } ScheduledAssetDeletion::::remove(current_time_slot); - weight_left = weight_left - asset_reset_weight; + weight_left -= asset_reset_weight; } // run as many scheduled tasks as we can @@ -571,7 +569,7 @@ pub mod pallet { .saturating_sub(T::DbWeight::get().reads(1u64)) // For measuring the TaskQueue::::put(tasks_left); .saturating_sub(T::DbWeight::get().writes(1u64)); - if task_queue.len() > 0 { + if !task_queue.is_empty() { let (tasks_left, new_weight_left) = Self::run_tasks(task_queue, weight_left); weight_left = new_weight_left; TaskQueue::::put(tasks_left); @@ -588,13 +586,13 @@ pub mod pallet { if let Some(mut future_scheduled_deletion_assets) = Self::get_scheduled_asset_period_reset(new_time_slot) { - future_scheduled_deletion_assets.push(asset.clone()); + future_scheduled_deletion_assets.push(asset); >::insert( new_time_slot, future_scheduled_deletion_assets, ); } else { - let new_asset_list = vec![asset.clone()]; + let new_asset_list = vec![asset]; >::insert(new_time_slot, new_asset_list); } }; @@ -614,7 +612,7 @@ pub mod pallet { upper_bound, lower_bound, expiration_period, - asset_sudo: asset_owner.clone(), + asset_sudo: asset_owner, }; AssetMetadata::::insert(asset.clone(), asset_metadatum); let new_time_slot = Self::get_current_time_slot()?.saturating_add(expiration_period); @@ -694,7 +692,7 @@ pub mod pallet { consumed_task_index.saturating_inc(); let action_weight = match Self::get_task(task_id) { None => { - Self::deposit_event(Event::TaskNotFound { task_id: task_id.1.clone() }); + Self::deposit_event(Event::TaskNotFound { task_id: task_id.1 }); ::WeightInfo::emit_event() }, Some(task) => { @@ -725,15 +723,14 @@ pub mod pallet { } if consumed_task_index == task_ids.len() { - return (vec![], weight_left) + (vec![], weight_left) } else { - return (task_ids.split_off(consumed_task_index), weight_left) + (task_ids.split_off(consumed_task_index), weight_left) } } pub fn generate_task_id(owner_id: AccountOf, provided_id: Vec) -> T::Hash { - let task_hash_input = - TaskHashInput:: { owner_id: owner_id.clone(), provided_id: provided_id.clone() }; + let task_hash_input = TaskHashInput:: { owner_id, provided_id }; T::Hashing::hash_of(&task_hash_input) } @@ -747,22 +744,20 @@ pub mod pallet { direction: AssetDirection, trigger_percentage: AssetPercentage, ) -> Result> { - let task_id = Self::generate_task_id(owner_id.clone(), provided_id.clone()); - if let Some(_) = Self::get_task((asset.clone(), task_id.clone())) { + let task_id = Self::generate_task_id(owner_id, provided_id); + if let Some(_) = Self::get_task((asset.clone(), task_id)) { Err(Error::::DuplicateTask)? } - if let Some(mut asset_tasks) = Self::get_scheduled_tasks(( - asset.clone(), - direction.clone(), - trigger_percentage.clone(), - )) { - if let Err(_) = asset_tasks.try_push(task_id.clone()) { + if let Some(mut asset_tasks) = + Self::get_scheduled_tasks((asset.clone(), direction.clone(), trigger_percentage)) + { + if asset_tasks.try_push(task_id).is_err() { Err(Error::::MaxTasksReached)? } >::insert((asset, direction, trigger_percentage), asset_tasks); } else { let scheduled_tasks: BoundedVec = - vec![task_id.clone()].try_into().unwrap(); + vec![task_id].try_into().unwrap(); >::insert( (asset, direction, trigger_percentage), scheduled_tasks, @@ -782,7 +777,7 @@ pub mod pallet { recipient: T::AccountId, amount: BalanceOf, ) -> Result<(), Error> { - if provided_id.len() == 0 { + if provided_id.is_empty() { Err(Error::::EmptyProvidedId)? 
} let asset_target_price: AssetPrice = match Self::get_asset_baseline_price(asset.clone()) @@ -794,7 +789,7 @@ pub mod pallet { None => Err(Error::::AssetNotSupported)?, Some(asset_price) => asset_price, }; - match direction.clone() { + match direction { Direction::Down => if last_asset_price < asset_target_price { let last_asset_percentage = @@ -848,11 +843,9 @@ pub mod pallet { }; for percentage in lower..adjusted_higher { // TODO: pull all and cycle through in memory - if let Some(asset_tasks) = Self::get_scheduled_tasks(( - asset.clone(), - direction.clone(), - percentage.clone(), - )) { + if let Some(asset_tasks) = + Self::get_scheduled_tasks((asset.clone(), direction.clone(), percentage)) + { for task in asset_tasks { existing_task_queue.push((asset.clone(), task)); } diff --git a/pallets/automation-time/rpc/src/lib.rs b/pallets/automation-time/rpc/src/lib.rs index 6c9a752c8..d121b4768 100644 --- a/pallets/automation-time/rpc/src/lib.rs +++ b/pallets/automation-time/rpc/src/lib.rs @@ -117,7 +117,7 @@ where let uxt: Block::Extrinsic = Decode::decode(&mut &*encoded_xt).map_err(|e| { CallError::Custom(ErrorObject::owned( Error::RuntimeError.into(), - format!("Unable to decode extrinsic."), + "Unable to decode extrinsic.".to_string(), Some(format!("{:?}", e)), )) })?; @@ -126,7 +126,7 @@ where .map_err(|e| { CallError::Custom(ErrorObject::owned( Error::RuntimeError.into(), - format!("Unable to query fee details."), + "Unable to query fee details.".to_string(), Some(format!("{:?}", e)), )) })? @@ -134,7 +134,7 @@ where JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( Error::RuntimeError.into(), "Unable to get fees.", - Some(String::from_utf8(e).unwrap_or(String::default())), + Some(String::from_utf8(e).unwrap_or_default()), ))) })?; @@ -176,7 +176,7 @@ where JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( Error::RuntimeError.into(), "RPC value doesn't fit in u64 representation", - Some(format!("RPC value cannot be translated into u64 representation")), + Some("RPC value cannot be translated into u64 representation".to_string()), ))) }) } @@ -198,7 +198,7 @@ where }; runtime_api_result .map_err(|e| mapped_err(format!("{:?}", e))) - .map(|r| r.map_err(|e| mapped_err(String::from_utf8(e).unwrap_or(String::default()))))? + .map(|r| r.map_err(|e| mapped_err(String::from_utf8(e).unwrap_or_default())))? 
} fn get_auto_compound_delegated_stake_task_ids( diff --git a/pallets/automation-time/src/autocompounding.rs b/pallets/automation-time/src/autocompounding.rs index 3879b59a9..df802ad65 100644 --- a/pallets/automation-time/src/autocompounding.rs +++ b/pallets/automation-time/src/autocompounding.rs @@ -65,11 +65,11 @@ pub fn do_calculate_optimal_autostaking( if total_earnings > best_earnings { best_earnings = total_earnings; best_period = period; - best_apy = (total_earnings as f64 / principal as f64) * (365 as f64 / duration as f64); + best_apy = (total_earnings as f64 / principal as f64) * (365_f64 / duration as f64); } } - return (best_period, best_apy) + (best_period, best_apy) } #[cfg(test)] diff --git a/pallets/automation-time/src/fees.rs b/pallets/automation-time/src/fees.rs index 0941b309f..7c1ed26b6 100644 --- a/pallets/automation-time/src/fees.rs +++ b/pallets/automation-time/src/fees.rs @@ -132,7 +132,7 @@ where action: &ActionOf, executions: u32, ) -> Result { - let schedule_fee_location = action.schedule_fee_location::().into(); + let schedule_fee_location = action.schedule_fee_location::(); let schedule_fee_amount: u128 = Pallet::::calculate_schedule_fee_amount(action, executions)?.saturated_into(); @@ -173,7 +173,7 @@ mod tests { fn pay_checked_fees_for_success() { new_test_ext(0).execute_with(|| { let alice = AccountId32::new(ALICE); - fund_account(&alice.clone(), 900_000_000, 1, Some(0)); + fund_account(&alice, 900_000_000, 1, Some(0)); let starting_funds = Balances::free_balance(alice.clone()); let call: ::RuntimeCall = @@ -181,7 +181,7 @@ mod tests { let mut spy = 0; let result = ::FeeHandler::pay_checked_fees_for( &alice, - &Action::DynamicDispatch { encoded_call: call.clone().encode() }, + &Action::DynamicDispatch { encoded_call: call.encode() }, 1, || { spy += 1; @@ -202,7 +202,7 @@ mod tests { frame_system::Call::remark_with_event { remark: vec![50] }.into(); let result = ::FeeHandler::pay_checked_fees_for( &alice, - &Action::DynamicDispatch { encoded_call: call.clone().encode() }, + &Action::DynamicDispatch { encoded_call: call.encode() }, 1, || Ok(()), ); @@ -214,7 +214,7 @@ mod tests { fn does_not_charge_fees_when_prereq_errors() { new_test_ext(0).execute_with(|| { let alice = AccountId32::new(ALICE); - fund_account(&alice.clone(), 900_000_000, 1, Some(0)); + fund_account(&alice, 900_000_000, 1, Some(0)); let starting_funds = Balances::free_balance(alice.clone()); let call: ::RuntimeCall = @@ -222,7 +222,7 @@ mod tests { let result = ::FeeHandler::pay_checked_fees_for::<(), _>( &alice, - &Action::DynamicDispatch { encoded_call: call.clone().encode() }, + &Action::DynamicDispatch { encoded_call: call.encode() }, 1, || Err("error".into()), ); diff --git a/pallets/automation-time/src/lib.rs b/pallets/automation-time/src/lib.rs index bef499b98..7277a81ca 100644 --- a/pallets/automation-time/src/lib.rs +++ b/pallets/automation-time/src/lib.rs @@ -334,7 +334,7 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(_block: T::BlockNumber) -> Weight { - if Self::is_shutdown() == true { + if Self::is_shutdown() { return T::DbWeight::get().reads(1u64) } @@ -402,7 +402,7 @@ pub mod pallet { let schedule = schedule.validated_into::()?; Self::validate_and_schedule_task(action, who, schedule, vec![])?; - Ok(().into()) + Ok(()) } /// Schedule a task through XCMP through proxy account to fire an XCMP message with a provided call. 
@@ -465,7 +465,7 @@ pub mod pallet { let schedule = schedule.validated_into::()?; Self::validate_and_schedule_task(action, who, schedule, vec![])?; - Ok(().into()) + Ok(()) } /// Schedule a task to increase delegation to a specified up to a minimum balance @@ -509,7 +509,7 @@ pub mod pallet { .collect(); Self::validate_and_schedule_task(action, who, schedule, errors)?; - Ok(().into()) + Ok(()) } /// Schedule a task that will dispatch a call. @@ -539,7 +539,7 @@ pub mod pallet { let schedule = schedule.validated_into::()?; Self::validate_and_schedule_task(action, who, schedule, vec![])?; - Ok(().into()) + Ok(()) } /// Cancel a task. @@ -560,7 +560,7 @@ pub mod pallet { .ok_or(Error::::TaskDoesNotExist) .map(|task| Self::remove_task(task_id.clone(), task))?; - Ok(().into()) + Ok(()) } /// Sudo can force cancel a task. @@ -584,7 +584,7 @@ pub mod pallet { .ok_or(Error::::TaskDoesNotExist) .map(|task| Self::remove_task(task_id.clone(), task))?; - Ok(().into()) + Ok(()) } } @@ -675,7 +675,7 @@ pub mod pallet { // run as many scheduled tasks as we can let task_queue = Self::get_task_queue(); weight_left = weight_left.saturating_sub(T::DbWeight::get().reads(1u64)); - if task_queue.len() > 0 { + if !task_queue.is_empty() { let (tasks_left, new_weight_left) = Self::run_tasks(task_queue, weight_left); TaskQueueV2::::put(tasks_left); weight_left = new_weight_left.saturating_sub(T::DbWeight::get().writes(1u64)); @@ -688,7 +688,7 @@ pub mod pallet { if weight_left.ref_time() >= run_missed_task_weight.ref_time() { let missed_queue = Self::get_missed_queue(); weight_left = weight_left.saturating_sub(T::DbWeight::get().reads(1u64)); - if missed_queue.len() > 0 { + if !missed_queue.is_empty() { let (tasks_left, new_weight_left) = Self::run_missed_tasks(missed_queue, weight_left); @@ -856,7 +856,7 @@ pub mod pallet { tasks.push(new_missed_task); } } - return tasks + tasks } /// Runs as many tasks as the weight allows from the provided vec of task_ids. 
@@ -973,9 +973,9 @@ pub mod pallet { } if consumed_task_index == account_task_ids.len() { - return (vec![], weight_left) + (vec![], weight_left) } else { - return (account_task_ids.split_off(consumed_task_index), weight_left) + (account_task_ids.split_off(consumed_task_index), weight_left) } } @@ -1022,9 +1022,9 @@ pub mod pallet { } if consumed_task_index == missed_tasks.len() { - return (vec![], weight_left) + (vec![], weight_left) } else { - return (missed_tasks.split_off(consumed_task_index), weight_left) + (missed_tasks.split_off(consumed_task_index), weight_left) } } @@ -1069,10 +1069,10 @@ pub mod pallet { task: &TaskOf, ) -> (Weight, Option) { let fee_amount = Self::calculate_schedule_fee_amount(&task.action, 1); - if fee_amount.is_err() { + if let Err(error) = fee_amount { return ( ::WeightInfo::run_auto_compound_delegated_stake_task(), - Some(fee_amount.unwrap_err()), + Some(error), ) } let fee_amount = fee_amount.unwrap(); @@ -1089,11 +1089,10 @@ pub mod pallet { ::WeightInfo::run_auto_compound_delegated_stake_task(), None, ), - Err(e) => - return ( - ::WeightInfo::run_auto_compound_delegated_stake_task(), - Some(e), - ), + Err(e) => ( + ::WeightInfo::run_auto_compound_delegated_stake_task(), + Some(e), + ), } }, None => { @@ -1114,7 +1113,7 @@ pub mod pallet { match ::Call::decode(&mut &*encoded_call) { Ok(scheduled_call) => { let mut dispatch_origin: T::RuntimeOrigin = - frame_system::RawOrigin::Signed(caller.clone()).into(); + frame_system::RawOrigin::Signed(caller).into(); dispatch_origin.add_filter( |call: &::RuntimeCall| { T::ScheduleAllowList::contains(call) @@ -1151,11 +1150,11 @@ pub mod pallet { match task.schedule { Schedule::Fixed { ref mut executions_left, .. } => { *executions_left = executions_left.saturating_sub(1); - if *executions_left <= 0 { + if *executions_left == 0 { AccountTasks::::remove(task.owner_id.clone(), task_id.clone()); Self::deposit_event(Event::TaskCompleted { who: task.owner_id.clone(), - task_id: task_id.clone(), + task_id, }); } else { AccountTasks::::insert(task.owner_id.clone(), task_id, task); @@ -1170,11 +1169,7 @@ pub mod pallet { let mut found_task: bool = false; let mut execution_times = task.execution_times(); Self::clean_execution_times_vector(&mut execution_times); - let current_time_slot = match Self::get_current_time_slot() { - Ok(time_slot) => time_slot, - // This will only occur for the first block in the chain. - Err(_) => 0, - }; + let current_time_slot = Self::get_current_time_slot().unwrap_or(0); if let Some((last_time_slot, _)) = Self::get_last_slot() { for execution_time in execution_times.iter().rev() { @@ -1262,10 +1257,7 @@ pub mod pallet { } AccountTasks::::remove(task.owner_id.clone(), task_id.clone()); - Self::deposit_event(Event::TaskCancelled { - who: task.owner_id, - task_id: task_id.clone(), - }); + Self::deposit_event(Event::TaskCancelled { who: task.owner_id, task_id }); } /// Schedule task and return it's task_id. 
@@ -1274,7 +1266,7 @@ pub mod pallet { let execution_times = task.execution_times(); - if AccountTasks::::contains_key(owner_id.clone(), task.task_id.clone()) { + if AccountTasks::::contains_key(&owner_id, task.task_id.clone()) { Err(Error::::DuplicateTask)?; } @@ -1303,9 +1295,7 @@ pub mod pallet { with_transaction(|| -> storage::TransactionOutcome> { for time in execution_times.iter() { let mut scheduled_tasks = Self::get_scheduled_tasks(*time).unwrap_or_default(); - if let Err(_) = - scheduled_tasks.try_push::>(task_id.clone(), task) - { + if scheduled_tasks.try_push::>(task_id.clone(), task).is_err() { return Rollback(Err(DispatchError::Other("time slot full"))) } >::insert(*time, scheduled_tasks); @@ -1330,14 +1320,13 @@ pub mod pallet { .map_err(|()| Error::::BadVersion)?; let asset_location = asset_location .reanchored( - &MultiLocation::new(1, X1(Parachain(T::SelfParaId::get().into()))) - .into(), + &MultiLocation::new(1, X1(Parachain(T::SelfParaId::get().into()))), T::UniversalLocation::get(), ) .map_err(|_| Error::::CannotReanchor)?; // Only native token are supported as the XCMP fee for local deductions if instruction_sequence == InstructionSequence::PayThroughSovereignAccount && - asset_location != MultiLocation::new(0, Here).into() + asset_location != MultiLocation::new(0, Here) { Err(Error::::UnsupportedFeePayment)? } @@ -1403,7 +1392,7 @@ pub mod pallet { task_id: task_id.clone(), schedule_as, }); - AccountTasks::::insert(owner_id.clone(), task_id, task.clone()); + AccountTasks::::insert(owner_id, task_id, task.clone()); }, Err(err) => { Self::deposit_event(Event::::TaskRescheduleFailed { @@ -1435,7 +1424,7 @@ pub mod pallet { })?; let owner_id = task.owner_id.clone(); - AccountTasks::::insert(owner_id.clone(), task_id, task.clone()); + AccountTasks::::insert(owner_id, task_id, task.clone()); }, Schedule::Fixed { .. 
} => {}, } @@ -1456,15 +1445,11 @@ pub mod pallet { pub fn generate_task_idv2() -> TaskIdV2 { let current_block_number = - match TryInto::::try_into(>::block_number()).ok() { - Some(i) => i, - None => 0, - }; + TryInto::::try_into(>::block_number()) + .ok() + .unwrap_or(0); - let tx_id = match >::extrinsic_index() { - Some(i) => i, - None => 0, - }; + let tx_id = >::extrinsic_index().unwrap_or(0); let evt_index = >::event_count(); @@ -1478,7 +1463,7 @@ pub mod pallet { pub fn get_auto_compound_delegated_stake_task_ids( account_id: AccountOf, ) -> Vec { - AccountTasks::::iter_prefix_values(account_id.clone()) + AccountTasks::::iter_prefix_values(account_id) .filter(|task| { match task.action { // We don't care about the inner content, we just want to pick out the @@ -1504,7 +1489,7 @@ pub mod pallet { let schedule_fee_location = action.schedule_fee_location::(); let schedule_fee_location = schedule_fee_location .reanchored( - &MultiLocation::new(1, X1(Parachain(T::SelfParaId::get().into()))).into(), + &MultiLocation::new(1, X1(Parachain(T::SelfParaId::get().into()))), T::UniversalLocation::get(), ) .map_err(|_| Error::::CannotReanchor)?; diff --git a/pallets/automation-time/src/mock.rs b/pallets/automation-time/src/mock.rs index 11cbe8b56..b741a1247 100644 --- a/pallets/automation-time/src/mock.rs +++ b/pallets/automation-time/src/mock.rs @@ -417,11 +417,11 @@ where _overall_weight: Weight, _flow: InstructionSequence, ) -> Result<(), sp_runtime::DispatchError> { - Ok(().into()) + Ok(()) } fn pay_xcm_fee(_: T::AccountId, _: u128) -> Result<(), sp_runtime::DispatchError> { - Ok(().into()) + Ok(()) } } @@ -543,8 +543,8 @@ pub fn new_test_ext(state_block_time: u64) -> sp_io::TestExternalities { // making sure a task is scheduled into the queue pub fn schedule_task(owner: [u8; 32], scheduled_times: Vec, message: Vec) -> TaskIdV2 { let call: RuntimeCall = frame_system::Call::remark_with_event { remark: message }.into(); - let task_id = schedule_dynamic_dispatch_task(owner, scheduled_times, call); - task_id + + schedule_dynamic_dispatch_task(owner, scheduled_times, call) } pub fn schedule_dynamic_dispatch_task( @@ -554,16 +554,12 @@ pub fn schedule_dynamic_dispatch_task( ) -> TaskIdV2 { let account_id = AccountId32::new(owner); - assert_ok!(fund_account_dynamic_dispatch( - &account_id, - scheduled_times.len(), - call.clone().encode() - )); + assert_ok!(fund_account_dynamic_dispatch(&account_id, scheduled_times.len(), call.encode())); assert_ok!(AutomationTime::schedule_dynamic_dispatch_task( - RuntimeOrigin::signed(account_id.clone()), + RuntimeOrigin::signed(account_id), ScheduleParam::Fixed { execution_times: scheduled_times }, - Box::new(call.clone()), + Box::new(call), )); last_task_id() } @@ -583,7 +579,7 @@ pub fn schedule_recurring_task( assert_ok!(fund_account_dynamic_dispatch(&account_id, 1, call.encode())); assert_ok!(AutomationTime::schedule_dynamic_dispatch_task( - RuntimeOrigin::signed(account_id.clone()), + RuntimeOrigin::signed(account_id), ScheduleParam::Recurring { next_execution_time, frequency }, Box::new(call), )); @@ -635,7 +631,7 @@ pub fn add_task_to_missed_queue( abort_errors: Vec>, ) -> TaskIdV2 { let schedule = Schedule::new_fixed_schedule::(scheduled_times.clone()).unwrap(); - let task_id = create_task(owner, task_id.clone(), schedule, action, abort_errors); + let task_id = create_task(owner, task_id, schedule, action, abort_errors); let missed_task = MissedTaskV2Of::::new(AccountId32::new(owner), task_id.clone(), scheduled_times[0]); let mut missed_queue = 
AutomationTime::get_missed_queue(); @@ -692,7 +688,7 @@ pub fn get_task_ids_from_events() -> Vec { } pub fn get_funds(account: AccountId) { - let double_action_weight = Weight::from_ref_time(20_000 as u64) * 2; + let double_action_weight = Weight::from_ref_time(20_000_u64) * 2; let action_fee = ExecutionWeightFee::get() * u128::from(double_action_weight.ref_time()); let max_execution_fee = action_fee * u128::from(MaxExecutionTimes::get()); @@ -700,7 +696,7 @@ pub fn get_funds(account: AccountId) { } pub fn get_minimum_funds(account: AccountId, executions: u32) { - let double_action_weight = Weight::from_ref_time(20_000 as u64) * 2; + let double_action_weight = Weight::from_ref_time(20_000_u64) * 2; let action_fee = ExecutionWeightFee::get() * u128::from(double_action_weight.ref_time()); let max_execution_fee = action_fee * u128::from(executions); Balances::set_balance(RawOrigin::Root.into(), account, max_execution_fee, 0).unwrap(); @@ -742,8 +738,7 @@ pub fn fund_account( pub fn get_fee_per_second(location: &MultiLocation) -> Option { let location = location .reanchored( - &MultiLocation::new(1, X1(Parachain(::SelfParaId::get().into()))) - .into(), + &MultiLocation::new(1, X1(Parachain(::SelfParaId::get().into()))), ::UniversalLocation::get(), ) .expect("Reanchor location failed"); diff --git a/pallets/automation-time/src/tests.rs b/pallets/automation-time/src/tests.rs index 8e06523a7..4f39b7721 100644 --- a/pallets/automation-time/src/tests.rs +++ b/pallets/automation-time/src/tests.rs @@ -38,7 +38,6 @@ use sp_runtime::{ use sp_std::collections::btree_map::BTreeMap; use xcm::latest::{prelude::*, Junction::Parachain, MultiLocation}; -use pallet_balances; use pallet_valve::Shutdown; pub const START_BLOCK_TIME: u64 = 33198768000 * 1_000; @@ -69,7 +68,7 @@ impl Default for XcmpActionParams { let delegator_account = AccountId32::new(DELEGATOR_ACCOUNT); XcmpActionParams { destination: MultiLocation::new(1, X1(Parachain(PARA_ID))), - schedule_fee: DEFAULT_SCHEDULE_FEE_LOCATION.into(), + schedule_fee: DEFAULT_SCHEDULE_FEE_LOCATION, execution_fee: AssetPayment { asset_location: MOONBASE_ASSET_LOCATION.into(), amount: 100, @@ -116,21 +115,20 @@ fn calculate_expected_xcmp_action_schedule_fee( ) -> u128 { let schedule_fee_location = schedule_fee_location .reanchored( - &MultiLocation::new(1, X1(Parachain(::SelfParaId::get().into()))) - .into(), + &MultiLocation::new(1, X1(Parachain(::SelfParaId::get().into()))), ::UniversalLocation::get(), ) .expect("Location reanchor failed"); let weight = ::WeightInfo::run_xcmp_task(); - let expected_schedule_fee_amount = if schedule_fee_location == MultiLocation::default() { + + if schedule_fee_location == MultiLocation::default() { calculate_local_action_schedule_fee(weight, num_of_execution) } else { let fee_per_second = get_fee_per_second(&schedule_fee_location).expect("Get fee per second should work"); fee_per_second * (weight.ref_time() as u128) * (num_of_execution as u128) / (WEIGHT_REF_TIME_PER_SECOND as u128) - }; - expected_schedule_fee_amount + } } // Helper function to asset event easiser @@ -156,15 +154,15 @@ fn contains_events(emitted_events: Vec, events: Vec) // Convert both lists to iterators let mut emitted_iter = emitted_events.iter(); - let mut events_iter = events.iter(); + let events_iter = events.iter(); // Iterate through the target events - while let Some(target_event) = events_iter.next() { + for target_event in events_iter { // Initialize a boolean variable to track whether the target event is found let mut found = false; // Continue 
iterating through the emitted events until a match is found or there are no more emitted events - while let Some(emitted_event) = emitted_iter.next() { + for emitted_event in emitted_iter.by_ref() { // Compare event type and event data for a match if emitted_event == target_event { // Target event found, mark as found and advance the emitted iterator @@ -212,9 +210,9 @@ fn schedule_invalid_time_fixed_schedule() { fn schedule_invalid_time_recurring_schedule() { new_test_ext(START_BLOCK_TIME).execute_with(|| { for (next_run, frequency) in vec![ - (SCHEDULED_TIME + 10, 10 as u64), - (SCHEDULED_TIME + 3600, 100 as u64), - (SCHEDULED_TIME + 10, 3600 as u64), + (SCHEDULED_TIME + 10, 10_u64), + (SCHEDULED_TIME + 3600, 100_u64), + (SCHEDULED_TIME + 10, 3600_u64), ] .iter() { @@ -266,7 +264,7 @@ fn schedule_past_time() { fn schedule_past_time_recurring() { new_test_ext(START_BLOCK_TIME + 1_000 * 10800).execute_with(|| { for (next_run, frequency) in - vec![(SCHEDULED_TIME - 3600, 7200 as u64), (SCHEDULED_TIME, 7200 as u64)].iter() + vec![(SCHEDULED_TIME - 3600, 7200_u64), (SCHEDULED_TIME, 7200_u64)].iter() { // prepare data let call: RuntimeCall = frame_system::Call::remark { remark: vec![12] }.into(); @@ -347,7 +345,7 @@ fn schedule_not_enough_for_fees() { fn schedule_transfer_with_dynamic_dispatch() { new_test_ext(START_BLOCK_TIME).execute_with(|| { let account_id = AccountId32::new(ALICE); - let task_id = FIRST_TASK_ID.to_vec().clone(); + let task_id = FIRST_TASK_ID.to_vec(); fund_account(&account_id, 900_000_000, 2, Some(0)); @@ -385,7 +383,7 @@ fn schedule_transfer_with_dynamic_dispatch() { }), RuntimeEvent::Balances(pallet_balances::pallet::Event::Transfer { from: account_id.clone(), - to: recipient.clone(), + to: recipient, amount: 127, }), RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { @@ -407,7 +405,7 @@ fn will_emit_task_completed_event_when_task_completed() { new_test_ext(START_BLOCK_TIME).execute_with(|| { let frequency = 3_600; let account_id = AccountId32::new(ALICE); - let _task_id = FIRST_TASK_ID.to_vec().clone(); + let _task_id = FIRST_TASK_ID.to_vec(); fund_account(&account_id, 900_000_000, 2, Some(0)); @@ -417,7 +415,7 @@ fn will_emit_task_completed_event_when_task_completed() { // Schedule a task to be executed at SCHEDULED_TIME and SCHEDULED_TIME + frequency. 
let next_execution_time = SCHEDULED_TIME + frequency; assert_ok!(AutomationTime::schedule_dynamic_dispatch_task( - RuntimeOrigin::signed(account_id.clone()), + RuntimeOrigin::signed(account_id), ScheduleParam::Fixed { execution_times: vec![SCHEDULED_TIME, next_execution_time] }, Box::new(call), )); @@ -574,8 +572,8 @@ fn will_emit_task_completed_event_when_task_failed() { error: >::InsufficientBalance.into(), }), RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { - who: account_id.clone(), - task_id: task_id.clone(), + who: account_id, + task_id, }), ] )) @@ -588,11 +586,8 @@ fn calculate_auto_compound_action_schedule_fee_amount_works() { let num_of_execution = generate_random_num(1, 20); let delegator = AccountId32::new(ALICE); let collator = AccountId32::new(COLLATOR_ACCOUNT); - let action = Action::AutoCompoundDelegatedStake { - delegator, - collator, - account_minimum: 100u128.into(), - }; + let action = + Action::AutoCompoundDelegatedStake { delegator, collator, account_minimum: 100u128 }; let fee_amount = AutomationTime::calculate_schedule_fee_amount(&action, num_of_execution) .expect(EXPECT_CALCULATE_SCHEDULE_FEE_AMOUNT); @@ -646,7 +641,7 @@ fn calculate_xcmp_action_schedule_fee_amount_with_different_schedule_fees_works( ASSET_FEE_PER_SECOND.into_iter().for_each(|fee| { let num_of_execution = generate_random_num(1, 20); let action = create_xcmp_action(XcmpActionParams { - schedule_fee: fee.asset_location.clone(), + schedule_fee: fee.asset_location, ..XcmpActionParams::default() }); @@ -830,7 +825,7 @@ fn schedule_xcmp_works() { get_xcmp_funds(alice.clone()); assert_ok!(AutomationTime::schedule_xcmp_task( - RuntimeOrigin::signed(alice.clone()), + RuntimeOrigin::signed(alice), ScheduleParam::Fixed { execution_times: vec![SCHEDULED_TIME] }, Box::new(destination.into()), Box::new(NATIVE_LOCATION.into()), @@ -838,7 +833,7 @@ fn schedule_xcmp_works() { asset_location: MultiLocation::new(0, Here).into(), amount: 10 }), - call.clone(), + call, Weight::from_ref_time(100_000), Weight::from_ref_time(200_000), )); @@ -848,7 +843,7 @@ fn schedule_xcmp_works() { #[test] fn schedule_xcmp_through_proxy_works() { new_test_ext(START_BLOCK_TIME).execute_with(|| { - let destination = MultiLocation::new(1, X1(Parachain(PARA_ID.into()))); + let destination = MultiLocation::new(1, X1(Parachain(PARA_ID))); let delegator_account = AccountId32::new(DELEGATOR_ACCOUNT); let proxy_account = AccountId32::new(PROXY_ACCOUNT); let call: Vec = vec![2, 4, 5]; @@ -859,13 +854,13 @@ fn schedule_xcmp_through_proxy_works() { assert_ok!(AutomationTime::schedule_xcmp_task_through_proxy( RuntimeOrigin::signed(proxy_account.clone()), ScheduleParam::Fixed { execution_times: vec![SCHEDULED_TIME] }, - Box::new(destination.clone().into()), + Box::new(destination.into()), Box::new(MultiLocation::default().into()), Box::new(AssetPayment { asset_location: destination.into(), amount: 10, }), - call.clone(), + call, Weight::from_ref_time(100_000), Weight::from_ref_time(200_000), delegator_account.clone(), @@ -875,7 +870,7 @@ fn schedule_xcmp_through_proxy_works() { assert_eq!(tasks.is_some(), true); let tasks = tasks.unwrap(); - assert_eq!(tasks.tasks[0].0, proxy_account.clone()); + assert_eq!(tasks.tasks[0].0, proxy_account); // Find the TaskScheduled event in the event list and verify if the who within it is correct. 
events() @@ -897,7 +892,7 @@ fn schedule_xcmp_through_proxy_same_as_delegator_account() { new_test_ext(START_BLOCK_TIME).execute_with(|| { let delegator_account = AccountId32::new(ALICE); let call: Vec = vec![2, 4, 5]; - let destination = MultiLocation::new(1, X1(Parachain(PARA_ID.into()))); + let destination = MultiLocation::new(1, X1(Parachain(PARA_ID))); // Funds including XCM fees get_xcmp_funds(delegator_account.clone()); @@ -906,13 +901,13 @@ fn schedule_xcmp_through_proxy_same_as_delegator_account() { AutomationTime::schedule_xcmp_task_through_proxy( RuntimeOrigin::signed(delegator_account.clone()), ScheduleParam::Fixed { execution_times: vec![SCHEDULED_TIME] }, - Box::new(destination.clone().into()), + Box::new(destination.into()), Box::new(MultiLocation::default().into()), Box::new(AssetPayment { asset_location: destination.into(), amount: 10 }), - call.clone(), + call, Weight::from_ref_time(100_000), Weight::from_ref_time(200_000), - delegator_account.clone(), + delegator_account, ), sp_runtime::DispatchError::Other("proxy error: expected `ProxyType::Any`"), ); @@ -931,7 +926,7 @@ fn schedule_xcmp_fails_if_not_enough_funds() { assert_noop!( AutomationTime::schedule_xcmp_task( - RuntimeOrigin::signed(alice.clone()), + RuntimeOrigin::signed(alice), ScheduleParam::Fixed { execution_times: vec![SCHEDULED_TIME] }, Box::new(destination.into()), Box::new(NATIVE_LOCATION.into()), @@ -939,7 +934,7 @@ fn schedule_xcmp_fails_if_not_enough_funds() { asset_location: MultiLocation::new(0, Here).into(), amount: 10000000000000 }), - call.clone(), + call, Weight::from_ref_time(100_000), Weight::from_ref_time(200_000), ), @@ -969,7 +964,7 @@ fn schedule_auto_compound_delegated_stake() { AutomationTime::get_account_task(account_task_id.0.clone(), account_task_id.1), TaskOf::::create_auto_compound_delegated_stake_task::( alice.clone(), - FIRST_TASK_ID.to_vec().clone(), + FIRST_TASK_ID.to_vec(), SCHEDULED_TIME, 3_600, bob, @@ -1254,10 +1249,10 @@ fn schedule_time_slot_full_rolls_back() { Error::::TimeSlotFull, ); - if let Some(_) = AutomationTime::get_scheduled_tasks(SCHEDULED_TIME) { + if AutomationTime::get_scheduled_tasks(SCHEDULED_TIME).is_some() { panic!("Tasks scheduled for the time it should have been rolled back") } - if let Some(_) = AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + 3600) { + if AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + 3600).is_some() { panic!("Tasks scheduled for the time it should have been rolled back") } match AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + 7200) { @@ -1266,8 +1261,8 @@ fn schedule_time_slot_full_rolls_back() { }, Some(ScheduledTasksOf:: { tasks: account_task_ids, .. 
}) => { assert_eq!(account_task_ids.len(), 2); - assert_eq!(account_task_ids[0].1, task_id1.clone()); - assert_eq!(account_task_ids[1].1, task_id2.clone()); + assert_eq!(account_task_ids[0].1, task_id1); + assert_eq!(account_task_ids[1].1, task_id2); }, } }) @@ -1291,7 +1286,7 @@ fn taskid_changed_per_block() { ); LastTimeSlot::::put((SCHEDULED_TIME - 14400, SCHEDULED_TIME - 14400)); - assert_eq!(task_id1, FIRST_TASK_ID.to_vec().clone()); + assert_eq!(task_id1, FIRST_TASK_ID.to_vec()); assert_eq!(task_id2, vec![50, 48, 45, 48, 45, 54]); }) } @@ -1318,12 +1313,12 @@ fn taskid_adjusted_on_extrinsicid_on_same_block() { ); LastTimeSlot::::put((SCHEDULED_TIME - 14400, SCHEDULED_TIME - 14400)); - assert_eq!(task_id1, FIRST_TASK_ID.to_vec().clone()); + assert_eq!(task_id1, FIRST_TASK_ID.to_vec()); assert_eq!(task_id2, vec![49, 45, 50, 51, 52, 45, 56]); assert_has_event(RuntimeEvent::AutomationTime(crate::Event::TaskScheduled { who: first_caller, - task_id: FIRST_TASK_ID.to_vec().clone(), + task_id: FIRST_TASK_ID.to_vec(), schedule_as: None, })); @@ -1368,7 +1363,7 @@ fn taskid_adjusted_on_eventindex_on_same_block_from_same_caller() { })); assert_has_event(RuntimeEvent::AutomationTime(crate::Event::TaskScheduled { - who: caller.clone(), + who: caller, task_id: "1-234-6".as_bytes().to_vec(), schedule_as: None, })); @@ -1393,12 +1388,12 @@ fn taskid_on_same_extrinsid_have_unique_event_index() { ); LastTimeSlot::::put((SCHEDULED_TIME - 14400, SCHEDULED_TIME - 14400)); - assert_eq!(task_id1, FIRST_TASK_ID.to_vec().clone()); - assert_eq!(task_id2, SECOND_TASK_ID.to_vec().clone()); + assert_eq!(task_id1, FIRST_TASK_ID.to_vec()); + assert_eq!(task_id2, SECOND_TASK_ID.to_vec()); assert_has_event(RuntimeEvent::AutomationTime(crate::Event::TaskScheduled { who: owner, - task_id: FIRST_TASK_ID.to_vec().clone(), + task_id: FIRST_TASK_ID.to_vec(), schedule_as: None, })); }) @@ -1424,7 +1419,7 @@ fn cancel_works_for_fixed_scheduled() { task_id2.clone(), )); - if let Some(_) = AutomationTime::get_scheduled_tasks(SCHEDULED_TIME) { + if AutomationTime::get_scheduled_tasks(SCHEDULED_TIME).is_some() { panic!("Since there were only two tasks scheduled for the time it should have been deleted") } assert_eq!( @@ -1432,11 +1427,11 @@ fn cancel_works_for_fixed_scheduled() { [ RuntimeEvent::AutomationTime(crate::Event::TaskCancelled { who: AccountId32::new(ALICE), - task_id: task_id1.clone() + task_id: task_id1 }), RuntimeEvent::AutomationTime(crate::Event::TaskCancelled { who: AccountId32::new(ALICE), - task_id: task_id2.clone(), + task_id: task_id2, }), ] ); @@ -1464,20 +1459,20 @@ fn cancel_works_for_multiple_executions_scheduled() { )); assert_eq!(AutomationTime::get_account_task(owner.clone(), task_id1.clone()), None); - if let Some(_) = AutomationTime::get_scheduled_tasks(SCHEDULED_TIME) { + if AutomationTime::get_scheduled_tasks(SCHEDULED_TIME).is_some() { panic!("Tasks scheduled for the time it should have been deleted") } - if let Some(_) = AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + 3600) { + if AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + 3600).is_some() { panic!("Tasks scheduled for the time it should have been deleted") } - if let Some(_) = AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + 7200) { + if AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + 7200).is_some() { panic!("Tasks scheduled for the time it should have been deleted") } assert_eq!( events(), [RuntimeEvent::AutomationTime(crate::Event::TaskCancelled { who: owner, - task_id: task_id1.clone(), + task_id: task_id1, })] ); }) 
@@ -1502,7 +1497,7 @@ fn cancel_works_for_recurring_scheduled() { task_id2.clone(), )); - if let Some(_) = AutomationTime::get_scheduled_tasks(SCHEDULED_TIME) { + if AutomationTime::get_scheduled_tasks(SCHEDULED_TIME).is_some() { panic!("Since there were only two tasks scheduled for the time it should have been deleted") } assert_eq!( @@ -1510,11 +1505,11 @@ fn cancel_works_for_recurring_scheduled() { [ RuntimeEvent::AutomationTime(crate::Event::TaskCancelled { who: AccountId32::new(ALICE), - task_id: task_id1.clone() + task_id: task_id1 }), RuntimeEvent::AutomationTime(crate::Event::TaskCancelled { who: AccountId32::new(ALICE), - task_id: task_id2.clone() + task_id: task_id2 }), ] ); @@ -1553,7 +1548,7 @@ fn cancel_works_for_an_executed_task() { }, Some(ScheduledTasksOf:: { tasks: task_ids, .. }) => { assert_eq!(task_ids.len(), 1); - assert_eq!(task_ids[0].1, task_id1.clone()); + assert_eq!(task_ids[0].1, task_id1); }, } match AutomationTime::get_scheduled_tasks(SCHEDULED_TIME + 3600) { @@ -1562,7 +1557,7 @@ fn cancel_works_for_an_executed_task() { }, Some(ScheduledTasksOf:: { tasks: task_ids, .. }) => { assert_eq!(task_ids.len(), 1); - assert_eq!(task_ids[0].1, task_id1.clone()); + assert_eq!(task_ids[0].1, task_id1); }, } @@ -1583,7 +1578,7 @@ fn cancel_works_for_an_executed_task() { }), RuntimeEvent::System(frame_system::pallet::Event::Remarked { sender: owner.clone(), - hash: BlakeTwo256::hash(&vec![50]), + hash: BlakeTwo256::hash(&[50]), }), RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { who: owner.clone(), @@ -1607,7 +1602,7 @@ fn cancel_works_for_an_executed_task() { }, Some(ScheduledTasksOf:: { tasks: task_ids, .. }) => { assert_eq!(task_ids.len(), 1); - assert_eq!(task_ids[0].1, task_id1.clone()); + assert_eq!(task_ids[0].1, task_id1); }, } @@ -1624,7 +1619,7 @@ fn cancel_works_for_an_executed_task() { events(), [RuntimeEvent::AutomationTime(crate::Event::TaskCancelled { who: owner, - task_id: task_id1.clone(), + task_id: task_id1, })] ); }) @@ -1644,7 +1639,7 @@ fn cancel_works_for_tasks_in_queue() { ); LastTimeSlot::::put((SCHEDULED_TIME, SCHEDULED_TIME)); - assert_eq!(task_id.clone(), AutomationTime::get_task_queue()[0].1); + assert_eq!(task_id, AutomationTime::get_task_queue()[0].1); assert_eq!(1, AutomationTime::get_task_queue().len()); assert_ok!(AutomationTime::cancel_task( @@ -1656,7 +1651,7 @@ fn cancel_works_for_tasks_in_queue() { events(), [RuntimeEvent::AutomationTime(crate::Event::TaskCancelled { who: AccountId32::new(ALICE), - task_id: task_id.clone() + task_id }),] ); assert_eq!(0, AutomationTime::get_task_queue().len()); @@ -1717,10 +1712,7 @@ fn cancel_task_not_found() { // now ensure the task id is also removed from AccountTasks assert_noop!( - AutomationTime::cancel_task( - RuntimeOrigin::signed(AccountId32::new(ALICE)), - task_id.clone() - ), + AutomationTime::cancel_task(RuntimeOrigin::signed(AccountId32::new(ALICE)), task_id), Error::::TaskDoesNotExist, ); }) @@ -1750,10 +1742,7 @@ fn cancel_task_fail_non_owner() { ); // But Alice can cancel as expected - assert_ok!(AutomationTime::cancel_task( - RuntimeOrigin::signed(owner.clone()), - task_id1.clone(), - )); + assert_ok!(AutomationTime::cancel_task(RuntimeOrigin::signed(owner), task_id1,)); }) } @@ -1774,7 +1763,7 @@ fn force_cancel_task_works() { events(), [RuntimeEvent::AutomationTime(crate::Event::TaskCancelled { who: AccountId32::new(ALICE), - task_id: task_id.clone() + task_id }),] ); }) @@ -1807,7 +1796,7 @@ mod extrinsics { last_event(), 
RuntimeEvent::AutomationTime(crate::Event::TaskScheduled { who: account_id, - task_id: FIRST_TASK_ID.to_vec().clone(), + task_id: FIRST_TASK_ID.to_vec(), schedule_as: None, }) ); @@ -1826,7 +1815,7 @@ mod run_dynamic_dispatch_action { let bad_encoded_call: Vec = vec![1]; let (weight, error) = - AutomationTime::run_dynamic_dispatch_action(account_id.clone(), bad_encoded_call); + AutomationTime::run_dynamic_dispatch_action(account_id, bad_encoded_call); assert_eq!( weight, @@ -1844,8 +1833,7 @@ mod run_dynamic_dispatch_action { let call: RuntimeCall = frame_system::Call::set_code { code: vec![] }.into(); let encoded_call = call.encode(); - let (_, error) = - AutomationTime::run_dynamic_dispatch_action(account_id.clone(), encoded_call); + let (_, error) = AutomationTime::run_dynamic_dispatch_action(account_id, encoded_call); assert_eq!(error, Some(DispatchError::BadOrigin)); }) @@ -1858,8 +1846,7 @@ mod run_dynamic_dispatch_action { let call: RuntimeCall = pallet_timestamp::Call::set { now: 100 }.into(); let encoded_call = call.encode(); - let (_, error) = - AutomationTime::run_dynamic_dispatch_action(account_id.clone(), encoded_call); + let (_, error) = AutomationTime::run_dynamic_dispatch_action(account_id, encoded_call); assert_eq!(error, Some(DispatchError::from(frame_system::Error::::CallFiltered))); }) @@ -1872,8 +1859,7 @@ mod run_dynamic_dispatch_action { let call: RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); let encoded_call = call.encode(); - let (_, error) = - AutomationTime::run_dynamic_dispatch_action(account_id.clone(), encoded_call); + let (_, error) = AutomationTime::run_dynamic_dispatch_action(account_id, encoded_call); assert_eq!(error, None); }) @@ -1973,7 +1959,7 @@ fn trigger_tasks_handles_missed_slots() { let missed_task_id = schedule_task(ALICE, vec![SCHEDULED_TIME - 3600], vec![50]); let missed_task = MissedTaskV2Of::::new( AccountId32::new(ALICE), - missed_task_id.clone(), + missed_task_id, SCHEDULED_TIME - 3600, ); @@ -2017,12 +2003,12 @@ fn trigger_tasks_handles_missed_slots() { hash: BlakeTwo256::hash(&remark_message), }), RuntimeEvent::AutomationTime(crate::Event::TaskExecuted { - who: owner.clone(), + who: owner, task_id: task_will_be_run_id.clone(), }), RuntimeEvent::AutomationTime(crate::Event::TaskCompleted { who: AccountId32::new(ALICE), - task_id: task_will_be_run_id.clone(), + task_id: task_will_be_run_id, }), ], ); @@ -2051,7 +2037,7 @@ fn trigger_tasks_limits_missed_slots() { ALICE, vec![40], vec![SCHEDULED_TIME], - Action::DynamicDispatch { encoded_call: call.clone().encode() }, + Action::DynamicDispatch { encoded_call: call.encode() }, vec![], ); @@ -2272,7 +2258,7 @@ fn trigger_tasks_handles_nonexisting_tasks() { events(), [RuntimeEvent::AutomationTime(crate::Event::TaskNotFound { who: owner, - task_id: bad_task_id.clone() + task_id: bad_task_id }),] ); assert_eq!(0, AutomationTime::get_task_queue().len()); @@ -2296,7 +2282,7 @@ fn trigger_tasks_completes_some_tasks() { ALICE, vec![50], vec![SCHEDULED_TIME], - create_dynamic_dispatch_remark_action(message_two.clone()), + create_dynamic_dispatch_remark_action(message_two), vec![], ); LastTimeSlot::::put((LAST_BLOCK_TIME, LAST_BLOCK_TIME)); @@ -2385,14 +2371,8 @@ fn trigger_tasks_completes_all_missed_tasks() { ); assert_eq!(AutomationTime::get_missed_queue().len(), 0); - assert_eq!( - AutomationTime::get_account_task(AccountId32::new(ALICE), task_id1.clone()), - None - ); - assert_eq!( - AutomationTime::get_account_task(AccountId32::new(ALICE), task_id2.clone()), - None - ); + 
assert_eq!(AutomationTime::get_account_task(AccountId32::new(ALICE), task_id1), None); + assert_eq!(AutomationTime::get_account_task(AccountId32::new(ALICE), task_id2), None); }) } @@ -2452,7 +2432,7 @@ fn missed_tasks_updates_executions_left() { ); assert_eq!(AutomationTime::get_missed_queue().len(), 0); - match AutomationTime::get_account_task(owner.clone(), task_id1.clone()) { + match AutomationTime::get_account_task(owner.clone(), task_id1) { None => { panic!("A task should exist if it was scheduled") }, @@ -2460,7 +2440,7 @@ fn missed_tasks_updates_executions_left() { assert_eq!(task.schedule.known_executions_left(), 1); }, } - match AutomationTime::get_account_task(owner.clone(), task_id2.clone()) { + match AutomationTime::get_account_task(owner, task_id2) { None => { panic!("A task should exist if it was scheduled") }, @@ -2538,7 +2518,7 @@ fn missed_tasks_removes_completed_tasks() { }), ] ); - assert_eq!(AutomationTime::get_account_task(owner.clone(), task_id01.clone()), None); + assert_eq!(AutomationTime::get_account_task(owner, task_id01), None); }) } @@ -2553,7 +2533,7 @@ fn trigger_tasks_completes_some_xcmp_tasks() { vec![40], vec![SCHEDULED_TIME], Action::XCMP { - destination: destination.clone(), + destination, schedule_fee: NATIVE_LOCATION, execution_fee: AssetPayment { asset_location: MultiLocation::new(0, Here).into(), @@ -2716,7 +2696,6 @@ fn auto_compound_delegated_stake_enough_balance_has_delegation() { // 2. Next execution will be scheduled emitted_events - .clone() .into_iter() .find(|e| { matches!(e, RuntimeEvent::AutomationTime(crate::Event::TaskRescheduled { .. })) @@ -2737,7 +2716,7 @@ fn auto_compound_delegated_stake_enough_balance_has_delegation() { // 3. The task will re-run in the next execution time Timestamp::set_timestamp(next_scheduled_time * 1_000); - get_funds(delegator.clone()); + get_funds(delegator); System::reset_events(); AutomationTime::trigger_tasks(Weight::from_ref_time(100_000_000_000)); @@ -2804,7 +2783,7 @@ fn auto_compound_delegated_stake_not_enough_balance_has_delegation() { .into_iter() .find(|t| *t == (delegator.clone(), task_id.clone())) .expect("Task should have been rescheduled"); - let task = AutomationTime::get_account_task(delegator.clone(), task_id) + let task = AutomationTime::get_account_task(delegator, task_id) .expect("Task should not have been removed from task map"); assert_eq!(task.schedule.known_executions_left(), 1); assert_eq!(task.execution_times(), vec![next_scheduled_time]); @@ -2860,7 +2839,7 @@ fn auto_compound_delegated_stake_enough_balance_no_delegator() { RuntimeEvent::AutomationTime(crate::Event::TaskExecutionFailed { error, .. - }) if *error == delegator_error.clone()) + }) if *error == delegator_error) }) .expect("TaskExecutionFailed event should have been emitted"); @@ -2872,7 +2851,7 @@ fn auto_compound_delegated_stake_enough_balance_no_delegator() { RuntimeEvent::AutomationTime(crate::Event::TaskNotRescheduled { error, .. - }) if *error == delegator_error.clone()) + }) if *error == delegator_error) }) .expect("TaskNotRescheduled event should have been emitted"); @@ -2934,7 +2913,7 @@ fn auto_compound_delegated_stake_enough_balance_no_delegation() { RuntimeEvent::AutomationTime(crate::Event::TaskExecutionFailed { error, .. 
- }) if *error == delegation_error.clone()) + }) if *error == delegation_error) }) .expect("TaskExecutionFailed event should have been emitted"); @@ -2946,7 +2925,7 @@ fn auto_compound_delegated_stake_enough_balance_no_delegation() { RuntimeEvent::AutomationTime(crate::Event::TaskNotRescheduled { error, .. - }) if *error == delegation_error.clone()) + }) if *error == delegation_error) }) .expect("TaskNotRescheduled event should have been emitted"); @@ -2985,7 +2964,7 @@ fn auto_compound_delegated_stake_not_enough_balance_no_delegation() { frequency, Action::AutoCompoundDelegatedStake { delegator: delegator.clone(), - collator: collator.clone(), + collator, account_minimum, }, vec!["DelegatorDNE".as_bytes().to_vec(), "DelegationDNE".as_bytes().to_vec()], @@ -3007,7 +2986,7 @@ fn auto_compound_delegated_stake_not_enough_balance_no_delegation() { RuntimeEvent::AutomationTime(crate::Event::TaskExecutionFailed { error, .. - }) if *error == insufficient_balance_error.clone()) + }) if *error == insufficient_balance_error) }) .expect("TaskExecutionFailed event should have been emitted"); @@ -3026,7 +3005,7 @@ fn auto_compound_delegated_stake_not_enough_balance_no_delegation() { .into_iter() .find(|t| *t == (delegator.clone(), task_id.clone())) .expect("Task should have been rescheduled"); - let task = AutomationTime::get_account_task(delegator.clone(), task_id) + let task = AutomationTime::get_account_task(delegator, task_id) .expect("Task should not have been removed from task map"); assert_eq!(task.schedule.known_executions_left(), 1); assert_eq!(task.execution_times(), vec![next_scheduled_time]); @@ -3085,7 +3064,7 @@ fn trigger_tasks_updates_executions_left() { }), ] ); - match AutomationTime::get_account_task(owner.clone(), task_id01) { + match AutomationTime::get_account_task(owner, task_id01) { None => { panic!("A task should exist if it was scheduled") }, @@ -3150,7 +3129,7 @@ fn trigger_tasks_removes_completed_tasks() { }), ] ); - assert_eq!(AutomationTime::get_account_task(owner.clone(), task_id01), None); + assert_eq!(AutomationTime::get_account_task(owner, task_id01), None); }) } @@ -3232,8 +3211,8 @@ fn on_init_runs_tasks() { }), ] ); - assert_eq!(AutomationTime::get_account_task(owner.clone(), task_id1.clone()), None); - assert_eq!(AutomationTime::get_account_task(owner.clone(), task_id2.clone()), None); + assert_eq!(AutomationTime::get_account_task(owner.clone(), task_id1), None); + assert_eq!(AutomationTime::get_account_task(owner.clone(), task_id2), None); assert_ne!(AutomationTime::get_account_task(owner.clone(), task_id3.clone()), None); assert_eq!(AutomationTime::get_task_queue().len(), 1); assert_eq!(AutomationTime::get_missed_queue().len(), 0); @@ -3254,7 +3233,7 @@ fn on_init_runs_tasks() { }), ], ); - assert_eq!(AutomationTime::get_account_task(owner.clone(), task_id3.clone()), None); + assert_eq!(AutomationTime::get_account_task(owner, task_id3), None); assert_eq!(AutomationTime::get_task_queue().len(), 0); assert_eq!(AutomationTime::get_missed_queue().len(), 0); }) @@ -3410,7 +3389,7 @@ fn on_init_shutdown() { ALICE, vec![40], vec![SCHEDULED_TIME], - create_dynamic_dispatch_remark_action(message_one.clone()), + create_dynamic_dispatch_remark_action(message_one), vec![], ); let message_two: Vec = vec![2, 4]; @@ -3418,7 +3397,7 @@ fn on_init_shutdown() { ALICE, vec![50], vec![SCHEDULED_TIME], - create_dynamic_dispatch_remark_action(message_two.clone()), + create_dynamic_dispatch_remark_action(message_two), vec![], ); let task_id3 = add_task_to_task_queue( @@ -3435,9 +3414,9 
@@ fn on_init_shutdown() { Timestamp::set_timestamp(START_BLOCK_TIME + (3600 * 1_000)); AutomationTime::on_initialize(2); assert_eq!(events(), [],); - assert_ne!(AutomationTime::get_account_task(owner.clone(), task_id1.clone()), None); - assert_ne!(AutomationTime::get_account_task(owner.clone(), task_id2.clone()), None); - assert_ne!(AutomationTime::get_account_task(owner.clone(), task_id3.clone()), None); + assert_ne!(AutomationTime::get_account_task(owner.clone(), task_id1), None); + assert_ne!(AutomationTime::get_account_task(owner.clone(), task_id2), None); + assert_ne!(AutomationTime::get_account_task(owner, task_id3), None); assert_eq!(AutomationTime::get_task_queue().len(), 3); assert_eq!(AutomationTime::get_missed_queue().len(), 0); }) diff --git a/pallets/automation-time/src/types.rs b/pallets/automation-time/src/types.rs index eeb1b8aa7..b739b260c 100644 --- a/pallets/automation-time/src/types.rs +++ b/pallets/automation-time/src/types.rs @@ -60,7 +60,7 @@ impl Action { pub fn schedule_fee_location(&self) -> MultiLocation { match self { - Action::XCMP { schedule_fee, .. } => (*schedule_fee).clone(), + Action::XCMP { schedule_fee, .. } => *schedule_fee, _ => MultiLocation::default(), } } @@ -372,7 +372,7 @@ mod tests { let task_id = vec![48, 45, 48, 45, 48]; assert_err!( ScheduledTasksOf:: { tasks: vec![], weight: MaxWeightPerSlot::get() } - .try_push::>(task_id.clone(), &task), + .try_push::>(task_id, &task), Error::::TimeSlotFull ); }) @@ -385,7 +385,7 @@ mod tests { let id = (alice.clone(), vec![49, 45, 48, 45, 42]); let task = TaskOf::::create_event_task::( - alice.clone(), + alice, vec![0], vec![SCHEDULED_TIME], vec![0], diff --git a/pallets/valve/src/lib.rs b/pallets/valve/src/lib.rs index 289183615..ff520f395 100644 --- a/pallets/valve/src/lib.rs +++ b/pallets/valve/src/lib.rs @@ -322,7 +322,7 @@ pub mod pallet { impl Pallet { /// Sudo or a member of the CallAccessFilter can call. 
pub fn ensure_allowed(origin: OriginFor) -> DispatchResult { - if let Err(_) = ensure_root(origin.clone()) { + if ensure_root(origin.clone()).is_err() { let who = ensure_signed(origin)?; if !T::CallAccessFilter::contains(&who) { Err(Error::::NotAllowed)?; diff --git a/pallets/valve/src/mock.rs b/pallets/valve/src/mock.rs index 4732f5fa1..df191fd26 100644 --- a/pallets/valve/src/mock.rs +++ b/pallets/valve/src/mock.rs @@ -156,17 +156,12 @@ impl Config for Test { } /// Externality builder for pallet maintenance mode's mock runtime +#[derive(Default)] pub(crate) struct ExtBuilder { valve_closed: bool, closed_gates: Vec>, } -impl Default for ExtBuilder { - fn default() -> ExtBuilder { - ExtBuilder { valve_closed: false, closed_gates: vec![] } - } -} - impl ExtBuilder { pub(crate) fn with_valve_closed(mut self, c: bool) -> Self { self.valve_closed = c; diff --git a/pallets/vesting/src/lib.rs b/pallets/vesting/src/lib.rs index 0058f71f5..1e83dfc6a 100644 --- a/pallets/vesting/src/lib.rs +++ b/pallets/vesting/src/lib.rs @@ -167,7 +167,7 @@ pub mod pallet { *amount > ::Currency::minimum_balance(), "Cannot vest less than the existential deposit" ); - scheduled_vests.push((account.clone(), amount.clone())); + scheduled_vests.push((account.clone(), *amount)); unvested_allocation = unvested_allocation.saturating_add(*amount); } VestingSchedule::::insert(time, scheduled_vests); diff --git a/pallets/vesting/src/mock.rs b/pallets/vesting/src/mock.rs index f2d15e525..172a8ef4c 100644 --- a/pallets/vesting/src/mock.rs +++ b/pallets/vesting/src/mock.rs @@ -116,16 +116,11 @@ impl Config for Test { type Currency = Balances; } +#[derive(Default)] pub(crate) struct ExtBuilder { vesting_schedule: Vec<(u64, Vec<(AccountId, Balance)>)>, } -impl Default for ExtBuilder { - fn default() -> ExtBuilder { - ExtBuilder { vesting_schedule: Default::default() } - } -} - impl ExtBuilder { pub(crate) fn schedule(mut self, v: Vec<(u64, Vec<(AccountId, Balance)>)>) -> Self { self.vesting_schedule = v; diff --git a/pallets/vesting/src/tests.rs b/pallets/vesting/src/tests.rs index 42b050698..658c4313c 100644 --- a/pallets/vesting/src/tests.rs +++ b/pallets/vesting/src/tests.rs @@ -138,16 +138,9 @@ fn additional_issuance() { } fn get_schedule() -> Vec<(u64, Vec<(AccountId, Balance)>)> { - let mut scheduled_vests: Vec<(u64, Vec<(AccountId, Balance)>)> = vec![]; - let mut first_vest: Vec<(AccountId, Balance)> = vec![]; - first_vest.push((ALICE, 100)); - first_vest.push((BOB, 100)); - scheduled_vests.push((FIRST_VEST_TIME, first_vest)); - let mut second_vest: Vec<(AccountId, Balance)> = vec![]; - second_vest.push((ALICE, 200)); - second_vest.push((BOB, 200)); - scheduled_vests.push((SECOND_VEST_TIME, second_vest)); - + let first_vest = vec![(ALICE, 100), (BOB, 100)]; + let second_vest = vec![(ALICE, 200), (BOB, 200)]; + let scheduled_vests = vec![(FIRST_VEST_TIME, first_vest), (SECOND_VEST_TIME, second_vest)]; scheduled_vests } diff --git a/pallets/xcmp-handler/rpc/src/lib.rs b/pallets/xcmp-handler/rpc/src/lib.rs index c2f934451..6d9b702cb 100644 --- a/pallets/xcmp-handler/rpc/src/lib.rs +++ b/pallets/xcmp-handler/rpc/src/lib.rs @@ -83,6 +83,6 @@ where }; runtime_api_result .map_err(|e| mapped_err(format!("{:?}", e))) - .map(|r| r.map_err(|e| mapped_err(String::from_utf8(e).unwrap_or(String::default()))))? + .map(|r| r.map_err(|e| mapped_err(String::from_utf8(e).unwrap_or_default())))? 
 	}
 }
diff --git a/pallets/xcmp-handler/src/lib.rs b/pallets/xcmp-handler/src/lib.rs
index 6e8041ab5..dd2d88963 100644
--- a/pallets/xcmp-handler/src/lib.rs
+++ b/pallets/xcmp-handler/src/lib.rs
@@ -302,7 +302,7 @@ pub mod pallet {
 				.map_err(|_| Error::::CannotReanchor)?;
 
 			let target_xcm = Xcm(vec![
-				DescendOrigin::<()>(descend_location.clone()),
+				DescendOrigin::<()>(descend_location),
 				WithdrawAsset::<()>(target_asset.clone().into()),
 				BuyExecution::<()> { fees: target_asset, weight_limit: Limited(xcm_weight) },
 				Transact::<()> {
@@ -347,7 +347,7 @@ pub mod pallet {
 
 			Self::deposit_event(Event::XcmTransactedLocally);
 
-			Ok(().into())
+			Ok(())
 		}
 
 		/// Send XCM instructions to parachain.
@@ -357,7 +357,7 @@ pub mod pallet {
 			target_instructions: xcm::latest::Xcm<()>,
 		) -> Result<(), DispatchError> {
 			#[allow(unused_variables)]
-			let destination_location = destination.clone();
+			let destination_location = destination;
 			#[cfg(all(not(test), feature = "runtime-benchmarks"))]
 			let destination_location = MultiLocation::new(1, Here);
 
@@ -372,7 +372,7 @@ pub mod pallet {
 
 			Self::deposit_event(Event::XcmSent { destination });
 
-			Ok(().into())
+			Ok(())
 		}
 
 		/// Create and transact instructions.
@@ -405,7 +405,7 @@ pub mod pallet {
 			Self::transact_in_local_chain(local_instructions)?;
 			Self::transact_in_target_chain(destination, target_instructions)?;
 
-			Ok(().into())
+			Ok(())
 		}
 
 		/// Pay for XCMP fees.
@@ -432,7 +432,7 @@ pub mod pallet {
 				}),
 			};
 
-			Ok(().into())
+			Ok(())
 		}
 	}
 }
@@ -474,13 +474,13 @@ impl XcmpTransactor for Pallet {
 			flow,
 		)?;
 
-		Ok(()).into()
+		Ok(())
 	}
 
 	fn pay_xcm_fee(source: T::AccountId, fee: u128) -> Result<(), sp_runtime::DispatchError> {
 		Self::pay_xcm_fee(source, fee)?;
 
-		Ok(()).into()
+		Ok(())
 	}
 }
diff --git a/pallets/xcmp-handler/src/mock.rs b/pallets/xcmp-handler/src/mock.rs
index 1e6fa1d24..9af7567f4 100644
--- a/pallets/xcmp-handler/src/mock.rs
+++ b/pallets/xcmp-handler/src/mock.rs
@@ -172,9 +172,8 @@ impl SendXcm for TestSendXcm {
 		if message.clone().unwrap() == err_message {
 			Err(SendError::Transport("Destination location full"))
 		} else {
-			SENT_XCM.with(|q| {
-				q.borrow_mut().push((destination.clone().unwrap(), message.clone().unwrap()))
-			});
+			SENT_XCM
+				.with(|q| q.borrow_mut().push(((*destination).unwrap(), message.clone().unwrap())));
 			Ok(((), MultiAssets::new()))
 		}
 	}
@@ -203,7 +202,7 @@ pub struct DummyAssetTransactor;
 impl TransactAsset for DummyAssetTransactor {
 	fn deposit_asset(what: &MultiAsset, who: &MultiLocation, _context: &XcmContext) -> XcmResult {
 		let asset = what.clone();
-		let location = who.clone();
+		let location = *who;
 		TRANSACT_ASSET.with(|q| q.borrow_mut().push((asset, location)));
 		Ok(())
 	}
@@ -214,7 +213,7 @@ impl TransactAsset for DummyAssetTransactor {
 		_maybe_context: Option<&XcmContext>,
 	) -> Result {
 		let asset = what.clone();
-		let location = who.clone();
+		let location = *who;
 		TRANSACT_ASSET.with(|q| q.borrow_mut().push((asset.clone(), location)));
 		Ok(asset.into())
 	}
@@ -223,7 +222,7 @@ impl TransactAsset for DummyAssetTransactor {
 
 parameter_types! {
 	pub const RelayNetwork: NetworkId = NetworkId::Polkadot;
 	pub UniversalLocation: InteriorMultiLocation =
-		X1(Parachain(2114).into());
+		X1(Parachain(2114));
 	pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into();
 }
 pub struct XcmConfig;
diff --git a/pallets/xcmp-handler/src/tests.rs b/pallets/xcmp-handler/src/tests.rs
index 75b0c64ba..c20e8c6b0 100644
--- a/pallets/xcmp-handler/src/tests.rs
+++ b/pallets/xcmp-handler/src/tests.rs
@@ -45,12 +45,12 @@ fn get_instruction_set_local_currency_instructions() {
 			AccountIdToMultiLocation::convert(ALICE).try_into().unwrap();
 
 		let expected_instructions = XcmpHandler::get_local_currency_instructions(
-			destination.clone(),
+			destination,
 			asset_location,
 			descend_location,
 			transact_encoded_call.clone(),
-			transact_encoded_call_weight.clone(),
-			overall_weight.clone(),
+			transact_encoded_call_weight,
+			overall_weight,
 			10,
 		)
 		.unwrap();
@@ -107,14 +107,14 @@ fn get_local_currency_instructions_works() {
 fn transact_in_local_chain_works() {
 	new_test_ext().execute_with(|| {
 		let destination = MultiLocation::new(1, X1(Parachain(PARA_ID)));
-		let asset_location = destination.clone();
+		let asset_location = destination;
 		let transact_encoded_call: Vec = vec![0, 1, 2];
 		let transact_encoded_call_weight = Weight::from_ref_time(100_000_000);
 		let xcm_weight = transact_encoded_call_weight
 			.checked_add(&Weight::from_ref_time(100_000_000))
 			.expect("xcm_weight overflow");
 		let xcm_fee = (xcm_weight.ref_time() as u128) * 5_000_000_000;
-		let asset = MultiAsset { id: Concrete(asset_location.clone()), fun: Fungible(xcm_fee) };
+		let asset = MultiAsset { id: Concrete(asset_location), fun: Fungible(xcm_fee) };
 		let descend_location: Junctions =
 			AccountIdToMultiLocation::convert(ALICE).try_into().unwrap();
@@ -122,7 +122,7 @@ fn transact_in_local_chain_works() {
 			destination,
 			asset_location,
 			descend_location,
-			transact_encoded_call.clone(),
+			transact_encoded_call,
 			transact_encoded_call_weight,
 			xcm_weight,
 			xcm_fee,
@@ -135,14 +135,11 @@ fn transact_in_local_chain_works() {
 			vec![
 				// Withdrawing asset
 				(
-					asset.clone().into(),
+					asset.clone(),
 					MultiLocation { parents: 1, interior: X1(Parachain(LOCAL_PARA_ID)) }
 				),
 				// Depositing asset
-				(
-					asset.clone().into(),
-					MultiLocation { parents: 1, interior: X1(Parachain(PARA_ID)) }
-				),
+				(asset, MultiLocation { parents: 1, interior: X1(Parachain(PARA_ID)) }),
 			]
 		);
 		assert_eq!(events(), [RuntimeEvent::XcmpHandler(crate::Event::XcmTransactedLocally)]);
@@ -165,7 +162,7 @@ fn transact_in_target_chain_works() {
 			AccountIdToMultiLocation::convert(ALICE).try_into().unwrap();
 
 		let (_, target_instructions) = XcmpHandler::get_local_currency_instructions(
-			destination.clone(),
+			destination,
 			asset_location,
 			descend_location,
 			transact_encoded_call.clone(),
@@ -175,7 +172,7 @@ fn transact_in_target_chain_works() {
 		)
 		.unwrap();
 
-		assert_ok!(XcmpHandler::transact_in_target_chain(destination.clone(), target_instructions));
+		assert_ok!(XcmpHandler::transact_in_target_chain(destination, target_instructions));
 		assert_eq!(
 			sent_xcm(),
 			vec![(
@@ -196,7 +193,7 @@ fn transact_in_target_chain_works() {
 				Transact {
 					origin_kind: OriginKind::SovereignAccount,
 					require_weight_at_most: transact_encoded_call_weight,
-					call: transact_encoded_call.clone().into(),
+					call: transact_encoded_call.into(),
 				},
 				RefundSurplus,
 				DepositAsset {
diff --git a/runtime/neumann/src/lib.rs b/runtime/neumann/src/lib.rs
index b654c77f7..06521bf54 100644
--- a/runtime/neumann/src/lib.rs
+++ b/runtime/neumann/src/lib.rs
@@ -1182,8 +1182,8 @@ impl_runtime_apis! {
 				.map_err(|_| "Unable to parse fee".as_bytes())?;
 
 			Ok(AutomationFeeDetails {
-				schedule_fee: fee_handler.schedule_fee_amount.into(),
-				execution_fee: fee_handler.execution_fee_amount.into()
+				schedule_fee: fee_handler.schedule_fee_amount,
+				execution_fee: fee_handler.execution_fee_amount
 			})
 		}
 
diff --git a/runtime/neumann/src/xcm_config.rs b/runtime/neumann/src/xcm_config.rs
index dbb6577c1..03d31ca50 100644
--- a/runtime/neumann/src/xcm_config.rs
+++ b/runtime/neumann/src/xcm_config.rs
@@ -322,7 +322,7 @@ impl Convert> for TokenIdConvert {
 			MultiLocation { parents: 1, interior: X1(Parachain(para_id)) }
 				if para_id == u32::from(ParachainInfo::parachain_id()) =>
 				Some(NATIVE_TOKEN_ID),
-			_ => AssetRegistryOf::::location_to_asset_id(location.clone()),
+			_ => AssetRegistryOf::::location_to_asset_id(location),
 		}
 	}
 }
diff --git a/runtime/oak/src/lib.rs b/runtime/oak/src/lib.rs
index e6428c4d4..e61bc4071 100644
--- a/runtime/oak/src/lib.rs
+++ b/runtime/oak/src/lib.rs
@@ -1194,8 +1194,8 @@ impl_runtime_apis! {
 				.map_err(|_| "Unable to parse fee".as_bytes())?;
 
 			Ok(AutomationFeeDetails {
-				schedule_fee: fee_handler.schedule_fee_amount.into(),
-				execution_fee: fee_handler.execution_fee_amount.into()
+				schedule_fee: fee_handler.schedule_fee_amount,
+				execution_fee: fee_handler.execution_fee_amount
 			})
 		}
 
diff --git a/runtime/oak/src/xcm_config.rs b/runtime/oak/src/xcm_config.rs
index bd83c3146..446aaf917 100644
--- a/runtime/oak/src/xcm_config.rs
+++ b/runtime/oak/src/xcm_config.rs
@@ -326,7 +326,7 @@ impl Convert> for TokenIdConvert {
 			MultiLocation { parents: 1, interior: X1(Parachain(para_id)) }
 				if para_id == u32::from(ParachainInfo::parachain_id()) =>
 				Some(NATIVE_TOKEN_ID),
-			_ => AssetRegistryOf::::location_to_asset_id(location.clone()),
+			_ => AssetRegistryOf::::location_to_asset_id(location),
 		}
 	}
 }
diff --git a/runtime/turing/src/lib.rs b/runtime/turing/src/lib.rs
index 6ca7c9202..a7425db34 100644
--- a/runtime/turing/src/lib.rs
+++ b/runtime/turing/src/lib.rs
@@ -1206,8 +1206,8 @@ impl_runtime_apis! {
 				.map_err(|_| "Unable to parse fee".as_bytes())?;
 
 			Ok(AutomationFeeDetails {
-				schedule_fee: fee_handler.schedule_fee_amount.into(),
-				execution_fee: fee_handler.execution_fee_amount.into()
+				schedule_fee: fee_handler.schedule_fee_amount,
+				execution_fee: fee_handler.execution_fee_amount
 			})
 		}
 
diff --git a/runtime/turing/src/xcm_config.rs b/runtime/turing/src/xcm_config.rs
index a0d45fd33..88c4426b6 100644
--- a/runtime/turing/src/xcm_config.rs
+++ b/runtime/turing/src/xcm_config.rs
@@ -325,7 +325,7 @@ impl Convert> for TokenIdConvert {
 			MultiLocation { parents: 1, interior: X1(Parachain(para_id)) }
 				if para_id == u32::from(ParachainInfo::parachain_id()) =>
 				Some(NATIVE_TOKEN_ID),
-			_ => AssetRegistryOf::::location_to_asset_id(location.clone()),
+			_ => AssetRegistryOf::::location_to_asset_id(location),
 		}
 	}
 }