Remove unused async #943

Merged: 3 commits, Sep 24, 2023

6 changes: 6 additions & 0 deletions .cargo/config.toml
@@ -0,0 +1,6 @@
+[build]
+# Workaround to enable this lint for all packages in the workspace
+#
+# Once https://github.com/rust-lang/cargo/issues/12115 makes it to our
+# toolchain, we'll be able to put this in the `Cargo.toml` manifest instead.
+rustflags = ["-Wclippy::unused-async"]
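
For context, `clippy::unused-async` (enabled above through `rustflags`) warns when an `async fn` never awaits anything: the `async` wrapper only adds a generated state machine and forces every caller to `.await` for no benefit. A minimal, hypothetical illustration (not Crucible code) of what the lint flags and the mechanical fix this PR applies throughout:

```rust
// Hypothetical example, not part of this repository.
#[allow(dead_code)]
async fn byte_len(s: &str) -> usize {
    // No `.await` anywhere in the body, so clippy::unused_async fires here.
    s.len()
}

// The fix: drop `async` from the signature and `.await` from the callers.
fn byte_len_sync(s: &str) -> usize {
    s.len()
}

fn main() {
    assert_eq!(byte_len_sync("crucible"), 8);
}
```

Each file below is the same change: remove `async` from a function with no `.await` in its body, then remove the `.await` at every call site.
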
1 change: 0 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
@@ -124,4 +124,3 @@ repair-client = { path = "./repair-client" }
 
 [profile.dev]
 panic = 'abort'
-
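
The comment in `.cargo/config.toml` above points at rust-lang/cargo#12115; once that work reaches our toolchain, the `rustflags` workaround could move into the manifest itself. A sketch only, assuming the `[lints]` table proposed there ships with workspace inheritance:

```toml
# Hypothetical future configuration, not part of this change.
# In the workspace-root Cargo.toml:
[workspace.lints.clippy]
unused_async = "warn"

# ...and in each member crate's Cargo.toml:
[lints]
workspace = true
```
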
4 changes: 2 additions & 2 deletions downstairs/src/lib.rs
@@ -1981,7 +1981,7 @@ impl Downstairs {
     ) -> Result<Option<Message>> {
         let job = {
             let mut work = self.work_lock(upstairs_connection).await?;
-            let job = work.get_ready_job(job_id).await;
+            let job = work.get_ready_job(job_id);
 
             // `promote_to_active` can clear out the Work struct for this
             // UpstairsConnection, but the tasks can still be working on
@@ -2913,7 +2913,7 @@ impl Work {
     }
 
     // Return a job that's ready to have the work done
-    async fn get_ready_job(&mut self, job_id: JobId) -> Option<DownstairsWork> {
+    fn get_ready_job(&mut self, job_id: JobId) -> Option<DownstairsWork> {
         match self.active.get(&job_id) {
             Some(job) => {
                 assert_eq!(job.state, WorkState::InProgress);
14 changes: 7 additions & 7 deletions downstairs/src/repair.rs
@@ -211,7 +211,7 @@ async fn get_files_for_extent(
             format!("Expected {:?} to be a directory", extent_dir),
         ))
     } else {
-        let files = extent_file_list(extent_dir, eid).await?;
+        let files = extent_file_list(extent_dir, eid)?;
         Ok(HttpResponseOk(files))
     }
 }
@@ -221,7 +221,7 @@ async fn get_files_for_extent(
  * that correspond to the given extent. Return an error if any
  * of the required files are missing.
  */
-async fn extent_file_list(
+fn extent_file_list(
     extent_dir: PathBuf,
     eid: u32,
 ) -> Result<Vec<String>, HttpError> {
@@ -281,7 +281,7 @@ mod test {
 
         // Determine the directory and name for expected extent files.
         let ed = extent_dir(&dir, 1);
-        let mut ex_files = extent_file_list(ed, 1).await.unwrap();
+        let mut ex_files = extent_file_list(ed, 1).unwrap();
         ex_files.sort();
         let expected = vec!["001", "001.db", "001.db-shm", "001.db-wal"];
         println!("files: {:?}", ex_files);
@@ -311,7 +311,7 @@ mod test {
         rm_file.set_extension("db-shm");
         std::fs::remove_file(rm_file).unwrap();
 
-        let mut ex_files = extent_file_list(extent_dir, 1).await.unwrap();
+        let mut ex_files = extent_file_list(extent_dir, 1).unwrap();
         ex_files.sort();
         let expected = vec!["001", "001.db"];
         println!("files: {:?}", ex_files);
@@ -346,7 +346,7 @@ mod test {
         rm_file.set_extension("db-shm");
         let _ = std::fs::remove_file(rm_file);
 
-        let mut ex_files = extent_file_list(extent_dir, 1).await.unwrap();
+        let mut ex_files = extent_file_list(extent_dir, 1).unwrap();
         ex_files.sort();
         let expected = vec!["001", "001.db"];
         println!("files: {:?}", ex_files);
@@ -373,7 +373,7 @@ mod test {
         rm_file.set_extension("db");
         std::fs::remove_file(&rm_file).unwrap();
 
-        assert!(extent_file_list(extent_dir, 2).await.is_err());
+        assert!(extent_file_list(extent_dir, 2).is_err());
 
         Ok(())
     }
@@ -395,7 +395,7 @@ mod test {
         rm_file.push(extent_file_name(1, ExtentType::Data));
         std::fs::remove_file(&rm_file).unwrap();
 
-        assert!(extent_file_list(extent_dir, 1).await.is_err());
+        assert!(extent_file_list(extent_dir, 1).is_err());
 
         Ok(())
     }
6 changes: 2 additions & 4 deletions integration_tests/src/lib.rs
@@ -3395,13 +3395,12 @@ mod test {
 
         // Start a new pantry
 
-        let (log, pantry) = crucible_pantry::initialize_pantry().await.unwrap();
+        let (log, pantry) = crucible_pantry::initialize_pantry().unwrap();
         let (pantry_addr, _join_handle) = crucible_pantry::server::run_server(
             &log,
             "127.0.0.1:0".parse().unwrap(),
             &pantry,
         )
-        .await
         .unwrap();
 
         // Create a Volume out of it, and attach a CruciblePantryClient
@@ -3929,13 +3928,12 @@ mod test {
 
         // Start the pantry, then use it to scrub
 
-        let (log, pantry) = crucible_pantry::initialize_pantry().await.unwrap();
+        let (log, pantry) = crucible_pantry::initialize_pantry().unwrap();
         let (pantry_addr, _join_handle) = crucible_pantry::server::run_server(
             &log,
             "127.0.0.1:0".parse().unwrap(),
             &pantry,
         )
-        .await
         .unwrap();
 
         let client =
2 changes: 1 addition & 1 deletion pantry/src/lib.rs
@@ -11,7 +11,7 @@ pub const PROG: &str = "crucible-pantry";
 pub mod pantry;
 pub mod server;
 
-pub async fn initialize_pantry() -> Result<(Logger, Arc<pantry::Pantry>)> {
+pub fn initialize_pantry() -> Result<(Logger, Arc<pantry::Pantry>)> {
     let log = ConfigLogging::File {
         level: ConfigLoggingLevel::Info,
         path: "/dev/stdout".into(),
5 changes: 2 additions & 3 deletions pantry/src/main.rs
@@ -45,10 +45,9 @@ async fn main() -> Result<()> {
             write_openapi(&mut f)
         }
         Args::Run { listen } => {
-            let (log, pantry) = initialize_pantry().await?;
+            let (log, pantry) = initialize_pantry()?;
 
-            let (_, join_handle) =
-                server::run_server(&log, listen, &pantry).await?;
+            let (_, join_handle) = server::run_server(&log, listen, &pantry)?;
 
             join_handle.await?.map_err(|e| anyhow!(e))
         }
2 changes: 1 addition & 1 deletion pantry/src/server.rs
@@ -349,7 +349,7 @@ pub fn make_api() -> Result<dropshot::ApiDescription<Arc<Pantry>>, String> {
     Ok(api)
 }
 
-pub async fn run_server(
+pub fn run_server(
     log: &Logger,
     bind_address: SocketAddr,
     df: &Arc<Pantry>,
34 changes: 17 additions & 17 deletions upstairs/src/lib.rs
@@ -3716,7 +3716,7 @@ impl Downstairs {
     /**
      * Enqueue a new downstairs live repair request.
      */
-    async fn enqueue_repair(&mut self, mut io: DownstairsIO) {
+    fn enqueue_repair(&mut self, mut io: DownstairsIO) {
         // Puts the repair IO onto the downstairs work queue.
         for cid in ClientId::iter() {
             assert_eq!(io.state[cid], IOState::New);
@@ -5042,7 +5042,7 @@ impl UpstairsState {
      * that happens on initial startup. This is because the running
      * upstairs has some state it can use to re-verify a downstairs.
      */
-    async fn set_active(&mut self) -> Result<(), CrucibleError> {
+    fn set_active(&mut self) -> Result<(), CrucibleError> {
         if self.up_state == UpState::Active {
             crucible_bail!(UpstairsAlreadyActive);
         } else if self.up_state == UpState::Deactivating {
@@ -5359,7 +5359,7 @@ impl Upstairs {
     async fn set_active(&self) -> Result<(), CrucibleError> {
         let mut active = self.active.lock().await;
         self.stats.add_activation().await;
-        active.set_active().await?;
+        active.set_active()?;
         info!(
             self.log,
             "{} is now active with session: {}", self.uuid, self.session_id
@@ -6614,7 +6614,7 @@ impl Upstairs {
      * Verify the guest given gen number is highest.
      * Decide if we need repair, and if so create the repair list
      */
-    async fn collate_downstairs(
+    fn collate_downstairs(
         &self,
         ds: &mut Downstairs,
     ) -> Result<bool, CrucibleError> {
@@ -6939,7 +6939,7 @@ impl Upstairs {
              * downstairs out, forget any activation requests, and the
              * upstairs goes back to waiting for another activation request.
              */
-            self.collate_downstairs(&mut ds).await
+            self.collate_downstairs(&mut ds)
         };
 
         match collate_status {
@@ -7048,7 +7048,7 @@ impl Upstairs {
                 for s in ds.ds_state.iter_mut() {
                     *s = DsState::Active;
                 }
-                active.set_active().await?;
+                active.set_active()?;
                 info!(
                     self.log,
                     "{} is now active with session: {}",
@@ -7084,7 +7084,7 @@ impl Upstairs {
                 for s in ds.ds_state.iter_mut() {
                     *s = DsState::Active;
                 }
-                active.set_active().await?;
+                active.set_active()?;
                 info!(
                     self.log,
                     "{} is now active with session: {}",
@@ -8822,7 +8822,7 @@ impl GtoS {
     /*
      * Notify corresponding BlockReqWaiter
      */
-    pub async fn notify(self, result: Result<(), CrucibleError>) {
+    pub fn notify(self, result: Result<(), CrucibleError>) {
         /*
          * If present, send the result to the guest. If this is a flush
          * issued on behalf of crucible, then there is no place to send
@@ -8943,7 +8943,7 @@ impl GuestWork {
                 gtos_job.transfer().await;
             }
 
-            gtos_job.notify(result).await;
+            gtos_job.notify(result);
 
             self.completed.push(gw_id);
         } else {
@@ -9503,7 +9503,7 @@ struct Condition {
  * Send work to all the targets.
  * If a send fails, report an error.
  */
-async fn send_work(t: &[Target], val: u64, log: &Logger) {
+fn send_work(t: &[Target], val: u64, log: &Logger) {
     for (client_id, d_client) in t.iter().enumerate() {
         let res = d_client.ds_work_tx.try_send(val);
         if let Err(e) = res {
@@ -9754,7 +9754,7 @@ async fn process_new_io(
                 return;
             }
 
-            send_work(dst, *lastcast, &up.log).await;
+            send_work(dst, *lastcast, &up.log);
             *lastcast += 1;
         }
         BlockOp::Read { offset, data } => {
@@ -9765,7 +9765,7 @@ async fn process_new_io(
             {
                 return;
             }
-            send_work(dst, *lastcast, &up.log).await;
+            send_work(dst, *lastcast, &up.log);
             *lastcast += 1;
         }
         BlockOp::Write { offset, data } => {
@@ -9776,7 +9776,7 @@ async fn process_new_io(
             {
                 return;
             }
-            send_work(dst, *lastcast, &up.log).await;
+            send_work(dst, *lastcast, &up.log);
             *lastcast += 1;
         }
         BlockOp::WriteUnwritten { offset, data } => {
@@ -9787,7 +9787,7 @@ async fn process_new_io(
             {
                 return;
             }
-            send_work(dst, *lastcast, &up.log).await;
+            send_work(dst, *lastcast, &up.log);
             *lastcast += 1;
         }
         BlockOp::Flush { snapshot_details } => {
@@ -9811,7 +9811,7 @@ async fn process_new_io(
                 return;
             }
 
-            send_work(dst, *lastcast, &up.log).await;
+            send_work(dst, *lastcast, &up.log);
             *lastcast += 1;
         }
         BlockOp::RepairOp => {
@@ -9915,7 +9915,7 @@ async fn process_new_io(
                 req.send_err(CrucibleError::UpstairsInactive);
                 return;
             }
-            send_work(dst, *lastcast, &up.log).await;
+            send_work(dst, *lastcast, &up.log);
             *lastcast += 1;
         }
     }
@@ -10133,7 +10133,7 @@ async fn up_listen(
                 error!(up.log, "flush send failed:{:?}", e);
                 // XXX What to do here?
             } else {
-                send_work(&dst, 1, &up.log).await;
+                send_work(&dst, 1, &up.log);
             }
         }
 
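
For context on the `send_work` change above: `tokio::sync::mpsc::Sender::try_send` is a synchronous, non-blocking call, so a function that only calls it has nothing to await. A minimal standalone sketch (hypothetical names, not the Crucible types):

```rust
use tokio::sync::mpsc;

// Synchronous helper: try_send never blocks, so no `async` is needed and
// callers don't have to `.await` it.
fn send_val(tx: &mpsc::Sender<u64>, val: u64) {
    if let Err(e) = tx.try_send(val) {
        eprintln!("send failed: {:?}", e);
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel(4);
    send_val(&tx, 1);
    assert_eq!(rx.recv().await, Some(1));
}
```
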