Remove timeouts from OBS API requests
gitlab-runner-rs 0.0.7 now respects job cancellation and timeouts from
GitLab, so there is no longer any reason to add additional timeouts to
the API requests.

The tests now need timeouts of their own, since the retries would
otherwise go on indefinitely. This slows them down a bit, so the single
test function was refactored into four more fine-grained tests that can
run in parallel (it's a bit cleaner anyway).
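
To illustrate the new test-side pattern with a minimal sketch: retry_request()
no longer gives up on its own, so a test has to bound the total runtime itself
with tokio::time::timeout. Here flaky_call() is a hypothetical stand-in for an
OBS request that keeps failing with a retryable error, and retry_request() is
assumed to be the helper from this change:

use std::time::Duration;

// Hypothetical stand-in for an OBS API call that keeps failing with a
// retryable (non-client) error.
async fn flaky_call() -> Result<(), std::io::Error> {
    Err(std::io::Error::new(std::io::ErrorKind::Other, "transient failure"))
}

#[tokio::test]
async fn retries_are_bounded_by_the_test() {
    // retry_request() itself never stops retrying, so the test wraps it in
    // tokio::time::timeout to put an upper bound on the total runtime.
    let result = tokio::time::timeout(
        Duration::from_millis(500),
        retry_request(flaky_call),
    )
    .await;

    // The inner future never resolves, so the timeout elapses and the
    // outer result is an error.
    assert!(result.is_err());
}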

Signed-off-by: Ryan Gonzalez <[email protected]>
refi64 authored and sjoerdsimons committed Jan 11, 2024
1 parent 1d9954c commit dc5787d
Showing 4 changed files with 83 additions and 56 deletions.
4 changes: 2 additions & 2 deletions src/binaries.rs
@@ -7,7 +7,7 @@ use tokio::{fs::File as AsyncFile, io::AsyncSeekExt};
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tracing::{info_span, instrument, Instrument};

use crate::retry::{retry_large_request, retry_request};
use crate::retry::retry_request;

#[instrument(skip(client))]
pub async fn download_binaries(
@@ -28,7 +28,7 @@ pub async fn download_binaries(
let mut binaries = HashMap::new();

for binary in binary_list.binaries {
let mut dest = retry_large_request(|| {
let mut dest = retry_request(|| {
let binary = binary.clone();
let client = client.clone();
async move {
4 changes: 2 additions & 2 deletions src/monitor.rs
@@ -11,7 +11,7 @@ use tokio::{
};
use tracing::{debug, instrument};

use crate::retry::{retry_large_request, retry_request};
use crate::retry::retry_request;

#[derive(Debug)]
pub enum PackageCompletion {
@@ -245,7 +245,7 @@ impl ObsMonitor
pub async fn download_build_log(&self) -> Result<LogFile> {
const LOG_LEN_TO_CHECK_FOR_MD5: u64 = 2500;

let mut file = retry_large_request(|| async {
let mut file = retry_request(|| async {
let mut file = AsyncFile::from_std(
tempfile::tempfile().wrap_err("Failed to create tempfile to build log")?,
);
123 changes: 77 additions & 46 deletions src/retry.rs
@@ -8,6 +8,8 @@ use open_build_service_api as obs;
use tokio::sync::Mutex;
use tracing::instrument;

const INITIAL_INTERVAL: Duration = Duration::from_millis(300);

fn is_client_error(err: &(dyn std::error::Error + 'static)) -> bool {
err.downcast_ref::<reqwest::Error>()
.and_then(|e| e.status())
@@ -30,7 +32,8 @@ fn is_caused_by_client_error(report: &Report) -> bool {
})
}

async fn retry_request_impl<T, E, Fut, Func>(backoff_limit: Duration, func: Func) -> Result<T>
#[instrument(skip(func))]
pub async fn retry_request<T, E, Fut, Func>(func: Func) -> Result<T>
where
Fut: Future<Output = Result<T, E>>,
Func: FnMut() -> Fut,
@@ -39,7 +42,8 @@ where
let func = Arc::new(Mutex::new(func));
backoff::future::retry(
ExponentialBackoff {
max_elapsed_time: Some(backoff_limit),
max_elapsed_time: None,
initial_interval: INITIAL_INTERVAL,
..Default::default()
},
move || {
@@ -60,32 +64,11 @@ where
.await
}

#[instrument(skip(func))]
pub async fn retry_request<T, E, Fut, Func>(func: Func) -> Result<T>
where
Fut: Future<Output = Result<T, E>>,
Func: FnMut() -> Fut,
E: Into<Report>,
{
const BACKOFF_LIMIT: Duration = Duration::from_secs(10 * 60); // 10 minutes
retry_request_impl(BACKOFF_LIMIT, func).await
}

#[instrument(skip(func))]
pub async fn retry_large_request<T, E, Fut, Func>(func: Func) -> Result<T>
where
Fut: Future<Output = Result<T, E>>,
Func: FnMut() -> Fut,
E: Into<Report>,
{
const BACKOFF_LIMIT: Duration = Duration::from_secs(60 * 60); // 1 hour
retry_request_impl(BACKOFF_LIMIT, func).await
}

#[cfg(test)]
mod tests {
use claim::*;
use open_build_service_api as obs;
use rstest::*;
use wiremock::{
matchers::{method, path_regex},
Mock, MockServer, ResponseTemplate,
@@ -95,10 +78,12 @@ mod tests {

use super::*;

const LIMIT: Duration = Duration::from_secs(1);
fn wrap_in_io_error(err: obs::Error) -> std::io::Error {
std::io::Error::new(std::io::ErrorKind::Other, err)
}

#[tokio::test]
async fn test_retry_on_non_client_errors() {
#[fixture]
async fn server() -> MockServer {
let server = MockServer::start().await;

Mock::given(method("GET"))
@@ -116,6 +101,13 @@ mod tests {
.mount(&server)
.await;

server
}

#[rstest]
#[tokio::test]
async fn test_retry_on_non_client_errors(server: impl Future<Output = MockServer>) {
let server = server.await;
let client = obs::Client::new(
server.uri().parse().unwrap(),
TEST_USER.to_owned(),
@@ -124,50 +116,89 @@ mod tests {

let mut attempts = 0;
assert_err!(
retry_request_impl(LIMIT, || {
attempts += 1;
async { client.project("500".to_owned()).meta().await }
})
tokio::time::timeout(
Duration::from_millis(2000),
retry_request(|| {
attempts += 1;
async { client.project("500".to_owned()).meta().await }
})
)
.await
);
assert_gt!(attempts, 1);
}

#[rstest]
#[tokio::test]
async fn test_retry_on_nested_non_client_errors(server: impl Future<Output = MockServer>) {
let server = server.await;
let client = obs::Client::new(
server.uri().parse().unwrap(),
TEST_USER.to_owned(),
TEST_PASS.to_owned(),
);

let mut attempts = 0;
assert_err!(
retry_request_impl(LIMIT, || {
attempts += 1;
async {
client
.project("500".to_owned())
.meta()
.await
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
}
})
tokio::time::timeout(
Duration::from_millis(2000),
retry_request(|| {
attempts += 1;
async {
client
.project("500".to_owned())
.meta()
.await
.map_err(wrap_in_io_error)
}
})
)
.await
);
assert_gt!(attempts, 1);
}

attempts = 0;
#[rstest]
#[tokio::test]
async fn test_no_retry_on_client_errors(server: impl Future<Output = MockServer>) {
let server = server.await;
let client = obs::Client::new(
server.uri().parse().unwrap(),
TEST_USER.to_owned(),
TEST_PASS.to_owned(),
);

let mut attempts = 0;
assert_err!(
retry_request_impl(LIMIT, || {
retry_request(|| {
attempts += 1;
async { client.project("403".to_owned()).meta().await }
})
.await
);
assert_eq!(attempts, 1);
}

attempts = 0;
#[rstest]
#[tokio::test]
async fn test_no_retry_on_nested_client_errors(server: impl Future<Output = MockServer>) {
let server = server.await;
let client = obs::Client::new(
server.uri().parse().unwrap(),
TEST_USER.to_owned(),
TEST_PASS.to_owned(),
);

let mut attempts = 0;
assert_err!(
retry_request_impl(LIMIT, || {
retry_request(|| {
attempts += 1;
async {
client
.project("403".to_owned())
.meta()
.await
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
.map_err(wrap_in_io_error)
}
})
.await
8 changes: 2 additions & 6 deletions src/upload.rs
@@ -10,11 +10,7 @@ use md5::{Digest, Md5};
use open_build_service_api as obs;
use tracing::{debug, info_span, instrument, trace, Instrument};

use crate::{
artifacts::ArtifactDirectory,
dsc::Dsc,
retry::{retry_large_request, retry_request},
};
use crate::{artifacts::ArtifactDirectory, dsc::Dsc, retry::retry_request};

type Md5String = String;

@@ -234,7 +230,7 @@ impl ObsDscUploader {
debug!("Uploading file");
let file = artifacts.get_file(root.join(filename).as_str()).await?;

retry_large_request(|| {
retry_request(|| {
file.try_clone().then(|file| async {
let file = file.wrap_err("Failed to clone file")?;
self.client