chore: Implement latency-simulating benchmarks
matheus23 committed Aug 25, 2023
1 parent 7e9b79a commit b108ddf
Showing 3 changed files with 182 additions and 0 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

5 changes: 5 additions & 0 deletions car-mirror-benches/Cargo.toml
@@ -12,6 +12,7 @@ async-trait = "0.1"
bytes = "1.4.0"
car-mirror = { path = "../car-mirror", version = "0.1", features = ["test_utils"] }
libipld = "0.16.0"
serde_ipld_dagcbor = "0.4.0"
wnfs-common = "0.1.23"

[dev-dependencies]
@@ -24,3 +25,7 @@ harness = false
[[bench]]
name = "artificially_slow_blockstore"
harness = false

[[bench]]
name = "simulated_latency"
harness = false
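
With harness = false, cargo skips its built-in bench harness and runs the target's own main, which criterion_main! generates further down in this commit. The new suite can presumably be invoked on its own with: cargo bench -p car-mirror-benches --bench simulated_latency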
176 changes: 176 additions & 0 deletions car-mirror-benches/benches/simulated_latency.rs
@@ -0,0 +1,176 @@
use car_mirror::{
common::Config,
pull, push,
test_utils::{arb_ipld_dag, links_to_padded_ipld, setup_blockstore},
};
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
use std::{ops::Range, time::Duration};
use wnfs_common::MemoryBlockStore;

// We artificially slow down sending & receiving data.
// This is the *one way* latency.
const LATENCY: Duration = Duration::from_millis(150);
// We also simulate limited bandwidth.
// We assume that upload & download are asymmetrical.
// Taking statistics from here: https://www.statista.com/statistics/896779/average-mobile-fixed-broadband-download-upload-speeds/
// and using mobile numbers, so 10.33 Mbps for upload and 42.07 Mbps for download.
// This gives us ~1291250 bytes per second upload and ~5258750 bytes per second download.
// Inverting this gives us ~774 nanoseconds per byte upload and ~190 nanoseconds per byte download.
const UPLOAD_DELAY_PER_BYTE: Duration = Duration::from_nanos(774);
const DOWNLOAD_DELAY_PER_BYTE: Duration = Duration::from_nanos(190);
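// For example (rough arithmetic): uploading a 10 KiB request costs about
// 150 ms + 10_240 * 774 ns ≈ 158 ms, so at these payload sizes the fixed
// one-way latency dominates the per-byte bandwidth term.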

async fn simulate_upload_latency(request_size: usize) {
let delay = LATENCY + UPLOAD_DELAY_PER_BYTE * request_size as u32;
async_std::task::sleep(delay).await;
}

async fn simulate_download_latency(response_size: usize) {
let delay = LATENCY + DOWNLOAD_DELAY_PER_BYTE * response_size as u32;
async_std::task::sleep(delay).await;
}

pub fn pull_with_simulated_latency_10kb_blocks(c: &mut Criterion) {
// Very highly connected
// 10KiB random data added
// ~61 blocks on average
pull_with_simulated_latency(c, 60..64, 0.9, 10 * 1024);
}

pub fn pull_with_simulated_latency_1kb_blocks(c: &mut Criterion) {
// Very highly connected
// 1KiB random data added
// ~625 blocks on average
pull_with_simulated_latency(c, 600..640, 0.9, 1024);
}

pub fn pull_with_simulated_latency(
c: &mut Criterion,
dag_size: impl Into<Range<u16>>,
edge_probability: f64,
block_padding: usize,
) {
let mut rvg = car_mirror::test_utils::Rvg::deterministic();

let dag_size = dag_size.into();

let bench_name = format!(
"pull with simulated latency, {block_padding} byte blocks, ~{}..{} blocks",
dag_size.start, dag_size.end
);

c.bench_function(&bench_name, |b| {
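// iter_batched runs the setup closure outside the measured section, so DAG
// generation and blockstore population don't count towards the benchmark time.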
b.iter_batched(
|| {
let (blocks, root) = rvg.sample(&arb_ipld_dag(
dag_size.clone(),
edge_probability,
links_to_padded_ipld(block_padding),
));
let store = async_std::task::block_on(setup_blockstore(blocks)).unwrap();
(store, root)
},
|(ref server_store, root)| {
let client_store = &MemoryBlockStore::new();
let config = &Config::default();

// Simulate a multi-round protocol run in-memory
async_std::task::block_on(async move {
let mut request = pull::request(root, None, config, client_store).await?;
loop {
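// One round trip: encode the request as DAG-CBOR to estimate its wire size,
// pay the upload cost, let the server compute the response, pay the download
// cost for the response bytes, then derive the next request from it.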
let request_bytes = serde_ipld_dagcbor::to_vec(&request)?.len();
simulate_upload_latency(request_bytes).await;

let response = pull::response(root, request, config, server_store).await?;
simulate_download_latency(response.bytes.len()).await;

request = pull::request(root, Some(response), config, client_store).await?;

if request.indicates_finished() {
break;
}
}

Ok::<(), anyhow::Error>(())
})
.unwrap();
},
BatchSize::LargeInput,
)
});
}

pub fn push_with_simulated_latency_10kb_blocks(c: &mut Criterion) {
// Very highly connected
// 10KiB random data added
// ~61 blocks on average
push_with_simulated_latency(c, 60..64, 0.9, 10 * 1024);
}

pub fn push_with_simulated_latency_1kb_blocks(c: &mut Criterion) {
// Very highly connected
// 1KiB random data added
// ~625 blocks on average
push_with_simulated_latency(c, 600..640, 0.9, 1024);
}

pub fn push_with_simulated_latency(
c: &mut Criterion,
dag_size: impl Into<Range<u16>>,
edge_probability: f64,
block_padding: usize,
) {
let mut rvg = car_mirror::test_utils::Rvg::deterministic();

let dag_size = dag_size.into();

let bench_name = format!(
"push with simulated latency, {block_padding} byte blocks, ~{}..{} blocks",
dag_size.start, dag_size.end
);

c.bench_function(&bench_name, |b| {
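// As in the pull benchmark, iter_batched keeps DAG generation and blockstore
// setup out of the measured routine.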
b.iter_batched(
|| {
let (blocks, root) = rvg.sample(&arb_ipld_dag(
dag_size.clone(),
edge_probability,
links_to_padded_ipld(block_padding),
));
let store = async_std::task::block_on(setup_blockstore(blocks)).unwrap();
(store, root)
},
|(ref client_store, root)| {
let server_store = &MemoryBlockStore::new();
let config = &Config::default();

// Simulate a multi-round protocol run in-memory
async_std::task::block_on(async move {
let mut request = push::request(root, None, config, client_store).await?;
loop {
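// One round trip: the push request already exposes its payload as bytes, so
// pay the upload cost directly; the response is encoded as DAG-CBOR to
// estimate its wire size before paying the download cost.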
simulate_upload_latency(request.bytes.len()).await;

let response = push::response(root, request, config, server_store).await?;
let response_bytes = serde_ipld_dagcbor::to_vec(&response)?.len();
simulate_download_latency(response_bytes).await;

if response.indicates_finished() {
break;
}
request = push::request(root, Some(response), config, client_store).await?;
}

Ok::<(), anyhow::Error>(())
})
.unwrap();
},
BatchSize::LargeInput,
)
});
}

criterion_group! {
name = benches;
config = Criterion::default().sample_size(10); // Reduced sample size due to ~2s per test
targets = pull_with_simulated_latency_10kb_blocks, pull_with_simulated_latency_1kb_blocks, push_with_simulated_latency_10kb_blocks, push_with_simulated_latency_1kb_blocks
}
criterion_main!(benches);
