From b108ddf63ba32f94fc85f09a4c8ab3001b539398 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philipp=20Kr=C3=BCger?=
Date: Fri, 25 Aug 2023 14:29:12 +0200
Subject: [PATCH] chore: Implement latency-simulating benchmarks

---
 Cargo.lock                                    |   1 +
 car-mirror-benches/Cargo.toml                 |   5 +
 .../benches/simulated_latency.rs              | 176 ++++++++++++++++++
 3 files changed, 182 insertions(+)
 create mode 100644 car-mirror-benches/benches/simulated_latency.rs

diff --git a/Cargo.lock b/Cargo.lock
index 32f2802..129e063 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -407,6 +407,7 @@ dependencies = [
  "car-mirror",
  "criterion",
  "libipld",
+ "serde_ipld_dagcbor",
  "wnfs-common",
 ]

diff --git a/car-mirror-benches/Cargo.toml b/car-mirror-benches/Cargo.toml
index ba1c467..36bb027 100644
--- a/car-mirror-benches/Cargo.toml
+++ b/car-mirror-benches/Cargo.toml
@@ -12,6 +12,7 @@ async-trait = "0.1"
 bytes = "1.4.0"
 car-mirror = { path = "../car-mirror", version = "0.1", features = ["test_utils"] }
 libipld = "0.16.0"
+serde_ipld_dagcbor = "0.4.0"
 wnfs-common = "0.1.23"

 [dev-dependencies]
@@ -24,3 +25,7 @@ harness = false
 [[bench]]
 name = "artificially_slow_blockstore"
 harness = false
+
+[[bench]]
+name = "simulated_latency"
+harness = false
diff --git a/car-mirror-benches/benches/simulated_latency.rs b/car-mirror-benches/benches/simulated_latency.rs
new file mode 100644
index 0000000..a055fdd
--- /dev/null
+++ b/car-mirror-benches/benches/simulated_latency.rs
@@ -0,0 +1,176 @@
+use car_mirror::{
+    common::Config,
+    pull, push,
+    test_utils::{arb_ipld_dag, links_to_padded_ipld, setup_blockstore},
+};
+use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
+use std::{ops::Range, time::Duration};
+use wnfs_common::MemoryBlockStore;
+
+// We artificially slow down sending & receiving data.
+// This is the *one way* latency.
+const LATENCY: Duration = Duration::from_millis(150);
+// We also simulate limited bandwidth.
+// We assume that upload & download are asymmetrical.
+// Taking statistics from here: https://www.statista.com/statistics/896779/average-mobile-fixed-broadband-download-upload-speeds/
+// and using mobile numbers, so 10.33 Mbps for upload and 42.07 Mbps for download.
+// This gives us ~1291250 bytes per second upload and ~5258750 bytes per second download.
+// Inverting this gives us ~774 nanoseconds per byte upload and ~190 nanoseconds per byte download.
+const UPLOAD_DELAY_PER_BYTE: Duration = Duration::from_nanos(774);
+const DOWNLOAD_DELAY_PER_BYTE: Duration = Duration::from_nanos(190);
+
+async fn simulate_upload_latency(request_size: usize) {
+    let delay = LATENCY + UPLOAD_DELAY_PER_BYTE * request_size as u32;
+    async_std::task::sleep(delay).await;
+}
+
+async fn simulate_download_latency(response_size: usize) {
+    let delay = LATENCY + DOWNLOAD_DELAY_PER_BYTE * response_size as u32;
+    async_std::task::sleep(delay).await;
+}
+
+pub fn pull_with_simulated_latency_10kb_blocks(c: &mut Criterion) {
+    // Very highly connected
+    // 10KiB random data added
+    // ~61 blocks on average
+    pull_with_simulated_latency(c, 60..64, 0.9, 10 * 1024);
+}
+
+pub fn pull_with_simulated_latency_1kb_blocks(c: &mut Criterion) {
+    // Very highly connected
+    // 1KiB random data added
+    // ~625 blocks on average
+    pull_with_simulated_latency(c, 600..640, 0.9, 1024);
+}
+
+pub fn pull_with_simulated_latency(
+    c: &mut Criterion,
+    dag_size: impl Into<Range<u16>>,
+    edge_probability: f64,
+    block_padding: usize,
+) {
+    let mut rvg = car_mirror::test_utils::Rvg::deterministic();
+
+    let dag_size = dag_size.into();
+
+    let bench_name = format!(
+        "pull with simulated latency, {block_padding} byte blocks, ~{}..{} blocks",
+        dag_size.start, dag_size.end
+    );
+
+    c.bench_function(&bench_name, |b| {
+        b.iter_batched(
+            || {
+                let (blocks, root) = rvg.sample(&arb_ipld_dag(
+                    dag_size.clone(),
+                    edge_probability,
+                    links_to_padded_ipld(block_padding),
+                ));
+                let store = async_std::task::block_on(setup_blockstore(blocks)).unwrap();
+                (store, root)
+            },
+            |(ref server_store, root)| {
+                let client_store = &MemoryBlockStore::new();
+                let config = &Config::default();
+
+                // Simulate a multi-round protocol run in-memory
+                async_std::task::block_on(async move {
+                    let mut request = pull::request(root, None, config, client_store).await?;
+                    loop {
+                        let request_bytes = serde_ipld_dagcbor::to_vec(&request)?.len();
+                        simulate_upload_latency(request_bytes).await;
+
+                        let response = pull::response(root, request, config, server_store).await?;
+                        simulate_download_latency(response.bytes.len()).await;
+
+                        request = pull::request(root, Some(response), config, client_store).await?;
+
+                        if request.indicates_finished() {
+                            break;
+                        }
+                    }
+
+                    Ok::<(), anyhow::Error>(())
+                })
+                .unwrap();
+            },
+            BatchSize::LargeInput,
+        )
+    });
+}
+
+pub fn push_with_simulated_latency_10kb_blocks(c: &mut Criterion) {
+    // Very highly connected
+    // 10KiB random data added
+    // ~61 blocks on average
+    push_with_simulated_latency(c, 60..64, 0.9, 10 * 1024);
+}
+
+pub fn push_with_simulated_latency_1kb_blocks(c: &mut Criterion) {
+    // Very highly connected
+    // 1KiB random data added
+    // ~625 blocks on average
+    push_with_simulated_latency(c, 600..640, 0.9, 1024);
+}
+
+pub fn push_with_simulated_latency(
+    c: &mut Criterion,
+    dag_size: impl Into<Range<u16>>,
+    edge_probability: f64,
+    block_padding: usize,
+) {
+    let mut rvg = car_mirror::test_utils::Rvg::deterministic();
+
+    let dag_size = dag_size.into();
+
+    let bench_name = format!(
+        "push with simulated latency, {block_padding} byte blocks, ~{}..{} blocks",
+        dag_size.start, dag_size.end
+    );
+
+    c.bench_function(&bench_name, |b| {
+        b.iter_batched(
+            || {
+                let (blocks, root) = rvg.sample(&arb_ipld_dag(
+                    dag_size.clone(),
+                    edge_probability,
+                    links_to_padded_ipld(block_padding),
+                ));
+                let store = async_std::task::block_on(setup_blockstore(blocks)).unwrap();
+                (store, root)
+            },
+            |(ref client_store, root)| {
+                let server_store = &MemoryBlockStore::new();
+                let config = &Config::default();
+
+                // Simulate a multi-round protocol run in-memory
+                async_std::task::block_on(async move {
+                    let mut request = push::request(root, None, config, client_store).await?;
+                    loop {
+                        simulate_upload_latency(request.bytes.len()).await;
+
+                        let response = push::response(root, request, config, server_store).await?;
+                        let response_bytes = serde_ipld_dagcbor::to_vec(&response)?.len();
+                        simulate_download_latency(response_bytes).await;
+
+                        if response.indicates_finished() {
+                            break;
+                        }
+                        request = push::request(root, Some(response), config, client_store).await?;
+                    }
+
+                    Ok::<(), anyhow::Error>(())
+                })
+                .unwrap();
+            },
+            BatchSize::LargeInput,
+        )
+    });
+}
+
+criterion_group! {
+    name = benches;
+    config = Criterion::default().sample_size(10); // Reduced sample size due to ~2s per test
+    targets = pull_with_simulated_latency_10kb_blocks, pull_with_simulated_latency_1kb_blocks, push_with_simulated_latency_10kb_blocks, push_with_simulated_latency_1kb_blocks
+}
+criterion_main!(benches);
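
For reference, a minimal standalone sketch of the latency model above (a hypothetical example, not part of the patch; it assumes only the async-std crate and mirrors the constants introduced in simulated_latency.rs). It prints the simulated round-trip time for a single exchange with a 10 KiB request and a 100 KiB response:

use std::time::{Duration, Instant};

// Constants mirroring the ones added in simulated_latency.rs above.
const LATENCY: Duration = Duration::from_millis(150);
const UPLOAD_DELAY_PER_BYTE: Duration = Duration::from_nanos(774);
const DOWNLOAD_DELAY_PER_BYTE: Duration = Duration::from_nanos(190);

async fn simulate_upload_latency(request_size: usize) {
    async_std::task::sleep(LATENCY + UPLOAD_DELAY_PER_BYTE * request_size as u32).await;
}

async fn simulate_download_latency(response_size: usize) {
    async_std::task::sleep(LATENCY + DOWNLOAD_DELAY_PER_BYTE * response_size as u32).await;
}

fn main() {
    async_std::task::block_on(async {
        let start = Instant::now();
        // One protocol round: a 10 KiB request uploaded, a 100 KiB response downloaded.
        simulate_upload_latency(10 * 1024).await;
        simulate_download_latency(100 * 1024).await;
        // Expected: 150 ms + ~8 ms upload, then 150 ms + ~19 ms download, ~327 ms total.
        println!("simulated round trip: {:?}", start.elapsed());
    });
}

With these constants, every protocol round costs at least 300 ms of pure latency on top of the per-byte delays, so the number of request/response rounds tends to dominate the simulated transfer time in the benchmarks above.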