Skip to content

Commit

Permalink
Don't run benchmarks with debug, plus minor refactoring (#1104)
Browse files Browse the repository at this point in the history
## Description of change

Remove the `--debug` flag when mounting S3 for the benchmarks.

This is now available via an S3_DEBUG environment variable which, when
set, will add `--debug` back again.

Also, within `fs_bench.sh`, merge the read and write benchmark methods
into a single one, which is parameterized, since they were almost
identical. This avoids having to make the change described above in two
places and simplifies the code going forwards.

## Does this change impact existing behavior?

This changes the benchmarks to run without --debug to the mount command,
which creates a discontinuity in benchmark results, and may improve them
(though there's no actual performance improvement here).

## Does this change need a changelog entry in any of the crates?

No.

---

By submitting this pull request, I confirm that my contribution is made
under the terms of the Apache 2.0 license and I agree to the terms of
the [Developer Certificate of Origin
(DCO)](https://developercertificate.org/).

---------

Signed-off-by: Andrew Peace <[email protected]>
  • Loading branch information
adpeace authored Nov 6, 2024
1 parent 53197c9 commit 50433e6
Show file tree
Hide file tree
Showing 3 changed files with 39 additions and 74 deletions.
103 changes: 31 additions & 72 deletions mountpoint-s3/scripts/fs_bench.sh
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,10 @@ if [[ -n "${S3_ENDPOINT_URL}" ]]; then
optional_args+="--endpoint-url=${S3_ENDPOINT_URL}"
fi

if [[ -n "${S3_DEBUG}" ]]; then
optional_args+=" --debug"
fi

base_dir=$(dirname "$0")
project_dir="${base_dir}/../.."
cd ${project_dir}
Expand Down Expand Up @@ -104,8 +108,20 @@ should_run_job() {
fi
}

read_benchmark () {
jobs_dir=mountpoint-s3/scripts/fio/read
# Run all benchmarks within a category. Fio job definitions should exist under a directory
# with the category name, passed as first argument, inside mountpoint-s3/scripts/fio.
#
# Params:
# $1: benchmark category.
run_benchmarks() {
category=$1
jobs_dir=mountpoint-s3/scripts/fio/$category

if [ $category == "read" ]; then
part_size="--part-size=16777216"
else
part_size=""
fi

for job_file in "${jobs_dir}"/*.fio; do

Expand All @@ -122,7 +138,7 @@ read_benchmark () {

# cleanup mount directory and log directory
cleanup() {
echo "read_benchmark:cleanup"
echo "${category}_benchmark:cleanup"
# unmount file system only if it is mounted
! mountpoint -q ${mount_dir} || sudo umount ${mount_dir}
rm -rf ${mount_dir}
Expand All @@ -139,11 +155,10 @@ read_benchmark () {
set +e
cargo run --quiet --release -- \
${S3_BUCKET_NAME} ${mount_dir} \
--debug \
--allow-delete \
--log-directory=${log_dir} \
--prefix=${S3_BUCKET_TEST_PREFIX} \
--part-size=16777216 \
$part_size \
${optional_args}
mount_status=$?
set -e
Expand All @@ -153,86 +168,30 @@ read_benchmark () {
fi

# set bench file
bench_file=${S3_BUCKET_BENCH_FILE}
# run against small file if the job file ends with small.fio
if [[ $job_file == *small.fio ]]; then
bench_file=${S3_BUCKET_SMALL_BENCH_FILE}
fi

# run the benchmark
run_fio_job $job_file $bench_file $mount_dir $log_dir

# collect resource utilization metrics (peak memory usage)
cargo run --bin mount-s3-log-analyzer ${log_dir} ${results_dir}/${job_name}_peak_mem.json ${job_name}

cleanup

done
}

write_benchmark () {
jobs_dir=mountpoint-s3/scripts/fio/write

for job_file in "${jobs_dir}"/*.fio; do

if ! should_run_job "${job_file}"; then
echo "Skipping job ${job_file} because it does not match ${S3_JOB_NAME_FILTER}"
continue
fi

job_name=$(basename "${job_file}")
job_name="${job_name%.*}"
log_dir=logs/${job_name}


# cleanup mount directory and log directory
cleanup() {
echo "write_benchmark:cleanup"
# unmount file system only if it is mounted
! mountpoint -q ${mount_dir} || sudo umount ${mount_dir}
rm -rf ${mount_dir}
rm -rf ${log_dir}
}

# trap cleanup on exit
trap 'cleanup' EXIT

rm -rf ${log_dir}
mkdir -p ${log_dir}

# mount file system
mount_dir=$(mktemp -d /tmp/fio-XXXXXXXXXXXX)
set +e
cargo run --quiet --release -- \
${S3_BUCKET_NAME} ${mount_dir} \
--debug \
--allow-delete \
--log-directory=${log_dir} \
--prefix=${S3_BUCKET_TEST_PREFIX} \
${optional_args}
mount_status=$?
set -e
if [ $mount_status -ne 0 ]; then
echo "Failed to mount file system"
exit 1
if [[ $category == "write" ]]; then
bench_file=${job_name}_${RANDOM}.dat
else
bench_file=${S3_BUCKET_BENCH_FILE}
# run against small file if the job file ends with small.fio
if [[ $job_file == *small.fio ]]; then
bench_file=${S3_BUCKET_SMALL_BENCH_FILE}
fi
fi

# set bench file
bench_file=${job_name}_${RANDOM}.dat

# run the benchmark
run_fio_job $job_file $bench_file $mount_dir $log_dir

# collect resource utilization metrics (peak memory usage)
cargo run --bin mount-s3-log-analyzer ${log_dir} ${results_dir}/${job_name}_peak_mem.json ${job_name}

cleanup
trap - EXIT

done
}

read_benchmark
write_benchmark
run_benchmarks read
run_benchmarks write

# combine all bench results into one json file
echo "Throughput:"
Expand Down
5 changes: 4 additions & 1 deletion mountpoint-s3/scripts/fs_cache_bench.sh
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,10 @@ if [[ -n "${S3_ENDPOINT_URL}" ]]; then
optional_args+="--endpoint-url=${S3_ENDPOINT_URL}"
fi

if [[ -n "${S3_DEBUG}" ]]; then
optional_args+=" --debug"
fi

base_dir=$(dirname "$0")
project_dir="${base_dir}/../.."
cd ${project_dir}
Expand Down Expand Up @@ -164,7 +168,6 @@ cache_benchmark () {
set +e
cargo run --quiet --release -- \
${S3_BUCKET_NAME} ${mount_dir} \
--debug \
--allow-delete \
--cache=${cache_dir} \
--log-directory=${log_dir} \
Expand Down
5 changes: 4 additions & 1 deletion mountpoint-s3/scripts/fs_latency_bench.sh
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,10 @@ if [[ -n "${S3_ENDPOINT_URL}" ]]; then
optional_args+="--endpoint-url=${S3_ENDPOINT_URL}"
fi

if [[ -n "${S3_DEBUG}" ]]; then
optional_args+=" --debug"
fi

base_dir=$(dirname "$0")
project_dir="${base_dir}/../.."
cd ${project_dir}
Expand Down Expand Up @@ -136,7 +140,6 @@ for job_file in "${jobs_dir}"/*.fio; do

# mount file system
cargo run --release ${S3_BUCKET_NAME} ${mount_dir} \
--debug \
--allow-delete \
--log-directory=$log_dir \
--prefix=${S3_BUCKET_TEST_PREFIX} \
Expand Down

0 comments on commit 50433e6

Please sign in to comment.