Implement some much-needed improvements for the UHS load-generators #280

Draft: wants to merge 4 commits into trunk

79 changes: 79 additions & 0 deletions scripts/get-tx-lifecycle.sh
@@ -0,0 +1,79 @@
#!/usr/bin/env bash

LOGDIR='.'
TXID=

IFS='' read -r -d '' usage <<'EOF'
Usage: %s [options]

Options:
-h, --help print this help and exit
-d, --log-dir=PATH PATH where the log-files are located
(defaults to '.' if not specified)
-t, --tx-id=ID ID of the transaction to trace
EOF

_help=
if [[ $# -eq 0 ]]; then
_help=1
fi

_err=0
while [[ $# -gt 0 ]]; do
optarg=
shft_cnt=1
if [[ "$1" =~ [=] ]]; then
optarg="${1#*=}"
elif [[ "$1" =~ ^-- && $# -gt 1 && ! "$2" =~ ^- ]]; then
optarg="$2"
shft_cnt=2
elif [[ "$1" =~ ^-[^-] && $# -gt 1 && ! "$2" =~ ^- ]]; then
optarg="$2"
shft_cnt=2
elif [[ "$1" =~ ^-[^-] ]]; then
optarg="${1/??/}"
fi

case "$1" in
-d*|--log-dir*) LOGDIR="${optarg:-$LOGDIR}"; shift "$shft_cnt";;
-t*|--tx-id*)
if [[ ! "$optarg" ]]; then
printf '`--tx-id` requires an argument\n'
_help=1; _err=1;
break
else
TXID="$optarg"
shift "$shft_cnt"
fi;;
-h|--help) _help=1; shift "$shft_cnt";;
*)
printf 'Unrecognized option: %s\n' "$1"
_help=1; _err=1;
break;;
esac
done

if [[ -n "$_help" ]]; then
printf "$usage" "$(basename $0)"
exit "$_err"
fi

pushd "$LOGDIR" &>/dev/null

all_lines=$(grep -n "$TXID.*at" *.{log,txt})
sorted_lines=$(printf "%s\n" "$all_lines" | awk -F ' at ' '{ print $2, $0 }' | sort | sed 's/^[^ ]* //')

printf "%s\n" "$sorted_lines"

fst=$(printf "%s\n" "$sorted_lines" | head -n 1)
lst=$(printf "%s\n" "$sorted_lines" | tail -n 1)
if [[ $(printf "%s\n" "$fst" | cut -d':' -f1) != $(printf "%s\n" "$lst" | cut -d':' -f1) ]]; then
printf 'First and last message for %s are in different logs!\n' "$TXID"
fi

start=$(printf "%s\n" "$fst" | awk -F ' at ' '{ print $2 }')
end=$(printf "%s\n" "$lst" | awk -F ' at ' '{ print $2 }')

printf "total elapsed time (assuming all clocks synchronized): %d ticks\n" "$(( end - start ))"

popd &>/dev/null
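
The script is pointed at the log directory of a finished benchmark run. A hypothetical invocation (the directory name and transaction ID are placeholders, not output from a real run):

./scripts/get-tx-lifecycle.sh --log-dir=build/test-<epoch> --tx-id=<hex-tx-id>

It prints every log line mentioning the transaction ID, sorted by the embedded tick count, warns when the first and last message come from different log files, and reports the total elapsed ticks between them.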
42 changes: 35 additions & 7 deletions scripts/native-system-benchmark.sh
@@ -14,7 +14,8 @@ TL=$(git rev-parse --show-toplevel)
RT="${TL:-$CWD}"
BLD="$RT"/build
SEEDDIR="$BLD"/preseeds
TESTDIR="$BLD"/test-$(date +"%s")
TESTID=$(date +"%s")
TESTDIR="$BLD"/test-"$TESTID"

IFS='' read -r -d '' usage <<'EOF'
Usage: %s [options]
@@ -24,6 +25,8 @@ Options:
-c, --config=PATH use PATH as the test configuration
-s, --samples=TIME run test for TIME seconds (defaults to 30)
(or indefinitely if set to inf, or infinity)
-t, --test-dir=PATH use PATH for conducting the test instead of
generating one based on the current time

Cleanup:
--clean delete all previous test and preseed artifacts
@@ -51,6 +54,11 @@ fi

_err=0
while [[ $# -gt 0 ]]; do
if [[ "$1" = '--' ]]; then
shift 1
break
fi

optarg=
shft_cnt=1
if [[ "$1" =~ [=] ]]; then
@@ -86,6 +94,13 @@ while [[ $# -gt 0 ]]; do
*) DBG="$optarg";;
esac
shift "$shft_cnt";;
-t*|--test-dir*)
if [[ "${optarg:0:1}" == '/' ]]; then
TESTDIR="$optarg"
else
TESTDIR="$CWD/$optarg"
fi
shift "$shft_cnt";;
-c*|--config*)
if [[ "$optarg" = /* ]]; then
ORIG_CFG="${optarg}"
@@ -111,7 +126,7 @@ if [[ -n "$_help" ]]; then
exit "$_err"
fi

if [[ -z "$ORIG_CFG" ]]; then
if [[ -z "$ORIG_CFG" && "$TESTDIR" = "$BLD"/test-"$TESTID" ]]; then
printf '%s\n' 'No config specified; exiting'
exit 0
fi
@@ -136,7 +151,20 @@ BEGIN {
EOF

CFG="$TESTDIR"/config
awk "$normalize" "$ORIG_CFG" > "$CFG"
if [[ -f "$CFG" ]]; then
printf 'WARNING: Running from a pre-existing test directory\n'
if grep -q 'seed' "$CFG"; then
# loadgens don't currently serialize their wallet state to-disk
# so effectively all funds are orphaned in this case; disabling
# seeding means the new loadgens will mint and be able to send
# new transactions
printf 'Disabling seeding in-favor of minting\n'
grep -v 'seed' "$CFG" > "$TESTDIR"/config."$TESTID"
CFG="$TESTDIR"/config."$TESTID"
fi
else
awk "$normalize" "$ORIG_CFG" > "$CFG"
fi

twophase=$(grep -q '2pc=1' "$CFG" && printf '1\n' || printf '0\n')
arch=
@@ -151,7 +179,7 @@ on_int() {
printf 'Interrupting all components\n'
trap '' SIGINT # avoid interrupting ourself
for i in $PIDS; do # intentionally unquoted
if [[ -n "RECORD" ]]; then
if [[ -n "$RECORD" ]]; then
kill -SIGINT -- "-$i"
else
kill -SIGINT -- "$i"
@@ -192,7 +220,7 @@

printf 'Terminating any remaining processes\n'
for i in $PIDS; do # intentionally unquoted
if [[ -n "RECORD" ]]; then
if [[ -n "$RECORD" ]]; then
kill -SIGTERM -- "-$i"
else
kill -SIGTERM -- "$i"
@@ -322,7 +350,7 @@ launch() {
"$RT"/scripts/wait-for-it.sh -q -t 5 -h localhost -p "$ep"
done
printf 'Launched logical %s %d, replica %d [PID: %d]\n' "$1" "$id" "$node" "$PID"
if [[ -n "RECORD" ]]; then
if [[ -n "$RECORD" ]]; then
PIDS="$PIDS $(getpgid $PID)"
else
PIDS="$PIDS $PID"
@@ -335,7 +363,7 @@ launch() {
"$RT"/scripts/wait-for-it.sh -q -t 5 -h localhost -p "$ep"
done
printf 'Launched %s %d [PID: %d]\n' "$1" "$id" "$PID"
if [[ -n "RECORD" ]]; then
if [[ -n "$RECORD" ]]; then
PIDS="$PIDS $(getpgid $PID)"
else
PIDS="$PIDS $PID"
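
Taken together, the new --test-dir option and the pre-existing-config branch above let a benchmark be repeated in place. A hypothetical sequence (config name and directory are placeholders):

./scripts/native-system-benchmark.sh --config=2pc.cfg
./scripts/native-system-benchmark.sh --test-dir=build/test-<epoch>

The first run creates build/test-<epoch> as before; the second reuses the config found there, and because the load generators do not persist wallet state, the script copies that config to config.<epoch> with every "seed" line removed, so the fresh load generators mint new outputs rather than looking for pre-seeded funds they no longer control.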
12 changes: 12 additions & 0 deletions src/uhs/twophase/coordinator/controller.cpp
@@ -689,6 +689,12 @@ namespace cbdc::coordinator {
return false;
}

auto recv_time = std::chrono::high_resolution_clock::now()
.time_since_epoch()
.count();
m_logger->trace("Received", to_string(tx.m_id), "at",
recv_time);

if(!transaction::validation::check_attestations(
tx,
m_opts.m_sentinel_public_keys,
@@ -719,6 +725,12 @@
m_current_txs->emplace(
tx.m_id,
std::make_pair(std::move(result_callback), idx));

auto add_time = std::chrono::high_resolution_clock::now()
.time_since_epoch()
.count();
m_logger->trace("Added", to_string(tx.m_id), "to batch at",
add_time);
return true;
}();
if(added) {
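
These trace lines are what scripts/get-tx-lifecycle.sh consumes: each one couples the transaction ID with the word "at" and a raw tick count, the exact shape of the script's match. A minimal sketch of that lookup, run from the log directory (mirroring the grep in the script):

grep -n "$TXID.*at" *.{log,txt}

The tick counts come from std::chrono::high_resolution_clock, whose tick period and epoch are implementation-defined, so differences between components are only meaningful when the hosts' clocks agree; the script's summary line explicitly assumes all clocks are synchronized.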
10 changes: 10 additions & 0 deletions src/uhs/twophase/locking_shard/locking_shard.cpp
@@ -103,6 +103,11 @@ namespace cbdc::locking_shard {

auto locking_shard::check_and_lock_tx(const tx& t) -> bool {
bool success{true};
auto recv_time = std::chrono::high_resolution_clock::now()
.time_since_epoch()
.count();
m_logger->trace("Began check-and-lock of",
to_string(t.m_tx.m_id), "at", recv_time);
if(!transaction::validation::check_attestations(
t.m_tx,
m_opts.m_sentinel_public_keys,
@@ -129,6 +134,11 @@
}
}
}
auto resp_time = std::chrono::high_resolution_clock::now()
.time_since_epoch()
.count();
m_logger->trace("Completed check-and-lock of",
to_string(t.m_tx.m_id), "at", resp_time);
return success;
}

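
With both the start and the end of check-and-lock logged for the same ID, the shard's processing time can be separated from queueing and network time. A rough one-off sketch (the log file name, and the assumption that the logger joins its arguments with single spaces, are both hypothetical):

start=$(grep "Began check-and-lock of $TXID" shard0_0.log | awk -F ' at ' '{ print $2 }')
end=$(grep "Completed check-and-lock of $TXID" shard0_0.log | awk -F ' at ' '{ print $2 }')
printf 'check-and-lock: %d ticks\n' "$(( end - start ))"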
29 changes: 27 additions & 2 deletions src/uhs/twophase/sentinel_2pc/controller.cpp
@@ -98,9 +98,14 @@ namespace cbdc::sentinel_2pc {
auto controller::execute_transaction(
transaction::full_tx tx,
execute_result_callback_type result_callback) -> bool {
auto tx_id = transaction::tx_id(tx);
auto recv_time = std::chrono::high_resolution_clock::now()
.time_since_epoch()
.count();
m_logger->trace("Began execution of", to_string(tx_id), "at",
recv_time);
const auto validation_err = transaction::validation::check_tx(tx);
if(validation_err.has_value()) {
auto tx_id = transaction::tx_id(tx);
m_logger->debug(
"Rejected (",
transaction::validation::to_string(validation_err.value()),
@@ -119,6 +124,13 @@
compact_tx.m_attestations.insert(attestation);
}

auto fwd_time = std::chrono::high_resolution_clock::now()
.time_since_epoch()
.count();

m_logger->trace("Accumulating attestations for", to_string(tx_id), "at",
fwd_time);

gather_attestations(tx, std::move(result_callback), compact_tx, {});

return true;
@@ -143,13 +155,23 @@
auto controller::validate_transaction(
transaction::full_tx tx,
validate_result_callback_type result_callback) -> bool {
auto recv_time = std::chrono::high_resolution_clock::now()
.time_since_epoch()
.count();
m_logger->trace("Began validation of",
to_string(transaction::tx_id(tx)), "at", recv_time);
const auto validation_err = transaction::validation::check_tx(tx);
if(validation_err.has_value()) {
result_callback(std::nullopt);
return true;
}
auto compact_tx = cbdc::transaction::compact_tx(tx);
auto attestation = compact_tx.sign(m_secp.get(), m_privkey);
auto resp_time = std::chrono::high_resolution_clock::now()
.time_since_epoch()
.count();
m_logger->trace("Validated", to_string(transaction::tx_id(tx)), "at",
resp_time);
result_callback(std::move(attestation));
return true;
}
@@ -201,7 +223,10 @@
return;
}

m_logger->debug("Accepted", to_string(ctx.m_id));
auto acc_time = std::chrono::high_resolution_clock::now()
.time_since_epoch()
.count();
m_logger->debug("Accepted", to_string(ctx.m_id), "at", acc_time);

send_compact_tx(ctx, std::move(result_callback));
}
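
With the sentinel, coordinator, and shard all instrumented, a transaction that commits cleanly should leave a trail roughly in this order (a sketch assembled from the messages added in this pull request, not captured output; the validation lines land in the peer sentinels' own logs):

sentinel:       Began execution of <tx-id> at ...
sentinel:       Accumulating attestations for <tx-id> at ...
peer sentinel:  Began validation of <tx-id> at ...
peer sentinel:  Validated <tx-id> at ...
sentinel:       Accepted <tx-id> at ...
coordinator:    Received <tx-id> at ...
coordinator:    Added <tx-id> to batch at ...
shard:          Began check-and-lock of <tx-id> at ...
shard:          Completed check-and-lock of <tx-id> at ...

get-tx-lifecycle.sh recovers this ordering by sorting on the value after " at ", so the gaps between adjacent lines give a coarse per-stage latency breakdown.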
24 changes: 24 additions & 0 deletions src/util/common/config.cpp
@@ -227,6 +227,13 @@ namespace cbdc::config {
return ss.str();
}

auto get_loadgen_loglevel_key(size_t loadgen_id) -> std::string {
std::stringstream ss;
ss << loadgen_prefix << loadgen_id << config_separator
<< loglevel_postfix;
return ss.str();
}

auto get_sentinel_private_key_key(size_t sentinel_id) -> std::string {
auto ss = std::stringstream();
get_sentinel_key_prefix(ss, sentinel_id);
@@ -614,6 +621,23 @@

opts.m_loadgen_count
= cfg.get_ulong(loadgen_count_key).value_or(opts.m_loadgen_count);
opts.m_loadgen_tps_target = cfg.get_ulong(tps_target_key)
.value_or(opts.m_loadgen_tps_target);
opts.m_loadgen_tps_step_time
= cfg.get_decimal(tps_steptime_key)
.value_or(opts.m_loadgen_tps_step_time);
opts.m_loadgen_tps_step_size
= cfg.get_decimal(tps_stepsize_key)
.value_or(opts.m_loadgen_tps_step_size);
opts.m_loadgen_tps_initial = cfg.get_decimal(tps_initial_key)
.value_or(opts.m_loadgen_tps_initial);
for(size_t i{0}; i < opts.m_loadgen_count; ++i) {
const auto loadgen_loglevel_key = get_loadgen_loglevel_key(i);
const auto loadgen_loglevel
= cfg.get_loglevel(loadgen_loglevel_key)
.value_or(defaults::log_level);
opts.m_loadgen_loglevels.push_back(loadgen_loglevel);
}
}

auto read_options(const std::string& config_file)
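
A hypothetical configuration fragment exercising the new options (values are illustrative only; the key strings are declared in config.hpp below, and loadgen0_loglevel assumes the same <component><id>_loglevel naming used for the other components):

loadgen_count=2
loadgen0_loglevel="TRACE"
loadgen1_loglevel="WARN"
loadgen_tps_target=10000
loadgen_tps_step_start=0.1
loadgen_tps_step_percentage=0.05
loadgen_tps_step_time=2.5

Every key falls back to its existing default through value_or, so configurations written before this change keep parsing unchanged.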
17 changes: 17 additions & 0 deletions src/util/common/config.hpp
@@ -100,6 +100,11 @@ namespace cbdc::config {
static constexpr auto output_count_key = "loadgen_sendtx_output_count";
static constexpr auto invalid_rate_key = "loadgen_invalid_tx_rate";
static constexpr auto fixed_tx_rate_key = "loadgen_fixed_tx_rate";
static constexpr auto loadgen_prefix = "loadgen";
static constexpr auto tps_target_key = "loadgen_tps_target";
static constexpr auto tps_steptime_key = "loadgen_tps_step_time";
static constexpr auto tps_stepsize_key = "loadgen_tps_step_percentage";
static constexpr auto tps_initial_key = "loadgen_tps_step_start";
static constexpr auto archiver_count_key = "archiver_count";
static constexpr auto watchtower_count_key = "watchtower_count";
static constexpr auto watchtower_prefix = "watchtower";
@@ -250,6 +255,18 @@ namespace cbdc::config {
/// Number of load generators over which to split pre-seeded UTXOs.
size_t m_loadgen_count{0};

/// List of loadgen log levels, ordered by loadgen ID.
std::vector<logging::log_level> m_loadgen_loglevels;

/// Maximum Tx/s the loadgens should produce
size_t m_loadgen_tps_target{0};
/// Percent of target to send at test-start
double m_loadgen_tps_initial{0};
/// Percent (of target-initial) to increase on each step
double m_loadgen_tps_step_size{0};
/// Time (fractional seconds) to wait before the next step up
double m_loadgen_tps_step_time{0};

/// Private keys for sentinels.
std::unordered_map<size_t, privkey_t> m_sentinel_private_keys;

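
Read together with the parsing code above, one consistent interpretation of these fields is a linear ramp: start at m_loadgen_tps_initial * m_loadgen_tps_target, then every m_loadgen_tps_step_time seconds add m_loadgen_tps_step_size * (target - initial). The load-generator changes that consume these values are not part of this diff, so this is only a sketch of that reading: with a target of 10000 tx/s, an initial fraction of 0.1, a step size of 0.05, and a step time of 2.5 s, the ramp starts at 1000 tx/s, adds 450 tx/s per step, and reaches the target after 20 steps, roughly 50 seconds.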