mmtests.sh: move results processing into separate functions
The 'run_test' function was reworked to do only the operations required to launch the benchmark.

A new function, 'extract_json', extracts the results as JSON, one file per benchmark.

These JSON files lack important details about the test run; they contain only the
results. The 'collect_details' function was therefore added to collect metadata
about the benchmark run and environment, and 'collect_results' builds the final
JSON file with all the information needed for further processing.

This approach allows the functions to be modified individually and keeps their
responsibilities separate. Moreover, implementing parsing this way makes it
possible to drop the 'MMTESTS_TYPE_NAME' parameter, simplifying both mmtests
usage and the LAVA test case specifications.

Removed altreport support and DBENCH specifics.
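
As an illustration (the config and benchmark names here are hypothetical): a run
driven by a config file 'config-io-workloads' that uses the 'dbench4' benchmark
now produces a single self-describing file,

    BENCHMARKdbench4_CONFIGio-workloads.json

whose top level has the shape {"details": {...}, "results": {...}}; "details"
holds the run and environment metadata, "results" the data extracted by
extract-mmtests.pl.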

Signed-off-by: Konstantin Belov <[email protected]>
Konstantin Belov committed Oct 13, 2023
1 parent 85c3388 commit 5ed0be3
Showing 1 changed file with 87 additions and 65 deletions.
152 changes: 87 additions & 65 deletions automated/linux/mmtests/mmtests.sh
@@ -12,20 +12,15 @@ TEST_GIT_URL=https://github.com/gormanm/mmtests
TEST_DIR=${TEST_DIR:-"$(pwd)/${TEST_PROGRAM}"}
SKIP_INSTALL=${SKIP_INSTALL:-"false"}
MMTESTS_MAX_RETRIES=${MMTESTS_MAX_RETRIES:-"3"}
-MMTESTS_TYPE_NAME=
MMTESTS_CONFIG_FILE=
MMTEST_ITERATIONS=${MMTEST_ITERATIONS:-"10"}
MMTEST_EXTR="./bin/extract-mmtests.pl"

-# DBENCH specific variables
-declare -A altreport_mappings=( ["dbench4"]="tput latency opslatency")
-declare -A env_variable_mappings=( ["dbench4"]="DBENCH" )

usage() {
echo "\
Usage: $0 [-s] [-v <TEST_PROG_VERSION>] [-u <TEST_GIT_URL>] [-p <TEST_DIR>]
-  [-c <MMTESTS_CONFIG_FILE>] [-t <MMTESTS_TYPE_NAME>]
-  [-r <MMTESTS_MAX_RETRIES>] [-i <MMTEST_ITERATIONS>]
+  [-c <MMTESTS_CONFIG_FILE>] [-r <MMTESTS_MAX_RETRIES>]
+  [-i <MMTEST_ITERATIONS>]
-v <TEST_PROG_VERSION>
If this parameter is set, then the ${TEST_PROGRAM} suite is cloned. In
@@ -56,15 +51,15 @@ usage() {
-  MMTests test type, e.g. sysbenchcpu, iozone, sqlite, etc.
-r <MMTESTS_MAX_RETRIES>
  Maximum number of retries for the single benchmark's source file download
-i <MMTEST_ITERATIONS>
The number of iterations to run the benchmark for."

exit 1
}

while getopts "c:p:r:st:u:v:i:" opt; do
while getopts "c:p:r:su:v:i:" opt; do
case "${opt}" in
c)
MMTESTS_CONFIG_FILE="${OPTARG}"
@@ -80,9 +75,6 @@ while getopts "c:p:r:st:u:v:i:" opt; do
s)
SKIP_INSTALL=true
;;
-    t)
-        MMTESTS_TYPE_NAME="${OPTARG}"
-        ;;
u)
if [[ "$OPTARG" != '' ]]; then
TEST_GIT_URL="${OPTARG}"
@@ -92,7 +84,7 @@ while getopts "c:p:r:st:u:v:i:" opt; do
TEST_PROG_VERSION="${OPTARG}"
;;
i)
        MMTESTS_ITERATIONS="${OPTARG}"
;;
*)
usage
@@ -146,75 +138,104 @@ prepare_system() {

run_test() {
pushd "${TEST_DIR}" || exit
info_msg "Running ${MMTESTS_TYPE_NAME} test..."
info_msg "Running ${MMTESTS_CONFIG_FILE} test..."
    # Run the benchmark according to the config file, with monitoring disabled.
# Results will be stored in work/log/benchmark directory.
    export MMTEST_ITERATIONS=${MMTESTS_ITERATIONS}
    # Using nice to increase priority for the benchmark.
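    # Note: a negative nice value needs elevated privileges; the script is
    # guarded by the check_root test below, so this runs as root.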
nice -n -5 ./run-mmtests.sh -np -c "${MMTESTS_CONFIG_FILE}" benchmark
popd || exit
}

extract_json() {
# Extract results data from available logs for each benchmark in JSON format.
# JSON files will be available in mmtests root directory.
jsons=()
results_loc="work/log"
log_dirs=()
# Find all log directories
while IFS= read -r -d '' log_dir; do
if [ -d "$log_dir" ]; then
iter_dir=$(basename "$log_dir")
if [[ "$iter_dir" =~ ^iter-([0-9]+)$ ]]; then
log_dirs+=("${log_dir%/iter-*}")
fi
fi
done < <(find "${results_loc}" -type d -print0)
# Filter & sort directories
mapfile -t logd < <(printf "%s\n" "${log_dirs[@]}" | sort -u)
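    # (each config directory was appended once per iter-N subdirectory above,
    # so 'sort -u' collapses the duplicates)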
for log_dir in "${logd[@]}"; do
# Find testname
full_testname=$(echo "$log_dir" | cut -d '/' -f 3)
        # Strip the 'config-' prefix
testname=${full_testname#config-}
        # Find benchmark names; a single run may use several benchmarks.
benchmarks=()
while IFS= read -r benchmark; do
benchmarks+=("$benchmark")
done < <(find "${log_dir}/iter-0" -type d | grep -E 'logs$' | grep -Eo 'iter-0/.+/logs' | cut -d '/' -f 2)
# Iterate through found benchmark names to extract results
for benchmark in "${benchmarks[@]}"; do
# Build JSON file name
            # The BENCHMARK and CONFIG marker words are added intentionally;
            # they allow the file name to be parsed easily in the future.
results_json=BENCHMARK${benchmark}_CONFIG${testname}.json
# Call parser
            ${MMTEST_EXTR} -d "${results_loc}" -b "${benchmark}" -n "${full_testname}" --print-json > "${results_json}"
# Add JSON file name to array
jsons+=("${results_json}")
done
done
    # Return the array, one entry per line.
printf "%s\n" "${jsons[@]}"
}
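
# Illustrative only (hypothetical file name): the marker words make the
# generated names easy to parse back into their parts, e.g.:
#   f=BENCHMARKdbench4_CONFIGio-workloads.json
#   b=${f#BENCHMARK}; b=${b%%_CONFIG*}   # benchmark -> dbench4
#   c=${f##*_CONFIG}; c=${c%.json}       # config    -> io-workloads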

collect_details() {
# Collect benchmark run details
MEMTOTAL_BYTES=$(free -b | grep Mem: | awk '{print $2}')
export MEMTOTAL_BYTES
NUMCPUS=$(grep -c '^processor' /proc/cpuinfo)
export NUMCPUS
NUMNODES=$(grep ^Node /proc/zoneinfo | awk '{print $2}' | sort | uniq | wc -l)
export NUMNODES
LLC_INDEX=$(find /sys/devices/system/cpu/ -type d -name "index*" | sed -e 's/.*index//' | sort -n | tail -1)
export LLC_INDEX
NUMLLCS=$(grep . /sys/devices/system/cpu/cpu*/cache/index"$LLC_INDEX"/shared_cpu_map | awk -F : '{print $NF}' | sort | uniq | wc -l)
export NUMLLCS
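    # Example: if index0..index3 exist, LLC_INDEX is 3 (the last-level cache)
    # and NUMLLCS counts the distinct shared_cpu_map values at that level,
    # i.e. the number of separate last-level caches in the machine.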
KERNEL_VERSION=$(uname -r)
cat <<EOF
{
"MEMTOTAL_BYTES": "${MEMTOTAL_BYTES:-}",
"NUMCPUS": "${NUMCPUS:-}",
"NUMNODES": "${NUMNODES:-}",
"LLC_INDEX": "${LLC_INDEX:-}",
"NUMLLCS": "${NUMLLCS:-}",
"KERNEL_VERSION": "${KERNEL_VERSION:-}",
"MMTESTS_ITERATIONS": "${MMTESTS_ITERATIONS:-}",
"MMTESTS_CONFIG_FILE": "${MMTESTS_CONFIG_FILE:-}"
}
EOF
}

-chmod u+x ./"${MMTESTS_CONFIG_FILE}"
-eval 'source ./${MMTESTS_CONFIG_FILE}'
collect_results() {
    # Collect benchmark run details as a JSON object.
details=$(collect_details)
# Dump details to temp file
details_file=$(mktemp)
echo "$details" > "$details_file"
-# Extract results data from available logs for each benchmark in JSON format.
-# JSON files will be available in mmtests root directory.

-# Note: benchmark name is not always equal to benchmark name from config file.
-if [ "${MMTESTS_TYPE_NAME}" != "${MMTESTS}" ]; then
-    EXTRACT_NAMES="${MMTESTS}"
-else
-    EXTRACT_NAMES="${MMTESTS_TYPE_NAME}"
-fi

-echo "test(s) to extract: ${EXTRACT_NAMES}"
-for benchmark_name in ${EXTRACT_NAMES}; do
-    echo "results for: $benchmark_name"
-    ${MMTEST_EXTR} -d work/log/ -b "${benchmark_name}" -n benchmark --print-json >> "../${MMTESTS_TYPE_NAME}_${benchmark_name}.json"

-    altreports=${altreport_mappings[${benchmark_name}]}
-    for altreport in ${altreports}; do
-        ${MMTEST_EXTR} -d work/log/ -b "${benchmark_name}" -n benchmark \
-            -a "${altreport}" --print-json > "../${MMTESTS_TYPE_NAME}_${benchmark_name}${altreport}.json"
-    done
-done

-env_variables_prefix=${env_variable_mappings[${MMTESTS_TYPE_NAME}]}
-if [ -z "$env_variables_prefix" ]; then
-    env_variables_prefix=${MMTESTS_TYPE_NAME^^}
-fi

-vars=""
-eval 'vars=${!'"$env_variables_prefix"'*}'
-for variable in ${vars}; do
-    mykey=CONFIG_${variable}
-    mykey=${mykey//_/-}
-    myvalue=${!variable}
-    echo "$mykey":"$myvalue"
-    tmp=$(mktemp)
-    jq -c --arg k "$mykey" --arg v "$myvalue" '. += {($k):$v}' \
-        "../${MMTESTS_TYPE_NAME}_${benchmark_name}.json" > "$tmp" \
-        || { echo "jq operation failed"; exit 1; }
-    mv "$tmp" "../${MMTESTS_TYPE_NAME}_${benchmark_name}.json"
mapfile -t jsons < <(extract_json)
for json in "${jsons[@]}"; do
# Create a temp file to hold the merged JSON
merge_file=$(mktemp)
# Merge details and results JSON
jq -n \
--argfile d "$details_file" \
--argfile r "$json" \
'{details: $d, results: $r}' > "$merge_file"
# Replace results file
mv "$merge_file" "$json"
done
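    # The merged file is self-contained; schematically:
    #   { "details": { "KERNEL_VERSION": "...", "NUMCPUS": "...", ... },
    #     "results": <output of extract-mmtests.pl> }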

-chmod a+r "../$MMTESTS_TYPE_NAME"*".json"
-popd || exit
}

! check_root && error_msg "Please run this script as root."

if [ "${SKIP_INSTALL}" = "true" ]; then
info_msg "${MMTESTS_TYPE_NAME} installation skipped"
info_msg "Installation skipped"
else
# Install system-wide dependencies.
install_system_deps
@@ -226,3 +247,4 @@ else
fi

run_test
collect_results
