From 4942f7c60f498a5c83fd1307cfe127e92f08b5ce Mon Sep 17 00:00:00 2001
From: Konstantin Belov
Date: Thu, 12 Oct 2023 14:52:35 +0200
Subject: [PATCH] mmtests.sh: moving results processing into separate functions

The 'run_test' function is reworked to do only the operations required to
launch the benchmark. The new 'extract_json' function extracts the results
as JSON, one file per benchmark. These JSON files contain only the results
and lack important details about the test run, so the 'collect_details'
function is added to collect metadata about the benchmark run and the
environment. The 'collect_results' function builds the final JSON file with
all the information needed for further processing.

This approach keeps the functionality separated, so each function can be
modified on its own. Implementing the parsing this way also makes it
possible to drop the 'MMTESTS_TYPE_NAME' parameter, which simplifies
mmtests usage and the LAVA test case specifications.

Altreport support and the DBENCH-specific handling are removed.

Signed-off-by: Konstantin Belov
---
 automated/linux/mmtests/mmtests.sh | 152 +++++++++++++++++------
 1 file changed, 87 insertions(+), 65 deletions(-)

diff --git a/automated/linux/mmtests/mmtests.sh b/automated/linux/mmtests/mmtests.sh
index ff68ce4e5..209ebcc77 100755
--- a/automated/linux/mmtests/mmtests.sh
+++ b/automated/linux/mmtests/mmtests.sh
@@ -12,20 +12,15 @@ TEST_GIT_URL=https://github.com/gormanm/mmtests
 TEST_DIR=${TEST_DIR:-"$(pwd)/${TEST_PROGRAM}"}
 SKIP_INSTALL=${SKIP_INSTALL:-"false"}
 MMTESTS_MAX_RETRIES=${MMTESTS_MAX_RETRIES:-"3"}
-MMTESTS_TYPE_NAME=
 MMTESTS_CONFIG_FILE=
 MMTEST_ITERATIONS=${MMTEST_ITERATIONS:-"10"}
 MMTEST_EXTR="./bin/extract-mmtests.pl"
 
-# DBENCH specific variables
-declare -A altreport_mappings=( ["dbench4"]="tput latency opslatency")
-declare -A env_variable_mappings=( ["dbench4"]="DBENCH" )
-
 usage() {
     echo "\
     Usage: $0 [-s] [-v <TEST_PROG_VERSION>] [-u <TEST_GIT_URL>] [-p <TEST_DIR>]
-    [-c <MMTESTS_CONFIG_FILE>] [-t <MMTESTS_TYPE_NAME>]
-    [-r <MMTESTS_MAX_RETRIES>] [-i <MMTEST_ITERATIONS>]
+    [-c <MMTESTS_CONFIG_FILE>] [-r <MMTESTS_MAX_RETRIES>]
+    [-i <MMTEST_ITERATIONS>]
 
     -v <TEST_PROG_VERSION>
     If this parameter is set, then the ${TEST_PROGRAM} suite is cloned. In
@@ -56,7 +51,7 @@ usage() {
     MMTests test type, e.g. sysbenchcpu, iozone, sqlite, etc.
 
     -r <MMTESTS_MAX_RETRIES>
-    Maximum number of retries for the single benchmark's source file download 
+    Maximum number of retries for the single benchmark's source file download
 
     -i <MMTEST_ITERATIONS>
     The number of iterations to run the benchmark for."
@@ -64,7 +59,7 @@ usage() {
     exit 1
 }
 
-while getopts "c:p:r:st:u:v:i:" opt; do
+while getopts "c:p:r:su:v:i:" opt; do
     case "${opt}" in
         c)
             MMTESTS_CONFIG_FILE="${OPTARG}"
@@ -80,9 +75,6 @@ while getopts "c:p:r:st:u:v:i:" opt; do
         s)
             SKIP_INSTALL=true
             ;;
-        t)
-            MMTESTS_TYPE_NAME="${OPTARG}"
-            ;;
         u)
             if [[ "$OPTARG" != '' ]]; then
                 TEST_GIT_URL="${OPTARG}"
@@ -92,7 +84,7 @@
             TEST_PROG_VERSION="${OPTARG}"
             ;;
         i)
-            MMTESTS_ITERATIONS="${OPTARG}" 
+            MMTESTS_ITERATIONS="${OPTARG}"
             ;;
         *)
             usage
@@ -146,75 +138,104 @@ prepare_system() {
 
 run_test() {
     pushd "${TEST_DIR}" || exit
-    info_msg "Running ${MMTESTS_TYPE_NAME} test..."
+    info_msg "Running ${MMTESTS_CONFIG_FILE} test..."
 
     # Run benchmark according config file and with disabled monitoring.
     # Results will be stored in work/log/benchmark directory.
-    export MMTEST_ITERATIONS=${MMTESTS_ITERATIONS} 
-    # Using nice to increase priority for the benchmark. 
+    export MMTEST_ITERATIONS=${MMTESTS_ITERATIONS}
+    # Using nice to increase priority for the benchmark.
     nice -n -5 ./run-mmtests.sh -np -c "${MMTESTS_CONFIG_FILE}" benchmark
+    popd || exit
+}
+
+extract_json() {
+    # Extract results data from available logs for each benchmark in JSON format.
+    # JSON files will be available in mmtests root directory.
+    jsons=()
+    results_loc="work/log"
+    log_dirs=()
+    # Find all log directories
+    while IFS= read -r -d '' log_dir; do
+        if [ -d "$log_dir" ]; then
+            iter_dir=$(basename "$log_dir")
+            if [[ "$iter_dir" =~ ^iter-([0-9]+)$ ]]; then
+                log_dirs+=("${log_dir%/iter-*}")
+            fi
+        fi
+    done < <(find "${results_loc}" -type d -print0)
+    # De-duplicate and sort the directories
+    mapfile -t logd < <(printf "%s\n" "${log_dirs[@]}" | sort -u)
+    for log_dir in "${logd[@]}"; do
+        # Extract the test name from the path
+        full_testname=$(echo "$log_dir" | cut -d '/' -f 3)
+        # Strip the "config-" prefix
+        testname=${full_testname#config-}
+        # Find the benchmark names; a single run may use several benchmarks.
+        benchmarks=()
+        while IFS= read -r benchmark; do
+            benchmarks+=("$benchmark")
+        done < <(find "${log_dir}/iter-0" -type d | grep -E 'logs$' | grep -Eo 'iter-0/.+/logs' | cut -d '/' -f 2)
+        # Iterate over the benchmark names and extract their results
+        for benchmark in "${benchmarks[@]}"; do
+            # Build the JSON file name. The BENCHMARK and CONFIG marker words
+            # are added intentionally so the file name can be parsed easily later.
+            results_json=BENCHMARK${benchmark}_CONFIG${testname}.json
+            # Call the parser
+            ${MMTEST_EXTR} -d "${results_loc}" -b "${benchmark}" -n "${full_testname}" --print-json > "${results_json}"
+            # Add the JSON file name to the array
+            jsons+=("${results_json}")
+        done
+    done
+    # Return the array one entry per line; collect_results reads it back with mapfile
+    printf "%s\n" "${jsons[@]}"
+}
 
+collect_details() {
+    # Collect benchmark run details
     MEMTOTAL_BYTES=$(free -b | grep Mem: | awk '{print $2}')
-    export MEMTOTAL_BYTES
     NUMCPUS=$(grep -c '^processor' /proc/cpuinfo)
-    export NUMCPUS
     NUMNODES=$(grep ^Node /proc/zoneinfo | awk '{print $2}' | sort | uniq | wc -l)
-    export NUMNODES
     LLC_INDEX=$(find /sys/devices/system/cpu/ -type d -name "index*" | sed -e 's/.*index//' | sort -n | tail -1)
-    export LLC_INDEX
     NUMLLCS=$(grep . /sys/devices/system/cpu/cpu*/cache/index"$LLC_INDEX"/shared_cpu_map | awk -F : '{print $NF}' | sort | uniq | wc -l)
-    export NUMLLCS
+    KERNEL_VERSION=$(uname -r)
+    cat <<EOF > "$details_file"
+{
+    "MEMTOTAL_BYTES": "$MEMTOTAL_BYTES",
+    "NUMCPUS": "$NUMCPUS",
+    "NUMNODES": "$NUMNODES",
+    "LLC_INDEX": "$LLC_INDEX",
+    "NUMLLCS": "$NUMLLCS",
+    "KERNEL_VERSION": "$KERNEL_VERSION"
+}
+EOF
+}
+
+collect_results() {
+    # Build the final JSON files: benchmark results merged with run details.
+    pushd "${TEST_DIR}" || exit
+    details_file=$(mktemp)
+    collect_details
     # Extract results data from available logs for each benchmark in JSON format.
-    # JSON files will be available in mmtests root directory.
-
-    # Note: benchmark name is not always equal to benchmark name from config file.
-    if [ "${MMTESTS_TYPE_NAME}" != "${MMTESTS}" ]; then
-        EXTRACT_NAMES="${MMTESTS}"
-    else
-        EXTRACT_NAMES="${MMTESTS_TYPE_NAME}"
-    fi
-
-    echo "test(s) to extract: ${EXTRACT_NAMES}"
-    for benchmark_name in ${EXTRACT_NAMES}; do
-        echo "results for: $benchmark_name"
-        ${MMTEST_EXTR} -d work/log/ -b "${benchmark_name}" -n benchmark --print-json >> "../${MMTESTS_TYPE_NAME}_${benchmark_name}.json"
-
-        altreports=${altreport_mappings[${benchmark_name}]}
-        for altreport in ${altreports}; do
-            ${MMTEST_EXTR} -d work/log/ -b "${benchmark_name}" -n benchmark \
-                -a "${altreport}" --print-json > "../${MMTESTS_TYPE_NAME}_${benchmark_name}${altreport}.json"
-        done
-    done
-
-    env_variables_prefix=${env_variable_mappings[${MMTESTS_TYPE_NAME}]}
-    if [ -z "$env_variables_prefix" ]; then
-        env_variables_prefix=${MMTESTS_TYPE_NAME^^}
-    fi
-
-    vars=""
-    eval 'vars=${!'"$env_variables_prefix"'*}'
-    for variable in ${vars}; do
-        mykey=CONFIG_${variable}
-        mykey=${mykey//_/-}
-        myvalue=${!variable}
-        echo "$mykey":"$myvalue"
-        tmp=$(mktemp)
-        jq -c --arg k "$mykey" --arg v "$myvalue" '. += {($k):$v}' \
-            "../${MMTESTS_TYPE_NAME}_${benchmark_name}.json" > "$tmp" \
-            || { echo "jq operation failed"; exit 1; }
-        mv "$tmp" "../${MMTESTS_TYPE_NAME}_${benchmark_name}.json"
+    mapfile -t jsons < <(extract_json)
+    for json in "${jsons[@]}"; do
+        # Create a temp file to hold the merged JSON
+        merge_file=$(mktemp)
+        # Merge the details and the results JSON
+        jq -n \
+            --argfile d "$details_file" \
+            --argfile r "$json" \
+            '{details: $d, results: $r}' > "$merge_file"
+        # Replace the results file with the merged one
+        mv "$merge_file" "$json"
     done
-
-    chmod a+r "../$MMTESTS_TYPE_NAME"*".json"
-
     popd || exit
 }
 
 ! check_root && error_msg "Please run this script as root."
 
 if [ "${SKIP_INSTALL}" = "true" ]; then
-    info_msg "${MMTESTS_TYPE_NAME} installation skipped"
+    info_msg "Installation skipped"
 else
     # Install system-wide dependencies.
     install_system_deps
@@ -226,3 +247,4 @@ else
 fi
 
 run_test
+collect_results
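
Not part of the patch: a minimal sketch of how a follow-up processing step could
consume the new output. It assumes the merged result files written by
collect_results sit in the current directory; the loop and the jq query are
illustrative only, and the only structure relied on is the
BENCHMARK<benchmark>_CONFIG<config>.json naming used by extract_json and the
{details, results} layout built by collect_results.

    #!/bin/bash
    # Illustrative only: split the marker-based file names back into their
    # parts and print the run details stored next to each result set.
    for json in BENCHMARK*_CONFIG*.json; do
        [ -e "$json" ] || continue            # nothing to do if no results exist
        name=${json%.json}
        benchmark=${name#BENCHMARK}           # drop the BENCHMARK marker
        benchmark=${benchmark%%_CONFIG*}      # keep the text before _CONFIG
        config=${name##*_CONFIG}              # keep the text after _CONFIG
        echo "benchmark=${benchmark} config=${config}"
        jq -c '.details' "$json"              # details object merged in by collect_results
    done

The marker prefixes keep this split unambiguous as long as a benchmark name
never contains the literal string '_CONFIG'.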