From 1a37a189ce0c6b1f075cd1e6769014ce2c7bb228 Mon Sep 17 00:00:00 2001 From: Nikita Menkovich Date: Thu, 25 Jan 2024 16:42:50 +0100 Subject: [PATCH] merge actions changes to 23-3 (#246) --- .github/actions/build/action.yaml | 20 ++- .github/actions/prepare/action.yaml | 2 +- .github/actions/s3cmd/action.yaml | 35 ++++- .github/actions/test/action.yaml | 114 ++++++++++++--- .github/actions/test_cmake/action.yaml | 70 +++++++++- .github/config/muted_ya.txt | 2 - .github/packer/github-runner.pkr.hcl | 4 +- .github/scripts/index.py | 132 ++++++++++++++++++ .github/scripts/templates/index.html | 74 ++++++++++ .github/scripts/tests/attach-logs.py | 35 ++++- .github/scripts/tests/ctest-postprocess.py | 4 +- .github/scripts/tests/ctest_utils.py | 4 +- .github/scripts/tests/fail-checker.py | 26 +++- .github/scripts/tests/generate-summary.py | 113 ++++++++++++--- .github/scripts/tests/log_parser.py | 4 +- .github/scripts/tests/mute_utils.py | 4 +- .github/scripts/tests/split-junit.py | 45 ++++++ .github/scripts/tests/templates/summary.html | 6 +- .github/scripts/tests/transform-ya-junit.py | 84 ++++++++--- .github/workflows/README.md | 2 +- .github/workflows/build_and_test_act.yaml | 39 ++++-- .../workflows/build_and_test_act_cmake.yaml | 9 ++ .github/workflows/build_and_test_cmake.yaml | 28 +++- .../workflows/build_and_test_on_demand.yaml | 85 +++++++---- .../build_and_test_on_demand_cmake.yaml | 44 +++++- .github/workflows/build_and_test_ya.yaml | 40 +++++- .github/workflows/nightly-index-rebuild.yaml | 41 ++++++ .github/workflows/nightly.yaml | 3 +- .github/workflows/pr.yaml | 3 +- 29 files changed, 920 insertions(+), 152 deletions(-) create mode 100644 .github/scripts/index.py create mode 100644 .github/scripts/templates/index.html create mode 100755 .github/scripts/tests/split-junit.py create mode 100644 .github/workflows/nightly-index-rebuild.yaml diff --git a/.github/actions/build/action.yaml b/.github/actions/build/action.yaml index 181c101356..c9e5e5a7be 100644 --- a/.github/actions/build/action.yaml +++ b/.github/actions/build/action.yaml @@ -8,7 +8,7 @@ inputs: required: true default: "relwithdebinfo" description: "relwithdebinfo, release-asan, release-tsan" - build_cache_update: + cache_update: required: true description: "bazel-remote upload results" default: "false" @@ -34,9 +34,9 @@ runs: shell: bash run: | echo "SHELLOPTS=xtrace" >> $GITHUB_ENV - export TMP_DIR=$(pwd)/tmp_build + export TMP_DIR=/home/github/tmp_build echo "TMP_DIR=$TMP_DIR" >> $GITHUB_ENV - rm -rf $TMP_DIR && mkdir $TMP_DIR + rm -rf $TMP_DIR && mkdir $TMP_DIR && chown -R github:github $TMP_DIR $GITHUB_WORKSPACE - name: build shell: bash @@ -61,7 +61,7 @@ runs: extra_params+=(--add-result .o) fi - if [ "${{ inputs.build_cache_update }}" == "true" ]; then + if [ "${{ inputs.cache_update }}" == "true" ]; then extra_params+=(--bazel-remote-put --dist-cache-evict-bins) fi @@ -88,7 +88,7 @@ runs: ;; esac - ./ya make -k --build "${build_type}" --force-build-depends -D'BUILD_LANGUAGES=CPP PY3 PY2 GO' -T --stat \ + sudo -E -H -u github ./ya make -k --build "${build_type}" --force-build-depends -D'BUILD_LANGUAGES=CPP PY3 PY2 GO' -T --stat \ --log-file "$TMP_DIR/ya_log.txt" --evlog-file "$TMP_DIR/ya_evlog.jsonl" \ --dump-graph --dump-graph-to-file "$TMP_DIR/ya_graph.json" \ --cache-size 512G --link-threads "${{ inputs.link_threads }}" \ @@ -99,7 +99,15 @@ runs: shell: bash run: | echo "::group::s3-sync" - s3cmd sync --acl-private --no-progress --stats --no-check-md5 "$TMP_DIR/" "$S3_BUCKET_PATH/build_logs/" + sudo -E -H -u 
github s3cmd sync --acl-private --no-progress --stats --no-check-md5 "$TMP_DIR/" "$S3_BUCKET_PATH/build_logs/"
         echo "::endgroup::"
 
+    - name: Create directory listing on s3
+      if: always()
+      shell: bash
+      run: |
+        echo "::group::generate-listing"
+        sudo -E -H -u github python3 .github/scripts/index.py "$S3_BUCKET_PATH"
+        echo "::endgroup::"
+
     - name: show free space
diff --git a/.github/actions/prepare/action.yaml b/.github/actions/prepare/action.yaml
index a20757a4c9..47eee3307d 100644
--- a/.github/actions/prepare/action.yaml
+++ b/.github/actions/prepare/action.yaml
@@ -16,7 +16,7 @@ runs:
         sudo apt-get update
         sudo apt-get install -y --no-install-recommends git wget gnupg lsb-release curl xz-utils tzdata cmake \
           python3-dev python3-pip ninja-build antlr3 m4 libidn11-dev libaio1 libaio-dev make clang-14 lld-14 llvm-14 file \
-          distcc strace qemu-kvm dpkg-dev
+          distcc strace qemu-kvm qemu-utils dpkg-dev atop
         sudo pip3 install conan==1.59 pytest==7.1.3 pytest-timeout pytest-xdist==3.3.1 setproctitle==1.3.2 grpcio grpcio-tools PyHamcrest tornado xmltodict pyarrow boto3 moto[server] psutil pygithub==1.59.1 pyinstaller==5.13.2 cryptography packaging six pyyaml
     - name: install ccache
       shell: bash
diff --git a/.github/actions/s3cmd/action.yaml b/.github/actions/s3cmd/action.yaml
index 089422a6fc..dda327c6d9 100644
--- a/.github/actions/s3cmd/action.yaml
+++ b/.github/actions/s3cmd/action.yaml
@@ -29,11 +29,12 @@ runs:
       shell: bash
       run: |
         sudo apt-get update
-        sudo apt-get install -y --no-install-recommends s3cmd
+        sudo apt-get install -y --no-install-recommends s3cmd
     - name: configure s3cmd
       shell: bash
       run: |
-        export S3CMD_CONFIG=$(mktemp)
+        export S3CMD_CONFIG=$(mktemp -p /home/github)
+        chown github:github $S3CMD_CONFIG
         echo "S3CMD_CONFIG=$S3CMD_CONFIG" >> $GITHUB_ENV
         export GITHUB_WORKFLOW_NO_SPACES=${GITHUB_WORKFLOW// /-}
         cat <<EOF > $S3CMD_CONFIG
@@ -45,6 +46,30 @@ runs:
         host_bucket = %(bucket)s.storage.ai.nebius.cloud
         EOF
 
+        mkdir -p /home/github/.aws/
+        cat <<EOF > /home/github/.aws/credentials
+        [default]
+        aws_access_key_id = ${s3_key_id}
+        aws_secret_access_key = ${s3_secret_access_key}
+        EOF
+        cat <<EOF > /home/github/.aws/config
+        [default]
+        region = eu-north1
+        endpoint_url=https://storage.ai.nebius.cloud/
+        EOF
+
+        mkdir -p /root/.aws/
+        cat <<EOF > /root/.aws/credentials
+        [default]
+        aws_access_key_id = ${s3_key_id}
+        aws_secret_access_key = ${s3_secret_access_key}
+        EOF
+        cat <<EOF > /root/.aws/config
+        [default]
+        region = eu-north1
+        endpoint_url=https://storage.ai.nebius.cloud/
+        EOF
+
         folder="${{ runner.arch == 'X64' && 'x86-64' || runner.arch == 'ARM64' && 'arm64' || 'unknown' }}"
 
         case "${{ inputs.build_preset }}" in
@@ -65,9 +90,9 @@ runs:
           ;;
         esac
 
-        echo "S3_BUCKET_PATH=s3://${{ inputs.s3_bucket }}/${{ github.repository }}/${GITHUB_WORKFLOW_NO_SPACES}/${{ github.run_id }}/${{ inputs.folder_prefix }}${folder}" >> $GITHUB_ENV
-        echo "S3_URL_PREFIX=${{ inputs.s3_endpoint }}/${{ inputs.s3_bucket }}/${{ github.repository }}/${GITHUB_WORKFLOW_NO_SPACES}/${{ github.run_id }}/${{ inputs.folder_prefix }}${folder}" >> $GITHUB_ENV
-        echo "S3_WEBSITE_PREFIX=https://${{ inputs.s3_bucket }}.${{ inputs.s3_website_suffix }}/${{ github.repository }}/${GITHUB_WORKFLOW_NO_SPACES}/${{ github.run_id }}/${{ inputs.folder_prefix }}${folder}" >> $GITHUB_ENV
+        echo "S3_BUCKET_PATH=s3://${{ inputs.s3_bucket }}/${{ github.repository }}/${GITHUB_WORKFLOW_NO_SPACES}/${{ github.run_id }}/${{ github.run_attempt || '1' }}/${{ inputs.folder_prefix }}${folder}" >> $GITHUB_ENV
+        echo "S3_URL_PREFIX=${{ inputs.s3_endpoint }}/${{
inputs.s3_bucket }}/${{ github.repository }}/${GITHUB_WORKFLOW_NO_SPACES}/${{ github.run_id }}/${{ github.run_attempt || '1' }}/${{ inputs.folder_prefix }}${folder}" >> $GITHUB_ENV + echo "S3_WEBSITE_PREFIX=https://${{ inputs.s3_bucket }}.${{ inputs.s3_website_suffix }}/${{ github.repository }}/${GITHUB_WORKFLOW_NO_SPACES}/${{ github.run_id }}/${{ github.run_attempt || '1' }}/${{ inputs.folder_prefix }}${folder}" >> $GITHUB_ENV env: s3_key_id: ${{ inputs.s3_key_id }} s3_secret_access_key: ${{ inputs.s3_key_secret }} diff --git a/.github/actions/test/action.yaml b/.github/actions/test/action.yaml index 6ba8eed99e..3a4d07eff8 100644 --- a/.github/actions/test/action.yaml +++ b/.github/actions/test/action.yaml @@ -27,9 +27,13 @@ inputs: bazel_remote_uri: required: false description: "bazel-remote endpoint" - test_cache_update: + cache_update: required: false description: "Use cache for tests" + sync_to_s3: + required: false + default: 'false' + description: 'Sync failed tests folders to s3' runs: using: composite steps: @@ -38,23 +42,29 @@ runs: shell: bash run: | echo "SHELLOPTS=xtrace" >> $GITHUB_ENV - export TMP_DIR=$(pwd)/tmp + export TMP_DIR=/home/github/tmp echo "TMP_DIR=$TMP_DIR" >> $GITHUB_ENV echo "LOG_DIR=$TMP_DIR/logs" >> $GITHUB_ENV echo "OUT_DIR=$TMP_DIR/out" >> $GITHUB_ENV echo "ARTIFACTS_DIR=$TMP_DIR/artifacts" >> $GITHUB_ENV + echo "TESTS_DATA_DIR=$TMP_DIR/test_data" >> $GITHUB_ENV + echo "REPORTS_ARTIFACTS_DIR=$TMP_DIR/artifacts/test_reports" >> $GITHUB_ENV echo "JUNIT_REPORT_XML=$TMP_DIR/junit.xml" >> $GITHUB_ENV - echo "SUMMARY_LINKS=$(mktemp)" >> $GITHUB_ENV + echo "JUNIT_REPORT_PARTS=$TMP_DIR/junit-split" >> $GITHUB_ENV + echo "SUMMARY_LINKS=$(mktemp -p /home/github)" >> $GITHUB_ENV - name: prepare shell: bash run: | - rm -rf $TMP_DIR $JUNIT_REPORT_XML - mkdir -p $TMP_DIR $OUT_DIR $ARTIFACTS_DIR $LOG_DIR - + rm -rf $TMP_DIR $JUNIT_REPORT_XML $JUNIT_REPORT_PARTS $REPORTS_ARTIFACTS_DIR $TESTS_DATA_DIR + mkdir -p $TMP_DIR $OUT_DIR $ARTIFACTS_DIR $LOG_DIR $JUNIT_REPORT_PARTS $REPORTS_ARTIFACTS_DIR $TESTS_DATA_DIR + chown -R github:github $TMP_DIR $OUT_DIR $ARTIFACTS_DIR $LOG_DIR $JUNIT_REPORT_PARTS \ + $REPORTS_ARTIFACTS_DIR $SUMMARY_LINKS $GITHUB_WORKSPACE \ + $GITHUB_STEP_SUMMARY $TESTS_DATA_DIR - name: ya test shell: bash run: | + set -x extra_params=() # FIXME: copy-paste from build_ya @@ -94,14 +104,15 @@ runs: extra_params+=(--bazel-remote-base-uri "${{ inputs.bazel_remote_uri }}") fi - if [ "${{ inputs.test_cache_update }}" = "true" ]; then + if [ "${{ inputs.cache_update }}" = "true" ]; then extra_params+=(--cache-tests) fi readarray -d ',' -t test_size < <(printf "%s" "${{ inputs.test_size }}") readarray -d ',' -t test_type < <(printf "%s" "${{ inputs.test_type }}") - ./ya test -k --build "${build_type}" -D'BUILD_LANGUAGES=CPP PY3 PY2 GO' \ + echo "::group::ya-make-test" + sudo -E -H -u github ./ya test -k --build "${build_type}" -D'BUILD_LANGUAGES=CPP PY3 PY2 GO' \ ${test_size[@]/#/--test-size=} ${test_type[@]/#/--test-type=} \ --test-threads "${{ inputs.test_threads }}" --link-threads "${{ inputs.link_threads }}" \ --cache-size 512G --do-not-output-stderrs -T \ @@ -109,46 +120,100 @@ runs: --canonization-backend=ydb-canondata.storage.yandexcloud.net \ --junit "$JUNIT_REPORT_XML" --output "$OUT_DIR" "${extra_params[@]}" || ( RC=$? - if [[ $RC -ge 10 && $RC -le 14 ]]; then - echo "ya test returned failed tests status, recovering.." 
-        else
-          exit $RC
+        if [ $RC -ne 0 ]; then
+          echo "ya test returned $RC, checking existence of $JUNIT_REPORT_XML"
+          if [ -s "$JUNIT_REPORT_XML" ]; then
+            echo "$JUNIT_REPORT_XML exists"
+            ls -la "$JUNIT_REPORT_XML"
+          else
+            echo "$JUNIT_REPORT_XML doesn't exist or has zero size"
+            ls -la "$JUNIT_REPORT_XML" || true
+            exit $RC
+          fi
         fi
       )
+        echo "::endgroup::"
+
+    - name: archive unittest reports (orig)
+      shell: bash
+      run: |
+        sudo -E -H -u github gzip -c $JUNIT_REPORT_XML > $REPORTS_ARTIFACTS_DIR/orig_junit.xml.gz
 
     - name: postprocess junit report
       shell: bash
       run: |
-        .github/scripts/tests/transform-ya-junit.py -i \
+        sudo -E -H -u github .github/scripts/tests/transform-ya-junit.py -i \
           -m .github/config/muted_ya.txt \
           --ya-out "$OUT_DIR" \
           --log-url-prefix "$S3_WEBSITE_PREFIX/logs/" \
           --log-out-dir "$ARTIFACTS_DIR/logs/" \
           "$JUNIT_REPORT_XML"
+        sudo -E -H -u github .github/scripts/tests/split-junit.py -o "$JUNIT_REPORT_PARTS" "$JUNIT_REPORT_XML"
+
+    - name: archive unittest reports (transformed)
+      shell: bash
+      run: |
+        sudo -E -H -u github tar -C $JUNIT_REPORT_PARTS/.. -czf $REPORTS_ARTIFACTS_DIR/junit_parts.xml.tar.gz $(basename $JUNIT_REPORT_PARTS) $JUNIT_REPORT_XML
+
     - name: write tests summary
       shell: bash
       env:
         GITHUB_TOKEN: ${{ github.token }}
       run: |
-        mkdir $ARTIFACTS_DIR/summary/
+        sudo -E -H -u github mkdir $ARTIFACTS_DIR/summary/
 
         cat $SUMMARY_LINKS | python3 -c 'import sys; print(" | ".join([v for _, v in sorted([l.strip().split(" ", 1) for l in sys.stdin], key=lambda a: (int(a[0]), a))]))' >> $GITHUB_STEP_SUMMARY
 
         platform_name=$(uname | tr '[:upper:]' '[:lower:]')-$(arch)
 
+        export SUMMARY_OUT_ENV_PATH=$(mktemp -p /home/github)
+        chown github:github $SUMMARY_OUT_ENV_PATH
-        .github/scripts/tests/generate-summary.py \
+        sudo -E -H -u github .github/scripts/tests/generate-summary.py \
           --summary-out-path "$ARTIFACTS_DIR/summary/" \
+          --summary-out-env-path "$SUMMARY_OUT_ENV_PATH" \
           --summary-url-prefix "$S3_WEBSITE_PREFIX/summary/" \
           --build-preset "${platform_name}-${{ inputs.build_preset }}" \
           "Tests" ya-test.html "$JUNIT_REPORT_XML"
+        cat $SUMMARY_OUT_ENV_PATH | tee -a $GITHUB_STEP_SUMMARY
+
+    - name: check test results
+      shell: bash
+      run: |
+        set -x
+        sudo -E -H -u github .github/scripts/tests/fail-checker.py "$JUNIT_REPORT_XML" || {
+          RC=$?
+
+          echo "::group::Copy-failed-tests-data"
+          sudo -E -H -u github .github/scripts/tests/fail-checker.py "$JUNIT_REPORT_XML" --paths-only
+          sudo -E -H -u github .github/scripts/tests/fail-checker.py "$JUNIT_REPORT_XML" --paths-only | while read path; do
+            echo $path
+            find "${GITHUB_WORKSPACE}/${path}" -print0 | xargs -0 cp -L -r --parents -t "$TESTS_DATA_DIR"
+          done
+          chown -R github:github "$TESTS_DATA_DIR"
+          echo "::endgroup::"
+          echo "::group::remove-binaries-from-tests-data-dir"
+          find "$TESTS_DATA_DIR" -type f -print0 | xargs -0 -n 10 file -i | grep "application/x-executable" | awk -F: '{print $1}'
+          find "$TESTS_DATA_DIR" -type f -print0 | xargs -0 -n 10 file -i | grep "application/x-executable" | awk -F: '{print $1}' | xargs rm
+          echo "::endgroup::"
+          echo "::group::s3-sync"
+          if [ "$SYNC_TO_S3" = "true" ];
+          then
+            sudo -E -H -u github s3cmd sync --follow-symlinks --acl-private --no-progress --stats --no-check-md5 "$TESTS_DATA_DIR/" "$S3_BUCKET_PATH/test_data/"
+          fi
+          echo "::endgroup::"
+          exit $RC
+        }
+      env:
+        SYNC_TO_S3: ${{ inputs.sync_to_s3 || 'false' }}
+
     - name: Sync test results to S3
       if: always()
       shell: bash
       run: |
         echo "::group::s3-sync"
-        s3cmd sync --follow-symlinks --acl-public --no-progress --stats --no-check-md5 "$ARTIFACTS_DIR/" "$S3_BUCKET_PATH/"
+        sudo -E -H -u github s3cmd sync --follow-symlinks --acl-public --no-progress --stats --no-check-md5 "$ARTIFACTS_DIR/" "$S3_BUCKET_PATH/"
         echo "::endgroup::"
 
     - name: Sync logs results to S3
@@ -156,7 +221,14 @@ runs:
       shell: bash
       run: |
         echo "::group::s3-sync"
-        s3cmd sync --follow-symlinks --acl-private --no-progress --stats --no-check-md5 "$LOG_DIR/" "$S3_BUCKET_PATH/test_logs/"
+        sudo -E -H -u github s3cmd sync --follow-symlinks --acl-private --no-progress --stats --no-check-md5 "$LOG_DIR/" "$S3_BUCKET_PATH/test_logs/"
+        echo "::endgroup::"
+
+    - name: Sync reports to S3
+      if: always()
+      shell: bash
+      run: |
+        echo "::group::s3-sync"
+        sudo -E -H -u github s3cmd sync --follow-symlinks --acl-private --no-progress --stats --no-check-md5 "$REPORTS_ARTIFACTS_DIR/" "$S3_BUCKET_PATH/test_reports/"
         echo "::endgroup::"
 
     - name: Display links to s3 summary
@@ -167,10 +239,14 @@ runs:
         echo ${S3_URL_PREFIX}/summary/ya-test.html
         echo ${S3_WEBSITE_PREFIX}/summary/ya-test.html
         echo "::endgroup::"
-    - name: check test results
+
+    - name: Create directory listing on s3
+      if: always()
       shell: bash
       run: |
-        .github/scripts/tests/fail-checker.py "$JUNIT_REPORT_XML"
+        echo "::group::generate-listing"
+        sudo -E -H -u github python3 .github/scripts/index.py "$S3_BUCKET_PATH"
+        echo "::endgroup::"
 
     - name: show free space
       if: always()
diff --git a/.github/actions/test_cmake/action.yaml b/.github/actions/test_cmake/action.yaml
index 616d263a3c..34c3cb3845 100644
--- a/.github/actions/test_cmake/action.yaml
+++ b/.github/actions/test_cmake/action.yaml
@@ -3,21 +3,83 @@ description: Run tests using cmake infrastructure
 runs:
   using: composite
   steps:
+    - name: prepare
+      shell: bash
+      run: |
+        echo "SHELLOPTS=$SHELLOPTS:xtrace" >> $GITHUB_ENV
+        echo "ARTIFACTS_DIR=${TMP_DIR}/artifacts" >> $GITHUB_ENV
+        echo "SUMMARY_DIR=${ARTIFACTS_DIR}/summary" >> $GITHUB_ENV
+        echo "SUMMARY_LINKS=$(mktemp)" >> $GITHUB_ENV
+
+    - name: create dirs
+      shell: bash
+      run: |
+        mkdir -p "$ARTIFACTS_DIR" "$SUMMARY_DIR"
     - name: ctest
       shell: bash
       run: |
         cd $TMP_DIR
-        ctest --rerun-failed --output-on-failure | tee -a "$TMP_DIR/ctest.log"
+
+        ctest -j 60 --output-junit $ARTIFACTS_DIR/junit.xml -O "$TMP_DIR/ctest.log" || (
+          ctest -j 60 --rerun-failed --output-on-failure --output-junit
$ARTIFACTS_DIR/junit_rerun.xml -O "$TMP_DIR/ctest_rerun.log" + ) + + - name: Generate summary + if: always() + shell: bash + run: | + if [ -f "${ARTIFACTS_DIR}/junit.xml" ]; then + if [ -f "${ARTIFACTS_DIR}/junit_rerun.xml" ]; then + .github/scripts/tests/generate-summary.py \ + --summary-out-path "${SUMMARY_DIR}" \ + --summary-url-prefix "${S3_URL_PREFIX}/summary/" \ + "CTest" ctest.html "${ARTIFACTS_DIR}/junit.xml" \ + "CTest Rerun" ctest_rerun.html "${ARTIFACTS_DIR}/junit_rerun.xml" + else + .github/scripts/tests/generate-summary.py \ + --summary-out-path "${SUMMARY_DIR}" \ + --summary-url-prefix "${S3_URL_PREFIX}/summary/" \ + "CTest" ctest.html "${ARTIFACTS_DIR}/junit.xml" + fi + fi - name: Sync logs to S3 if: always() shell: bash run: | echo "::group::s3-sync" - [ -f "$TMP_DIR/ctest.log" ] && { - s3cmd sync --acl-private --no-progress --stats --no-check-md5 "$TMP_DIR/ctest.log" "$S3_BUCKET_PATH/build_logs/" - echo ${S3_WEBSITE_PREFIX}/build_logs/ctest.log + [ -f "${TMP_DIR}/ctest.log" ] && { + s3cmd sync --acl-private --no-progress --stats --no-check-md5 "${TMP_DIR}/ctest.log" "$S3_BUCKET_PATH/build_logs/" + echo "[ctest.log](${S3_WEBSITE_PREFIX}/build_logs/ctest.log)" | tee -a $GITHUB_STEP_SUMMARY } + [ -f "${TMP_DIR}/ctest_rerun.log" ] && { + s3cmd sync --acl-private --no-progress --stats --no-check-md5 "${TMP_DIR}/ctest_rerun.log" "$S3_BUCKET_PATH/build_logs/" + echo "[ctest_rerun.log](${S3_WEBSITE_PREFIX}/build_logs/ctest_rerun.log)" | tee -a $GITHUB_STEP_SUMMARY + } + [ -f "${ARTIFACTS_DIR}/junit.xml" ] && { + s3cmd sync --acl-private --no-progress --stats --no-check-md5 "$ARTIFACTS_DIR/junit.xml" "$S3_BUCKET_PATH/artifacts/" + echo "[junit.xml](${S3_WEBSITE_PREFIX}/artifacts/junit.xml)" | tee -a $GITHUB_STEP_SUMMARY + } + [ -f "${ARTIFACTS_DIR}/junit_rerun.xml" ] && { + s3cmd sync --acl-private --no-progress --stats --no-check-md5 "$ARTIFACTS_DIR/junit_rerun.xml" "$S3_BUCKET_PATH/artifacts/" + echo "[junit_rerun.xml](${S3_WEBSITE_PREFIX}/artifacts/junit_rerun.xml)" | tee -a $GITHUB_STEP_SUMMARY + } + [ -f "${SUMMARY_DIR}/ctest.html" ] && { + s3cmd sync --acl-private --no-progress --stats --no-check-md5 "${SUMMARY_DIR}/ctest.html" "$S3_BUCKET_PATH/summary/" + echo "[ctest.html](${S3_WEBSITE_PREFIX}/summary/ctest.html)" | tee -a $GITHUB_STEP_SUMMARY + } + [ -f "${SUMMARY_DIR}/ctest_rerun.html" ] && { + s3cmd sync --acl-private --no-progress --stats --no-check-md5 "${SUMMARY_DIR}/ctest_rerun.html" "$S3_BUCKET_PATH/summary/" + echo "[ctest_rerun.html](${S3_WEBSITE_PREFIX}/summary/ctest_rerun.html)" | tee -a $GITHUB_STEP_SUMMARY + } + echo "::endgroup::" + + - name: Create directory listing on s3 + if: always() + shell: bash + run: | + echo "::group::generate-listing" + python3 .github/scripts/index.py "$S3_BUCKET_PATH" echo "::endgroup::" - name: show free space diff --git a/.github/config/muted_ya.txt b/.github/config/muted_ya.txt index e024214082..fea5b977ad 100644 --- a/.github/config/muted_ya.txt +++ b/.github/config/muted_ya.txt @@ -1,5 +1,3 @@ -cloud/blockstore/tools/fs/cleanup-ext4-meta * -cloud/blockstore/libs/notify * cloud/blockstore/tests/fio/qemu-vhost-local-test * cloud/blockstore/tests/fio/qemu-vhost-null-test * cloud/blockstore/tests/rdma/rdma-test * diff --git a/.github/packer/github-runner.pkr.hcl b/.github/packer/github-runner.pkr.hcl index fe6abe0d0b..f0b23d67f8 100644 --- a/.github/packer/github-runner.pkr.hcl +++ b/.github/packer/github-runner.pkr.hcl @@ -60,12 +60,12 @@ build { "echo \"deb https://apt.llvm.org/${var.LSB_RELEASE}/ 
llvm-toolchain-${var.LSB_RELEASE}-14 main\" | sudo tee /etc/apt/sources.list.d/llvm.list >/dev/null",
       "sudo apt-get update",
       "echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections",
-      "sudo apt-get install -y --no-install-recommends git wget gnupg lsb-release curl xz-utils tzdata cmake python3-dev python3-pip ninja-build antlr3 m4 libidn11-dev libaio1 libaio-dev make clang-14 lld-14 llvm-14 file distcc s3cmd qemu-kvm",
+      "sudo apt-get install -y --no-install-recommends git wget gnupg lsb-release curl xz-utils tzdata cmake python3-dev python3-pip ninja-build antlr3 m4 libidn11-dev libaio1 libaio-dev make clang-14 lld-14 llvm-14 file distcc s3cmd qemu-kvm dpkg-dev",
       "sudo pip3 install conan==1.59 pytest==7.1.3 pyinstaller==5.13.2 pytest-timeout pytest-xdist==3.3.1 setproctitle==1.3.2 six pyyaml packaging cryptography grpcio grpcio-tools PyHamcrest tornado xmltodict pyarrow boto3 moto[server] psutil pygithub==1.59.1",
       "curl -L https://github.com/ccache/ccache/releases/download/v${var.CCACHE_VERSION}/ccache-${var.CCACHE_VERSION}-linux-${var.OS_ARCH}.tar.xz | sudo tar -xJ -C /usr/local/bin/ --strip-components=1 --no-same-owner ccache-${var.CCACHE_VERSION}-linux-${var.OS_ARCH}/ccache",
 
       # Other packages
-      "sudo apt-get install -y git jq tree tmux",
+      "sudo apt-get install -y git jq tree tmux atop",
 
       # Clean
       "rm -rf .sudo_as_admin_successful",
diff --git a/.github/scripts/index.py b/.github/scripts/index.py
new file mode 100644
index 0000000000..51d588449b
--- /dev/null
+++ b/.github/scripts/index.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python3
+import boto3
+import sys
+import os
+import logging
+from urllib.parse import unquote_plus, urlparse
+from concurrent.futures import ThreadPoolExecutor
+from jinja2 import Environment, FileSystemLoader
+from datetime import datetime
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s: %(levelname)s: %(message)s"
+)
+
+
+def list_files(client, bucket, prefix):
+    # Accumulate results across pages: each page holds at most 1000 keys,
+    # so returning from inside the loop would drop everything after page one.
+    paginator = client.get_paginator("list_objects_v2")
+    dirs, files = [], []
+    for page in paginator.paginate(Bucket=bucket, Prefix=prefix, Delimiter="/"):
+        dirs.extend(page.get("CommonPrefixes", []))
+        files.extend(
+            content
+            for content in page.get("Contents", [])
+            if not content["Key"].endswith("/")
+        )
+    return dirs, files
+
+
+def generate_absolute_url(bucket, key):
+    return f"https://{bucket}.website.nemax.nebius.cloud/{key}"
+
+
+def generate_index_html(bucket, files, dirs, current_prefix):
+    # Setup Jinja environment
+    env = Environment(
+        loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates"))
+    )
+    template = env.get_template("index.html")
+
+    # Prepare data
+    entries = []
+    if current_prefix != "":
+        parent_dir = os.path.dirname(current_prefix.rstrip("/"))
+        if parent_dir != "":
+            parent_dir += "/"
+        entries.append(
+            {
+                "name": "../",
+                "url": generate_absolute_url(bucket, parent_dir),
+                "type": "directory",
+                "date": "",
+            }
+        )
+    for d in dirs:
+        dir_name = d["Prefix"]
+        if dir_name != current_prefix:
+            dir_url = generate_absolute_url(bucket, unquote_plus(dir_name))
+            entries.append(
+                {
+                    "name": os.path.basename(dir_name[:-1]) + "/",
+                    "url": dir_url,
+                    "type": "directory",
+                    "date": "",
+                }
+            )
+    for f in files:
+        file_key = f["Key"]
+        if file_key != current_prefix + "index.html":
+            file_url = generate_absolute_url(bucket, unquote_plus(file_key))
+            file_date = datetime.fromtimestamp(f["LastModified"].timestamp()).strftime(
+                "%Y-%m-%d %H:%M:%S"
+            )
+            entries.append(
+                {
+                    "name": os.path.basename(file_key),
+                    "url": file_url,
+                    "type": "file",
+                    "date": file_date,
+                }
+            )
+
+    # Render template
+    current_timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    return template.render(
+        entries=entries, directory=current_prefix, timestamp=current_timestamp
+    )
+
+
+def upload_index_html(client, bucket, prefix, html_content):
+    try:
+        client.put_object(
+            Body=html_content,
+            Bucket=bucket,
+            Key=os.path.join(prefix, "index.html"),
+            ContentType="text/html",
+        )
+        logging.info(f"Successfully uploaded index.html to {bucket}/{prefix}")
+    except Exception as e:
+        logging.error(f"Error uploading index.html to {bucket}/{prefix}: {e}")
+
+
+def process_directory(bucket, prefix):
+    client = boto3.client("s3")
+    dirs, files = list_files(client, bucket, prefix)
+    html_content = generate_index_html(bucket, files, dirs, prefix)
+    upload_index_html(client, bucket, prefix, html_content)
+    return [d["Prefix"] for d in dirs]
+
+
+def main(s3_path):
+    parsed_url = urlparse(s3_path)
+    bucket = parsed_url.netloc
+    prefix = parsed_url.path.lstrip("/")
+    if not prefix.endswith("/"):
+        prefix += "/"
+
+    with ThreadPoolExecutor() as executor:
+        futures = {executor.submit(process_directory, bucket, prefix)}
+        while futures:
+            done, futures = futures, set()
+            for future in done:
+                new_prefixes = future.result()
+                for new_prefix in new_prefixes:
+                    futures.add(executor.submit(process_directory, bucket, new_prefix))
+
+
+if __name__ == "__main__":
+    if len(sys.argv) != 2:
+        logging.error("Usage: python index.py s3://bucket-name/path")
+        sys.exit(1)
+    s3_path = sys.argv[1]
+    main(s3_path)
diff --git a/.github/scripts/templates/index.html b/.github/scripts/templates/index.html
new file mode 100644
index 0000000000..383a7ef77d
--- /dev/null
+++ b/.github/scripts/templates/index.html
@@ -0,0 +1,74 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8">
+    <title>Directory Listing for {{ directory }}</title>
+</head>
+<body>
+    <h1>Directory listing for {{ directory }}</h1>
+    <ul>
+        {% for entry in entries %}
+        <li>
+            <a href="{{ entry.url }}" class="{{ entry.type }}">{{ entry.name }}</a>
+            {% if entry.date %}
+            <span class="date">{{ entry.date }}</span>
+            {% endif %}
+        </li>
+        {% endfor %}
+    </ul>
+    <hr>
+    <p>Generated on: {{ timestamp }}</p>
+</body>
+</html>
+ + + diff --git a/.github/scripts/tests/attach-logs.py b/.github/scripts/tests/attach-logs.py index 55bdb64f3d..a9f8267efb 100755 --- a/.github/scripts/tests/attach-logs.py +++ b/.github/scripts/tests/attach-logs.py @@ -7,8 +7,20 @@ from xml.etree import ElementTree as ET from pathlib import Path from typing import List -from log_parser import ctest_log_parser, parse_yunit_fails, parse_gtest_fails, log_reader, GTEST_MARK, YUNIT_MARK -from junit_utils import add_junit_log_property, create_error_testcase, create_error_testsuite, suite_case_iterator +from log_parser import ( + ctest_log_parser, + parse_yunit_fails, + parse_gtest_fails, + log_reader, + GTEST_MARK, + YUNIT_MARK, +) +from junit_utils import ( + add_junit_log_property, + create_error_testcase, + create_error_testsuite, + suite_case_iterator, +) from ctest_utils import CTestLog fn_shard_part_re = re.compile(r"-\d+$") @@ -132,16 +144,23 @@ def attach_to_unittests(ctest_log: CTestLog, unit_path): fn = f"{shard}-0000.xml" print(f"create {fn}") - testcases = [create_error_testcase(t.shard.name, t.classname, t.method, t.fn, t.url) for t in extra_logs] + testcases = [ + create_error_testcase(t.shard.name, t.classname, t.method, t.fn, t.url) + for t in extra_logs + ] testsuite = create_error_testsuite(testcases) - testsuite.write(os.path.join(unit_path, fn), xml_declaration=True, encoding="UTF-8") + testsuite.write( + os.path.join(unit_path, fn), xml_declaration=True, encoding="UTF-8" + ) def main(): parser = argparse.ArgumentParser() parser.add_argument("--url-prefix", default="./") - parser.add_argument("--decompress", action="store_true", default=False, help="decompress ctest log") + parser.add_argument( + "--decompress", action="store_true", default=False, help="decompress ctest log" + ) parser.add_argument("--ctest-report") parser.add_argument("--junit-reports-path") parser.add_argument("ctest_log") @@ -149,7 +168,11 @@ def main(): args = parser.parse_args() - ctest_log = extract_logs(log_reader(args.ctest_log, args.decompress), Path(args.out_log_dir), args.url_prefix) + ctest_log = extract_logs( + log_reader(args.ctest_log, args.decompress), + Path(args.out_log_dir), + args.url_prefix, + ) if ctest_log.has_logs: attach_to_ctest(ctest_log, args.ctest_report) diff --git a/.github/scripts/tests/ctest-postprocess.py b/.github/scripts/tests/ctest-postprocess.py index 8a0fd18feb..6a57df2d35 100755 --- a/.github/scripts/tests/ctest-postprocess.py +++ b/.github/scripts/tests/ctest-postprocess.py @@ -65,7 +65,9 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument("--dry-run", action="store_true", default=False) parser.add_argument("--filter-file", required=False) - parser.add_argument("--decompress", action="store_true", default=False, help="decompress ctest log") + parser.add_argument( + "--decompress", action="store_true", default=False, help="decompress ctest log" + ) parser.add_argument("ctest_log", type=str) parser.add_argument("ctest_junit_report") args = parser.parse_args() diff --git a/.github/scripts/tests/ctest_utils.py b/.github/scripts/tests/ctest_utils.py index c2f20eb9be..db83b47e55 100644 --- a/.github/scripts/tests/ctest_utils.py +++ b/.github/scripts/tests/ctest_utils.py @@ -56,7 +56,9 @@ def __init__(self): def add_shard(self, name, status, log_url): common_name = get_common_shard_name(name) - shard = self.storage[common_name][name] = self.name_shard[name] = CTestLogShard(name, status, log_url) + shard = self.storage[common_name][name] = self.name_shard[name] = CTestLogShard( + name, status, log_url + ) 
return shard def has_error_shard(self, name): diff --git a/.github/scripts/tests/fail-checker.py b/.github/scripts/tests/fail-checker.py index fcfd113d1e..0a874153ca 100755 --- a/.github/scripts/tests/fail-checker.py +++ b/.github/scripts/tests/fail-checker.py @@ -27,11 +27,35 @@ def check_for_fail(paths: List[str]): raise SystemExit(-1) +def get_fail_dirs(paths: List[str]): + failed_list = set() + error_list = set() + for path in paths: + for fn, suite, case in iter_xml_files(path): + is_failure = case.find("failure") is not None + is_error = case.find("error") is not None + test_name = f"{case.get('classname')}" + if is_failure: + failed_list.add(test_name) + elif is_error: + error_list.add(test_name) + + if failed_list or error_list: + for t in failed_list: + print(t) + for t in error_list: + print(t) + + def main(): parser = argparse.ArgumentParser() parser.add_argument("path", nargs="+", help="jsuite xml reports directories") + parser.add_argument("--paths-only", default=False, action="store_true") args = parser.parse_args() - check_for_fail(args.path) + if args.paths_only: + get_fail_dirs(args.path) + else: + check_for_fail(args.path) if __name__ == "__main__": diff --git a/.github/scripts/tests/generate-summary.py b/.github/scripts/tests/generate-summary.py index 49c3c472b9..1197ee8c0e 100755 --- a/.github/scripts/tests/generate-summary.py +++ b/.github/scripts/tests/generate-summary.py @@ -12,7 +12,6 @@ from typing import List, Dict from jinja2 import Environment, FileSystemLoader, StrictUndefined from junit_utils import get_property_value, iter_xml_files -from contextlib import nullcontext class TestStatus(Enum): @@ -33,6 +32,7 @@ class TestResult: status: TestStatus log_urls: Dict[str, str] elapsed: float + is_timed_out: bool @property def status_display(self): @@ -64,8 +64,13 @@ def full_name(self): def from_junit(cls, testcase): classname, name = testcase.get("classname"), testcase.get("name") + is_timed_out = False if testcase.find("failure") is not None: status = TestStatus.FAIL + text = testcase.find("failure").text + if text is not None and "Killed by timeout" in text: + print(f"{classname}, {name} is_timed_out = True") + is_timed_out = True elif testcase.find("error") is not None: status = TestStatus.ERROR elif get_property_value(testcase, "mute") is not None: @@ -80,6 +85,9 @@ def from_junit(cls, testcase): "log": get_property_value(testcase, "url:log"), "stdout": get_property_value(testcase, "url:stdout"), "stderr": get_property_value(testcase, "url:stderr"), + "backtrace": get_property_value(testcase, "url:backtrace"), + "recipe_stderr": get_property_value(testcase, "url:recipe stderr"), + "recipe_stdout": get_property_value(testcase, "url:recipe stdout"), } log_urls = {k: v for k, v in log_urls.items() if v} @@ -89,9 +97,11 @@ def from_junit(cls, testcase): elapsed = float(elapsed) except (TypeError, ValueError): elapsed = 0 - print(f"Unable to cast elapsed time for {classname}::{name} value={elapsed!r}") + print( + f"Unable to cast elapsed time for {classname}::{name} value={elapsed!r}" + ) - return cls(classname, name, status, log_urls, elapsed) + return cls(classname, name, status, log_urls, elapsed, is_timed_out) class TestSummaryLine: @@ -145,6 +155,10 @@ def add_line(self, line: TestSummaryLine): self.is_failed |= line.is_failed self.lines.append(line) + @property + def is_empty(self): + return len(self.lines) == 0 + def render_line(self, items): return f"| {' | '.join(items)} |" @@ -154,7 +168,11 @@ def render(self, add_footnote=False): footnote_url = 
f"{github_srv}/{repo}/tree/main/.github/config" - footnote = "[^1]" if add_footnote else f'[?]({footnote_url} "All mute rules are defined here")' + footnote = ( + "[^1]" + if add_footnote + else f'[?]({footnote_url} "All mute rules are defined here")' + ) columns = ["TESTS", "PASSED", "ERRORS", "FAILED", "SKIPPED", f"MUTED{footnote}"] @@ -217,7 +235,9 @@ def render_pm(value, url, diff=None): def render_testlist_html(rows, fn): TEMPLATES_PATH = os.path.join(os.path.dirname(__file__), "templates") - env = Environment(loader=FileSystemLoader(TEMPLATES_PATH), undefined=StrictUndefined) + env = Environment( + loader=FileSystemLoader(TEMPLATES_PATH), undefined=StrictUndefined + ) status_test = {} has_any_log = set() @@ -230,24 +250,48 @@ def render_testlist_html(rows, fn): for status in status_test.keys(): status_test[status].sort(key=attrgetter("full_name")) - status_order = [TestStatus.ERROR, TestStatus.FAIL, TestStatus.SKIP, TestStatus.MUTE, TestStatus.PASS] + status_order = [ + TestStatus.ERROR, + TestStatus.FAIL, + TestStatus.SKIP, + TestStatus.MUTE, + TestStatus.PASS, + ] # remove status group without tests status_order = [s for s in status_order if s in status_test] - content = env.get_template("summary.html").render(status_order=status_order, tests=status_test, has_any_log=has_any_log) + content = env.get_template("summary.html").render( + status_order=status_order, tests=status_test, has_any_log=has_any_log + ) with open(fn, "w") as fp: fp.write(content) -def write_summary(summary: TestSummary): - summary_fn = os.environ.get("GITHUB_STEP_SUMMARY") +def write_summary(summary: TestSummary, summary_out_env_path=""): + if summary_out_env_path == "": + summary_fn = os.environ.get("GITHUB_STEP_SUMMARY") + else: + summary_fn = summary_out_env_path + + if summary_fn: + fp = open(summary_fn, "at") + else: + fp = sys.stdout - with open(summary_fn, "at") if summary_fn else nullcontext(sys.stdout) as fp: # noqa: SIM115 + if summary.is_empty: + fp.write( + ":red_circle: Test run completed, no test results found. Please check build logs." + ) + else: for line in summary.render(add_footnote=True): fp.write(f"{line}\n") - fp.write("\n") + + fp.write("\n") + + if summary_fn: + fp.close() def gen_summary(summary_url_prefix, summary_out_folder, paths): @@ -260,17 +304,29 @@ def gen_summary(summary_url_prefix, summary_out_folder, paths): test_result = TestResult.from_junit(case) summary_line.add(test_result) + if not summary_line.tests: + continue + report_url = f"{summary_url_prefix}{html_fn}" - render_testlist_html(summary_line.tests, os.path.join(summary_out_folder, html_fn)) + render_testlist_html( + summary_line.tests, os.path.join(summary_out_folder, html_fn) + ) summary_line.add_report(html_fn, report_url) summary.add_line(summary_line) return summary -def get_comment_text(pr: PullRequest, summary: TestSummary, build_preset: str, test_history_url: str): - if summary.is_failed: +def get_comment_text( + pr: PullRequest, summary: TestSummary, build_preset: str, test_history_url: str +): + if summary.is_empty: + return [ + f":red_circle: **{build_preset}**: Test run completed, no test results found for commit {pr.head.sha}. " + f"Please check build logs." 
+ ] + elif summary.is_failed: result = f":red_circle: **{build_preset}**: some tests FAILED" else: result = f":green_circle: **{build_preset}**: all tests PASSED" @@ -286,7 +342,13 @@ def get_comment_text(pr: PullRequest, summary: TestSummary, build_preset: str, t return body -def update_pr_comment(run_number: int, pr: PullRequest, summary: TestSummary, build_preset: str, test_history_url: str): +def update_pr_comment( + run_number: int, + pr: PullRequest, + summary: TestSummary, + build_preset: str, + test_history_url: str, +): header = f"" header_re = re.compile(header.format(r"(\d+)")) @@ -310,18 +372,23 @@ def update_pr_comment(run_number: int, pr: PullRequest, summary: TestSummary, bu body = "\n".join(body) - if comment is None: - pr.create_issue_comment(body) - else: - comment.edit(body) + pr.create_issue_comment(body) def main(): parser = argparse.ArgumentParser() parser.add_argument("--summary-out-path", required=True) + parser.add_argument( + "--summary-out-env-path", + required=False, + help="File to write out summary instead of GITHUB_STEP_SUMMARY", + default="", + ) parser.add_argument("--summary-url-prefix", required=True) parser.add_argument("--test-history-url", required=False) - parser.add_argument("--build-preset", default="default-linux-x86-64-relwithdebinfo", required=False) + parser.add_argument( + "--build-preset", default="default-linux-x86-64-relwithdebinfo", required=False + ) parser.add_argument("args", nargs="+", metavar="TITLE html_out path") args = parser.parse_args() @@ -333,7 +400,7 @@ def main(): title_path = list(zip(paths, paths, paths)) summary = gen_summary(args.summary_url_prefix, args.summary_out_path, title_path) - write_summary(summary) + write_summary(summary, args.summary_out_env_path) if os.environ.get("GITHUB_EVENT_NAME") in ("pull_request", "pull_request_target"): gh = Github(auth=GithubAuth.Token(os.environ["GITHUB_TOKEN"])) @@ -343,7 +410,9 @@ def main(): run_number = int(os.environ.get("GITHUB_RUN_NUMBER")) pr = gh.create_from_raw_data(PullRequest, event["pull_request"]) - update_pr_comment(run_number, pr, summary, args.build_preset, args.test_history_url) + update_pr_comment( + run_number, pr, summary, args.build_preset, args.test_history_url + ) if __name__ == "__main__": diff --git a/.github/scripts/tests/log_parser.py b/.github/scripts/tests/log_parser.py index 36f05000b0..1a8388d684 100644 --- a/.github/scripts/tests/log_parser.py +++ b/.github/scripts/tests/log_parser.py @@ -83,7 +83,9 @@ def ctest_log_parser(fp: TextIO): break if target: - if not (start_re.match(line) or status_re.match(line) or finish_re.match(line)): + if not ( + start_re.match(line) or status_re.match(line) or finish_re.match(line) + ): buf.append(line.rstrip()) else: yield target, reason, buf diff --git a/.github/scripts/tests/mute_utils.py b/.github/scripts/tests/mute_utils.py index 0372679063..d722d1b2d4 100644 --- a/.github/scripts/tests/mute_utils.py +++ b/.github/scripts/tests/mute_utils.py @@ -99,7 +99,9 @@ def dec_attr(node, attr, value): return op_attr(node, attr, operator.sub, value) -def update_suite_info(root, n_remove_failures=None, n_remove_errors=None, n_skipped=None): +def update_suite_info( + root, n_remove_failures=None, n_remove_errors=None, n_skipped=None +): if n_remove_failures: dec_attr(root, "failures", n_remove_failures) diff --git a/.github/scripts/tests/split-junit.py b/.github/scripts/tests/split-junit.py new file mode 100755 index 0000000000..fecbc69545 --- /dev/null +++ b/.github/scripts/tests/split-junit.py @@ -0,0 +1,45 @@ 
+#!/usr/bin/env python3 +import os +import sys +import argparse +from pathlib import Path +from xml.etree import ElementTree as ET + + +def save_suite(suite, path): + root = ET.Element("testsuites") + root.append(suite) + tree = ET.ElementTree(root) + tree.write(path) + + +def do_split(fn, out_dir): + try: + tree = ET.parse(fn) + except ET.ParseError as e: + print(f"Unable to parse {fn}: {e}", file=sys.stderr) + sys.exit(1) + + root = tree.getroot() + + for n, suite in enumerate(root.iter("testsuite")): + part_fn = Path(out_dir).joinpath(f"part_{n}.xml") + print(f"write {part_fn}") + save_suite(suite, part_fn) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("-o", dest="out_dir", required=True) + parser.add_argument("in_file", type=argparse.FileType("r")) + + args = parser.parse_args() + + if not os.path.isdir(args.out_dir): + os.makedirs(args.out_dir) + + do_split(args.in_file, args.out_dir) + + +if __name__ == "__main__": + main() diff --git a/.github/scripts/tests/templates/summary.html b/.github/scripts/tests/templates/summary.html index 530af6660f..964b1815b1 100644 --- a/.github/scripts/tests/templates/summary.html +++ b/.github/scripts/tests/templates/summary.html @@ -4,7 +4,7 @@ th { text-transform: uppercase; } - + th, td { padding: 5px; } @@ -55,7 +55,7 @@

{{ status.name }} ({{ tests[status] | length }})
                 <td>{{ t.full_name }}</td>
                 <td>{{ t.elapsed_display }}</td>
-                <td>{{ t.status_display }}</td>
+                <td>{{ t.status_display }}{% if t.is_timed_out %}(TIMEOUT){% endif %}</td>
                 {% if status in has_any_log %}
@@ -65,7 +65,7 @@ {{ status.name }} ({{ tests[status] | length }})
{% endfor %} {% else %} -   + LOGS ARE EMPTY {% endif %} {% endif %} diff --git a/.github/scripts/tests/transform-ya-junit.py b/.github/scripts/tests/transform-ya-junit.py index 9b9d074072..39eac048c0 100755 --- a/.github/scripts/tests/transform-ya-junit.py +++ b/.github/scripts/tests/transform-ya-junit.py @@ -4,6 +4,7 @@ import json import os import sys +import shutil import urllib.parse from xml.etree import ElementTree as ET from mute_utils import mute_target, pattern_to_re @@ -62,7 +63,9 @@ def load(self, subdir): return for folder in os.listdir(test_results_dir): - fn = os.path.join(self.out_root, test_results_dir, folder, "ytest.report.trace") + fn = os.path.join( + self.out_root, test_results_dir, folder, "ytest.report.trace" + ) if not os.path.isfile(fn): continue @@ -72,16 +75,41 @@ def load(self, subdir): event = json.loads(line.strip()) if event["name"] == "subtest-finished": event = event["value"] - cls = event["class"] + class_event = event["class"] subtest = event["subtest"] - cls = cls.replace("::", ".") - self.traces[(cls, subtest)] = event + class_event = class_event.replace("::", ".") + log_print(f"loaded ({class_event}, {subtest})") + self.traces[(class_event, subtest)] = event + elif event["name"] == "chunk-event": + event = event["value"] + chunk_idx = event["chunk_index"] + chunk_total = event["nchunks"] + test_name = subdir + log_print(f"loaded ({test_name}, {chunk_idx}, {chunk_total})") + self.traces[(test_name, chunk_idx, chunk_total)] = event - def has(self, cls, name): - return (cls, name) in self.traces + def has(self, class_event, name): + return (class_event, name) in self.traces - def get_logs(self, cls, name): - trace = self.traces.get((cls, name)) + def get_logs(self, class_event, name): + trace = self.traces.get((class_event, name)) + + if not trace: + return {} + + logs = trace["logs"] + + result = {} + for k, path in logs.items(): + if k == "logsdir": + continue + + result[k] = path.replace("$(BUILD_ROOT)", self.out_root) + + return result + + def get_logs_chunks(self, suite, idx, total): + trace = self.traces.get((suite, idx, total)) if not trace: return {} @@ -102,6 +130,7 @@ def filter_empty_logs(logs): result = {} for k, v in logs.items(): if not os.path.isfile(v) or os.stat(v).st_size == 0: + log_print(f"skipping log file {v} as empty or missing") continue result[k] = v return result @@ -130,7 +159,8 @@ def save_log(build_root, fn, out_dir, log_url_prefix, trunc_size): break out_fp.write(buf) else: - os.symlink(fn, out_fn) + if fn != out_fn: + shutil.copy(fn, out_fn) quoted_fpath = urllib.parse.quote(fpath) return f"{log_url_prefix}{quoted_fpath}" @@ -162,14 +192,34 @@ def transform( log_print("mute", suite_name, test_name) mute_target(case) - if is_fail and "." in test_name: - test_cls, test_method = test_name.rsplit(".", maxsplit=1) - logs = filter_empty_logs(traces.get_logs(test_cls, test_method)) + if is_fail: + if "." 
in test_name: + test_cls, test_method = test_name.rsplit(".", maxsplit=1) + logs = filter_empty_logs(traces.get_logs(test_cls, test_method)) + + elif "chunk" in test_name: + if "sole" in test_name: + chunk_idx = 0 + chunks_total = 1 + else: + pattern = r"\[(\d+)/(\d+)\]" + match = re.search(pattern, test_name) + chunk_idx = int(match.group(1)) + chunks_total = int(match.group(2)) + logs = filter_empty_logs( + traces.get_logs_chunks(suite_name, chunk_idx, chunks_total) + ) + else: + continue if logs: - log_print(f"add {list(logs.keys())!r} properties for {test_cls}.{test_method}") + log_print( + f"add {list(logs.keys())!r} properties for {suite_name}/{test_name}" + ) for name, fn in logs.items(): - url = save_log(ya_out_dir, fn, log_out_dir, log_url_prefix, log_trunc_size) + url = save_log( + ya_out_dir, fn, log_out_dir, log_url_prefix, log_trunc_size + ) add_junit_link_property(case, name, url) if save_inplace: @@ -195,10 +245,12 @@ def main(): "--log-truncate-size", dest="log_trunc_size", type=int, - default=134217728, + default=0, help="truncate log after specific size, 0 disables truncation", ) - parser.add_argument("--ya-out", help="ya make output dir (for searching logs and artifacts)") + parser.add_argument( + "--ya-out", help="ya make output dir (for searching logs and artifacts)" + ) parser.add_argument("in_file", type=argparse.FileType("r")) args = parser.parse_args() diff --git a/.github/workflows/README.md b/.github/workflows/README.md index 61111b653d..08000c0b23 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -31,7 +31,7 @@ Here is few examples how to use it: act -W .github/workflows/pr-github-actions.yaml workflow_dispatch ``` -You can add input values: +You can add input values in `.input` file: ```bash echo <