name: smart test selection
on:
  pull_request:
defaults:
  run:
    # the default default is:
    # bash --noprofile --norc -eo pipefail {0}
    shell: bash --noprofile --norc -eo pipefail -ux {0}
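    # -u errors on undefined variables; -x traces each command into the job log,
    # which is what makes the bare `:` debug lines later in this file visible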
jobs:
  coverage-ats:
    runs-on: ubuntu-latest
    # Map a step output to a job output
    outputs:
      ATS_TESTS_TO_RUN: ${{ steps.label_analysis.outputs.ATS_TESTS_TO_RUN }}
      ATS_TESTS_TO_SKIP: ${{ steps.label_analysis.outputs.ATS_TESTS_TO_SKIP }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2
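          # depth 2 keeps the parent commit available locally for the
          # `git rev-parse` call in the Label Analysis step below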
      - name: Set up Python 3.10.10
        uses: actions/setup-python@v4
        with:
          python-version: "3.10.10"
      # We need the setup to collect the list of tests properly
      - name: Download Codecov CLI
        run: |
          pip install codecov-cli==0.4.1
      # Creates the commit and report objects in codecov
      - name: Codecov startup
        run: |
          codecovcli create-commit
          codecovcli create-report
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_STAGING_TOKEN }}
      # Sends static analysis information to codecov
      - name: Static Analysis
        run: |
          codecovcli static-analysis --token=${CODECOV_STATIC_TOKEN} \
            --folders-to-exclude .artifacts \
            --folders-to-exclude .github \
            --folders-to-exclude .venv \
            --folders-to-exclude static \
            --folders-to-exclude bin
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_STAGING_TOKEN }}
          CODECOV_STATIC_TOKEN: ${{ secrets.CODECOV_STAGING_STATIC_TOKEN }}
      - name: Install requirements
        run: pip install -r requirements.txt
      # Run label analysis in dry-run mode to get the list of tests to run.
      # The base commit is the parent commit (apparently commits on master don't exist in codecov).
      # The CLI might fail the label-analysis command for various reasons;
      # if that happens we still want some post-processing (the else branch below) to generate the correct outputs.
      - name: Label Analysis
        id: label_analysis
        run: |
          BASE_COMMIT=$(git rev-parse ${{ github.sha }}^)
          echo $BASE_COMMIT
          output=$(codecovcli --codecov-yml-path=codecov.yml label-analysis --dry-run --token=${CODECOV_STATIC_TOKEN} --base-sha=${BASE_COMMIT}) || true
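          # `:` is a no-op, but with `-x` tracing it echoes the raw label-analysis
          # JSON (keys: runner_options, ats_tests_to_run, ats_tests_to_skip) into
          # the job log for debugging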
          : $output
          if [ -n "${output}" ];
          then
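            # `@json | @sh` re-serializes the selected list as a JSON string and
            # single-quotes it for the shell, so it survives $GITHUB_OUTPUT and can
            # later be interpolated verbatim into another run script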
            echo ATS_TESTS_TO_RUN=$(jq <<< $output '.runner_options + .ats_tests_to_run | @json | @sh' --raw-output) >> "$GITHUB_OUTPUT"
            echo ATS_TESTS_TO_SKIP=$(jq <<< $output '.runner_options + .ats_tests_to_skip | @json | @sh' --raw-output) >> "$GITHUB_OUTPUT"
            testcount() { jq <<< $output ".$1 | length"; }
            run_count=$(testcount ats_tests_to_run)
            skip_count=$(testcount ats_tests_to_skip)
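            # tee prints the line to the log and writes it to the step summary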
tee <<< "Selected $run_count / $(($run_count + $skip_count)) tests to run" "$GITHUB_STEP_SUMMARY"
          else
            tee <<< "ATS failed. Can't get the list of tests to run. Falling back to all tests" "$GITHUB_STEP_SUMMARY"
            # We must not forget to add the search options to the fallback command;
            # otherwise pytest might run more tests than expected.
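            # Multiline values have to use the NAME<<DELIMITER heredoc form of $GITHUB_OUTPUT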
            echo 'ATS_TESTS_TO_RUN<<EOF' >> $GITHUB_OUTPUT
            jq -c @json <<< '[
              "--cov-context=test",
              "core"
            ]' >> $GITHUB_OUTPUT
            echo 'EOF' >> $GITHUB_OUTPUT
            echo ATS_TESTS_TO_SKIP="'[]'" >> "$GITHUB_OUTPUT"
            echo "::error::ATS failed"
            exit 1
          fi
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_STAGING_TOKEN }}
          CODECOV_STATIC_TOKEN: ${{ secrets.CODECOV_STAGING_STATIC_TOKEN }}
  debug:
    runs-on: ubuntu-latest
    needs: coverage-ats
    if: ${{ always() }}
    steps:
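      # The bare `:` commands below just expand the job outputs; `-x` tracing
      # then shows their exact values in the logs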
      - name: Debug ATS_TESTS_TO_RUN
        run: |
          : ${{ needs.coverage-ats.outputs.ATS_TESTS_TO_RUN }}
      - name: Debug ATS_TESTS_TO_SKIP
        run: |
          : ${{ needs.coverage-ats.outputs.ATS_TESTS_TO_SKIP }}
  debug-runs:
    runs-on: ubuntu-latest
    needs: coverage-ats
    if: ${{ always() }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: Set up Python 3.10.10
        uses: actions/setup-python@v4
        with:
          python-version: "3.10.10"
      - name: Install requirements
        run: pip install -r requirements.txt
      - name: Run pytest - multiple threads - ATS_TESTS_TO_RUN
        run: |
          length_of_tests=$(jq <<< ${{ needs.coverage-ats.outputs.ATS_TESTS_TO_RUN }} 'length')
          # The 1st value doesn't count, it's '--cov-context=test' (hence -gt 1)
          if [ $length_of_tests -gt 1 ]; then
            echo "Running $length_of_tests tests"
            jq <<< ${{ needs.coverage-ats.outputs.ATS_TESTS_TO_RUN }} 'join("\u0000")' --raw-output0 | xargs -r0 pytest -n2
          else
            echo "No tests to run"
          fi
      - name: Run pytest - multiple threads - ATS_TESTS_TO_SKIP
        run: |
          length_of_tests=$(jq <<< ${{ needs.coverage-ats.outputs.ATS_TESTS_TO_SKIP }} 'length')
          # The 1st value doesn't count, it's '--cov-context=test' (hence -gt 1)
          if [ $length_of_tests -gt 1 ]; then
            echo "Running $length_of_tests tests"
            jq <<< ${{ needs.coverage-ats.outputs.ATS_TESTS_TO_SKIP }} 'join("\u0000")' --raw-output0 | xargs -r0 pytest -n2
          else
            echo "No tests to run"
          fi