-
Notifications
You must be signed in to change notification settings - Fork 14
176 lines (153 loc) · 5.94 KB
/
viash-test.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
name: viash test

# Run tests on every push (any branch) and on pull requests.
on:
  pull_request:
  push:
    branches:
      - '**'
jobs:
  # Gatekeeper job: avoids duplicate CI runs. When a branch already has an
  # open PR, the push-triggered run is suppressed (the pull_request-triggered
  # run covers it), except on main or when the commit message contains
  # 'ci force'. Downstream jobs read the 'run_ci' output.
  run_ci_check_job:
    runs-on: ubuntu-latest
    outputs:
      run_ci: ${{ steps.github_cli.outputs.check }}
    steps:
      - name: 'Check if branch has an existing pull request and the trigger was a push'
        id: github_cli
        env:
          GITHUB_TOKEN: ${{ secrets.GTHB_PAT }}
        run: |
          pull_request=$(gh pr list -R ${{ github.repository }} -H ${{ github.ref_name }} --json url --state open --limit 1 | jq '.[0].url')
          # If the branch has a PR and this run was triggered by a push event, do not run
          if [[ "$pull_request" != "null" && "$GITHUB_REF_NAME" != "main" && "${{ github.event_name == 'push' }}" == "true" && "${{ !contains(github.event.head_commit.message, 'ci force') }}" == "true" ]]; then
            echo "check=false" >> $GITHUB_OUTPUT
          else
            echo "check=true" >> $GITHUB_OUTPUT
          fi
# phase 1
list:
needs: run_ci_check_job
env:
s3_bucket: s3://openpipelines-data/
runs-on: ubuntu-latest
if: "needs.run_ci_check_job.outputs.run_ci == 'true'"
outputs:
matrix: ${{ steps.set_matrix.outputs.matrix }}
cache_key: ${{ steps.cache.outputs.cache_key }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: viash-io/viash-actions/setup@v4
- name: Check if all config can be parsed if there is no unicode support
run: |
LANG=C viash ns list > /dev/null
- uses: viash-io/viash-actions/project/sync-and-cache-s3@v4
id: cache
with:
s3_bucket: $s3_bucket
dest_path: resources_test
cache_key_prefix: resources_test__
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v39
with:
separator: ";"
diff_relative: true
- id: ns_list
uses: viash-io/viash-actions/ns-list@v4
with:
platform: docker
format: json
- id: ns_list_filtered
uses: viash-io/viash-actions/project/detect-changed-components@v4
with:
input_file: "${{ steps.ns_list.outputs.output_file }}"
- id: set_matrix
run: |
echo "matrix=$(jq -c '[ .[] |
{
"name": (.functionality.namespace + "/" + .functionality.name),
"config": .info.config,
"dir": .info.config | capture("^(?<dir>.*\/)").dir
}
]' ${{ steps.ns_list_filtered.outputs.output_file }} )" >> $GITHUB_OUTPUT
# phase 2
viash_test:
needs: list
if: ${{ needs.list.outputs.matrix != '[]' && needs.list.outputs.matrix != '' }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
component: ${{ fromJson(needs.list.outputs.matrix) }}
steps:
# Remove unnecessary files to free up space. Otherwise, we get 'no space left on device.'
- uses: data-intuitive/reclaim-the-bytes@v2
- uses: actions/checkout@v4
- uses: viash-io/viash-actions/setup@v4
# use cache
- name: Cache resources data
id: restore_cache
uses: actions/cache/restore@v3
env:
SEGMENT_DOWNLOAD_TIMEOUT_MINS: 5
with:
path: resources_test
key: ${{ needs.list.outputs.cache_key }}
- name: Sync if caching failed
shell: bash
if: steps.restore_cache.outputs.cache-hit != 'true'
run: |
readarray -t resources < <(viash config view --format json "${{ matrix.component.config }}" | jq -r -c '(.info.config | capture("^(?<dir>.*\/)").dir) as $dir | .functionality.test_resources | map(select(.type == "file")) | map($dir + .path) | unique | .[]')
for resource in "${resources[@]}"; do
if [[ $resource == *"resources_test"* ]]; then
relative_path=${resource#*resources_test/}
relative_path_trailing_slash_removed=${relative_path%/}
s3_path="s3://openpipelines-data/$relative_path_trailing_slash_removed"
s3_lookup=$(AWS_EC2_METADATA_DISABLED=true aws s3 ls --no-sign-request "$s3_path" 2>&1)
extra_args=()
if [[ $s3_lookup =~ .*/$ ]]; then
extra_args+=("--recursive")
fi
AWS_EC2_METADATA_DISABLED=true \
aws s3 cp \
"$s3_path" \
"$resource" \
--no-sign-request \
"${extra_args[@]}"
fi
done
- name: Remove unused test resources to save space (only when restoring from cache)
if: steps.restore_cache.outputs.cache-hit == 'true'
shell: bash
run: |
readarray -t resources < <(viash config view --format json "${{ matrix.component.config }}" | jq -r -c '(.info.config | capture("^(?<dir>.*\/)").dir) as $dir | .functionality.test_resources | map(select(.type == "file")) | map($dir + .path) | unique | .[]')
to_not_remove=()
for resource in "${resources[@]}"; do
if [[ $resource == *"resources_test"* ]]; then
relative_path=${resource#*resources_test/}
relative_path_trailing_slash_removed=${relative_path%/}
to_not_remove+=("-path" "./resources_test/$relative_path_trailing_slash_removed" "-prune" "-o")
fi
done
# Remove last prune and -o
if (( ${#errors[@]} )); then
unset 'to_not_remove[${#to_not_remove[@]}-1]'
unset 'to_not_remove[${#to_not_remove[@]}-1]'
to_not_remove+=( "(" "${to_not_remove[@]}" ")" "-prune" "-o")
fi
find ./resources_test/ "${to_not_remove[@]}" -type f -exec rm {} +
- name: Login to the nvidia container registry
uses: docker/login-action@v3
env:
NVIDIA_PASSWORD: ${{ secrets.NVIDIA_PASSWORD }}
if: ${{ env.NVIDIA_PASSWORD != '' }}
with:
registry: nvcr.io
username: $oauthtoken
password: ${{ env.NVIDIA_PASSWORD }}
- name: Run test
timeout-minutes: 30
run: |
viash test \
"${{ matrix.component.config }}" \
--cpus 2 \
--memory "6gb"