-
Notifications
You must be signed in to change notification settings - Fork 83
312 lines (281 loc) · 11.5 KB
/
tests.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ** Dynamic analysis **
#
# Dynamic analysis requires the Share community platform to run during evaluation. Currently Catroweb's dynamic tests
# are mainly UI-tests and a few unit tests. Hence, the tests take multiple hours. To reduce the run time, the tests
# are executed in multiple independent jobs. However, all jobs use the same Catroweb Docker image to reduce build time.
#
name: 'Dynamic analysis'
# Run on every creation, update and merge of a pull request.
# However, prevent checks from being initiated twice if the pull request is a branch on the same repository
# (e.g. Dependabot branches).
on:
  pull_request:
    branches:
      - '**' # all branches
      - '!main' # excludes main to prevent double test runs during release prep.
jobs:
  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Build:
  #
  # - In order to save computation time the "app.catroweb" image is only built once during this build phase.
  #   Other jobs can re-use this image to reduce their build time. With several jobs + the matrix build total
  #   computation time for this workflow can be highly reduced. This is important since we do not have unlimited
  #   resources/machines to run the jobs.
  #
  build:
    name: Build Catroweb Image
    runs-on: ubuntu-latest
    outputs:
      # NOTE: actions/upload-artifact@v4 only exposes `artifact-id` and `artifact-url`;
      # the previously referenced `artifact-path` output does not exist, so this value
      # was always empty. Downstream jobs download the artifact by name anyway.
      image: ${{ steps.upload-artifact.outputs.artifact-url }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Cache Docker layers
        uses: actions/cache@v4
        with:
          # NOTE(review): buildx does not write its cache to these paths by default —
          # verify the compose file actually points `cache-from`/`cache-to` here.
          path: |
            ~/.docker/cache
            ~/.docker/buildx-cache
          key: ${{ runner.os }}-docker-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-docker-
      - name: Build Catroweb App Image
        run: |
          cd docker
          docker compose -f docker-compose.test.yaml build app.catroweb
      # Persist the image as a gzipped tarball so the test jobs can `docker load` it
      # instead of rebuilding.
      - name: Save Docker Image
        run: |
          cd docker
          docker save app.catroweb | gzip > catroweb-image.tar.gz
      - name: Upload Docker Image
        uses: actions/upload-artifact@v4
        id: upload-artifact
        with:
          name: catroweb-image
          path: docker/catroweb-image.tar.gz
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PhpUnit:
#
# - the tests are executed in our docker since we have many integration tests which access the database, etc.
# One might consider to strictly separate integration and unit tests. Units tests could be executed using
# composer scripts only to reduce the runtime to a few seconds. No build needed + dependencies can be easy cached.
#
# - A code coverage report is pushed to the artifacts where it can be downloaded directly on GitHub.
# Keep in mind the report is not including the tests written for behat.
#
tests_phpunit:
name: PHPUnit
needs: build
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Download Docker Image
uses: actions/download-artifact@v4
with:
name: catroweb-image
path: docker
- name: Load Docker Image
run: |
cd docker
gunzip -c catroweb-image.tar.gz | docker load
- name: Build and Start Docker Containers
run: |
cd docker
docker compose -f docker-compose.test.yaml up -d
sleep 60
- name: Run PHPUnit Tests
run: |
docker exec app.catroweb bin/phpunit --coverage-html tests/TestReports/CoverageReports/PhpUnit \
--coverage-clover=tests/TestReports/CoverageReports/PhpUnit/coverage.xml
- name: Upload PHPUnit Code Coverage Report
uses: actions/upload-artifact@v4
if: always()
with:
name: PhpUnitTestReport
path: tests/TestReports/CoverageReports/PhpUnit
- name: Upload Coverage to Codecov
uses: codecov/codecov-action@v5
with:
file: tests/TestReports/CoverageReports/PhpUnit/coverage.xml
flags: phpunit # optional
name: codecov-umbrella # optional
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Behat:
#
# - This job runs all Behat suites parallel using a matrix strategy. This is done since integrations tests which
# are interacting with a browser and the GUI are extremely slow. With a total run-time of over an hour, using this
# approach the run-time can be drastically reduced. The disadvantage, we can't easily create a coverage report
# for the behat scenarios. Something that is considered to be a bad practice anyway since Behat is intended to
# deliver scenario automation in BDD.
#
# - Behat and especially UI tests using Mink tend to flaky.
# A test will only be marked as failed if a test fails more than 3 times in a row.
# Flaky tests should be reduced to a minimum in the codebase!
#
# - Behat only reruns failed tests - Not pending/missing tests or those with exceptions!
# A pending/missing test will NOT result in a failed pipeline!
# This is the reason why the explicit check for the log file had to be added.
#
# - To ease the debugging, besides a log file, screenshots of failing tests are uploaded as artifacts.
#
# Notes:
# - Check the behat.yaml when changing / creating new suites
# - suites will finish their work even if another suite fails (fail-fast: false)
#
tests_behat:
name: Behat
needs: build
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
testSuite:
- api-authentication
- api-mediapackage
- api-projects
- api-user
- api-notifications
- api-utility
- api-translation
- api-tag
- api-studio
- api-deprecated-general
- api-deprecated-project
- api-deprecated-profile
- api-deprecated-upload
- web-achievements
- web-admin
- web-apk-generation
- web-authentication
- web-code-statistics
- web-code-view
- web-comments
- web-general
- web-media-library
- web-notifications
- web-profile
- web-project
- web-project-details
- web-project-list
- web-reactions
- web-recommendations
- web-remix-system
- web-scratch-integration
- web-search
- web-studio
- web-top-bar
- web-translation
- web-system
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Download Docker Image
uses: actions/download-artifact@v4
with:
name: catroweb-image
path: docker
- name: Load Docker Image
run: |
cd docker
gunzip -c catroweb-image.tar.gz | docker load
- name: Build and Start Docker Containers
run: |
cd docker
docker compose -f docker-compose.test.yaml up -d
sleep 30
# Test Run
- name: Behat ${{ matrix.testSuite }} tests
id: test-run
continue-on-error: true
# - The output will of the tests will be piped to the stdout and into the log file.
# - A return code != 0 stops the execution of further commands in the pipeline.
# "tee" always returns 0, even if the behat test fails. Therefore, we need to exit with the first entry of
# the pipe status, which contains the correct exit code.
run: |
docker exec app.catroweb chmod -R 777 var
docker exec app.catroweb bin/behat -s ${{ matrix.testSuite }} -f pretty \
|& tee tests/TestReports/Behat/${{ matrix.testSuite }}.log; \
TEST_RUN_EXIT_CODE=${PIPESTATUS[0]}
echo "test-run-exit-code=$TEST_RUN_EXIT_CODE" > $GITHUB_ENV
( exit $TEST_RUN_EXIT_CODE )
# Missing steps are not rerun by behat, without this step they will be lost in the process
# We must explicitly kill the pipeline if the log contains undefined steps
- name: Check that suite has NO missing steps
if: always()
id: missing-check
run: |
if grep -q 'has missing steps. Define them with these snippets:' tests/TestReports/Behat/${{ matrix.testSuite }}.log; then
cat tests/TestReports/Behat/${{ matrix.testSuite }}.log
exit 1
fi
# Pending steps are not rerun by behat, without this step they will be lost in the process
# We must explicitly kill the pipeline if the log contains pending steps
- name: Check that suite has NO pending steps
if: always()
id: pending-check
run: |
if grep -q 'pending)' tests/TestReports/Behat/${{ matrix.testSuite }}.log; then
cat tests/TestReports/Behat/${{ matrix.testSuite }}.log
exit 1
fi
# Chrome exception are problems that can't be fixed with a rerun. However, we can try to run the whole suite one more time
- name: Check for Chrome Exceptions
if: always()
id: chrome-exception-check
run: |
if grep -q '\[DMore\\ChromeDriver\\StreamReadException\]' tests/TestReports/Behat/${{ matrix.testSuite }}.log; then
cat tests/TestReports/Behat/${{ matrix.testSuite }}.log
docker exec app.catroweb bin/behat -s ${{ matrix.testSuite }} -f pretty
fi
- name: Upload Behat Test Artifacts
uses: actions/upload-artifact@v4
with:
name: logs_${{ matrix.testSuite }}
path: |
tests/TestReports/Behat/${{ matrix.testSuite }}.log
tests/TestReports/TestScreenshots
- name: Upload Coverage to Codecov
uses: codecov/codecov-action@v5
with:
file: tests/TestReports/CoverageReports/Behat/coverage.xml
flags: behat # optional
name: codecov-umbrella # optional
# fail_ci_if_error: true # optional (default = false)
- name: Rerun Behat Tests (1st Attempt)
if: env.test-run-exit-code != '0'
id: test-rerun-1
continue-on-error: true
run: |
docker exec app.catroweb chmod -R 777 var
docker exec app.catroweb bin/behat -s ${{ matrix.testSuite }} --rerun
TEST_RUN_EXIT_CODE=$?
echo "test-run-exit-code=$TEST_RUN_EXIT_CODE" > $GITHUB_ENV
( exit $TEST_RUN_EXIT_CODE )
- name: Rerun Behat Tests (2nd Attempt)
if: env.test-run-exit-code != '0'
id: test-rerun-2
continue-on-error: true
run: |
docker exec app.catroweb bin/behat -s ${{ matrix.testSuite }} --rerun
TEST_RUN_EXIT_CODE=$?
echo "test-run-exit-code=$TEST_RUN_EXIT_CODE" > $GITHUB_ENV
( exit $TEST_RUN_EXIT_CODE )
- name: Rerun Behat Tests (3rd Attempt)
if: env.test-run-exit-code != '0'
id: test-rerun-3
run: |
docker exec app.catroweb bin/behat -f pretty -s ${{ matrix.testSuite }} --rerun
## Failure debugging
- name: Debug Failure
if: failure()
run: |
docker ps -a
echo "--- App Logs ---"
docker logs app.catroweb
echo "--- DB Logs ---"
docker logs db.catroweb.test
echo "--- Chrome Logs ---"
docker logs chrome.catroweb