Skip to content

Commit 720c696

Browse files
author
Kernel Patches Daemon
committed
adding ci files
1 parent 46d38f4 commit 720c696

File tree

56 files changed

+4432
-18
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

56 files changed

+4432
-18
lines changed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
name: 'run-veristat'
description: 'Run veristat benchmark'
inputs:
  veristat_output:
    description: 'Veristat output filepath'
    required: true
  baseline_name:
    description: 'Veristat baseline cache name'
    required: true
runs:
  using: "composite"
  steps:
    # Always publish the current run's veristat log as a build artifact.
    - uses: actions/upload-artifact@v4
      with:
        name: ${{ inputs.baseline_name }}
        if-no-files-found: error
        path: ${{ github.workspace }}/${{ inputs.veristat_output }}

    # For pull request:
    # - get baseline log from cache
    # - compare it to current run
    - if: ${{ github.event_name == 'pull_request' }}
      uses: actions/cache/restore@v4
      with:
        key: ${{ inputs.baseline_name }}
        restore-keys: |
          ${{ inputs.baseline_name }}-
        path: '${{ github.workspace }}/${{ inputs.baseline_name }}'

    - if: ${{ github.event_name == 'pull_request' }}
      name: Show veristat comparison
      shell: bash
      run: ./.github/scripts/compare-veristat-results.sh
      env:
        BASELINE_PATH: ${{ github.workspace }}/${{ inputs.baseline_name }}
        VERISTAT_OUTPUT: ${{ inputs.veristat_output }}

    # For push: just put baseline log to cache
    - if: ${{ github.event_name == 'push' }}
      shell: bash
      run: |
        mv "${{ github.workspace }}/${{ inputs.veristat_output }}" \
           "${{ github.workspace }}/${{ inputs.baseline_name }}"

    - if: ${{ github.event_name == 'push' }}
      uses: actions/cache/save@v4
      with:
        key: ${{ inputs.baseline_name }}-${{ github.run_id }}
        path: '${{ github.workspace }}/${{ inputs.baseline_name }}'
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
#!/bin/bash

# Compare the current veristat run against a cached baseline log.
# Expects BASELINE_PATH, VERISTAT_OUTPUT and GITHUB_STEP_SUMMARY in the
# environment (set by the run-veristat composite action).

if [[ -f "${BASELINE_PATH}" ]]; then
    # Baseline available: produce a CSV comparison and render it into
    # the job summary via the helper script.
    selftests/bpf/veristat \
        --output-format csv \
        --emit file,prog,verdict,states \
        --compare "${BASELINE_PATH}" "${VERISTAT_OUTPUT}" > compare.csv

    python3 ./.github/scripts/veristat_compare.py compare.csv
else
    # No cached baseline yet (e.g. first run on a branch): note that in
    # the step summary and dump the raw results instead. Not an error.
    echo "# No ${BASELINE_PATH} available" >> "${GITHUB_STEP_SUMMARY}"

    echo "No ${BASELINE_PATH} available"
    echo "Printing veristat results"
    cat "${VERISTAT_OUTPUT}"
fi

.github/scripts/download-gcc-bpf.sh

+30
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
#!/bin/bash
#
# Download and install the latest GCC BPF toolchain release.
#
# Usage: download-gcc-bpf.sh <owner/repo> <install-dir>
#   <owner/repo>   GitHub repository hosting the toolchain releases
#   <install-dir>  directory to (re)create with the extracted toolchain

set -euo pipefail

GCC_BPF_RELEASE_GH_REPO=$1
# Quote "$2" so install paths containing spaces survive word splitting.
INSTALL_DIR=$(realpath "$2")

cd /tmp

tag=$(gh release list -L 1 -R "${GCC_BPF_RELEASE_GH_REPO}" --json tagName -q .[].tagName)
if [[ -z "$tag" ]]; then
  echo "Could not find latest GCC BPF release at ${GCC_BPF_RELEASE_GH_REPO}"
  exit 1
fi

url="https://github.com/${GCC_BPF_RELEASE_GH_REPO}/releases/download/${tag}/${tag}.tar.zst"
echo "Downloading $url"
wget -q "$url"

tarball=${tag}.tar.zst
# First entry of the archive listing is the top-level directory.
# "|| true" guards against the SIGPIPE "head" can cause under pipefail.
dir=$(tar tf "$tarball" | head -1 || true)
# If listing failed, $dir is empty; fail with a clear message instead of
# letting the "mv" below error out confusingly.
if [[ -z "$dir" ]]; then
  echo "Could not determine top-level directory of ${tarball}"
  exit 1
fi

echo "Extracting $tarball ..."
tar -I zstd -xf "$tarball" && rm -f "$tarball"

rm -rf "$INSTALL_DIR"
mv -v "$dir" "$INSTALL_DIR"

cd -
30+

.github/scripts/matrix.py

+194
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,194 @@
1+
#!/usr/bin/env python3
2+
3+
import os
4+
import dataclasses
5+
import json
6+
7+
from enum import Enum
8+
from typing import Any, Dict, List, Final, Set, Union
9+
10+
# Repositories whose CI runs on project-managed (self-hosted) infrastructure.
MANAGED_OWNER: Final[str] = "kernel-patches"
MANAGED_REPOS: Final[Set[str]] = {
    f"{MANAGED_OWNER}/bpf",
    f"{MANAGED_OWNER}/vmtest",
}

# Runner labels used inside managed repos; forks fall back to the
# GitHub-hosted DEFAULT_RUNNER instead.
DEFAULT_SELF_HOSTED_RUNNER_TAGS: Final[List[str]] = ["self-hosted", "docker-noble-main"]
DEFAULT_RUNNER: Final[str] = "ubuntu-24.04"
# LLVM version used when a config does not ask for a specific one.
DEFAULT_LLVM_VERSION: Final[int] = 17
19+
20+
21+
class Arch(str, Enum):
    """
    CPU architecture supported by CI.

    Inherits from str so members compare equal to, and serialize as,
    their plain string values (used directly in the JSON matrix).
    """

    AARCH64 = "aarch64"
    S390X = "s390x"
    X86_64 = "x86_64"
29+
30+
31+
class Compiler(str, Enum):
    """Toolchain family used to build the kernel and selftests."""

    GCC = "gcc"
    LLVM = "llvm"
34+
35+
36+
@dataclasses.dataclass
class Toolchain:
    """A compiler family plus (for LLVM) the version to use."""

    compiler: Compiler
    # This is relevant ONLY for LLVM and should not be required for GCC
    version: int

    @property
    def short_name(self) -> str:
        """Compiler family name without a version suffix, e.g. "llvm"."""
        return str(self.compiler.value)

    @property
    def full_name(self) -> str:
        """Versioned name for LLVM (e.g. "llvm-17"); bare name for GCC."""
        if self.compiler != Compiler.GCC:
            return f"{self.short_name}-{self.version}"
        return self.short_name

    def to_dict(self) -> Dict[str, Union[str, int]]:
        """JSON-serializable representation used in the build matrix."""
        return dict(
            name=self.short_name,
            fullname=self.full_name,
            version=self.version,
        )
59+
60+
61+
@dataclasses.dataclass
class BuildConfig:
    """One build-matrix entry: an arch/toolchain pair plus flags that
    control which jobs and tests run for it."""

    arch: Arch
    toolchain: Toolchain
    kernel: str = "LATEST"
    run_veristat: bool = False
    parallel_tests: bool = False
    build_release: bool = False

    @property
    def runs_on(self) -> List[str]:
        """Runner labels for this config's test jobs."""
        if not is_managed_repo():
            return [DEFAULT_RUNNER]
        return DEFAULT_SELF_HOSTED_RUNNER_TAGS + [self.arch.value]

    @property
    def build_runs_on(self) -> List[str]:
        """Runner labels for this config's build jobs."""
        if not is_managed_repo():
            return [DEFAULT_RUNNER]
        return ["codebuild"]

    @property
    def tests(self) -> Dict[str, Any]:
        """Matrix "include" fragment listing the test runners to launch."""
        selected = [
            "test_progs",
            "test_progs_parallel",
            "test_progs_no_alu32",
            "test_progs_no_alu32_parallel",
            "test_verifier",
        ]

        # test_maps is excluded on s390x.
        if self.arch.value != "s390x":
            selected.append("test_maps")

        # cpuv4 tests are only added for toolchain versions >= 18.
        if self.toolchain.version >= 18:
            selected.append("test_progs_cpuv4")

        # if self.arch in [Arch.X86_64, Arch.AARCH64]:
        #     selected.append("sched_ext")

        # Don't run GCC BPF runner, because too many tests are failing
        # See: https://lore.kernel.org/bpf/[email protected]/
        # if self.arch == Arch.X86_64:
        #     selected.append("test_progs-bpf_gcc")

        # Unless parallel tests were requested, drop the *_parallel variants.
        if not self.parallel_tests:
            selected = [name for name in selected if not name.endswith("parallel")]

        return {"include": [generate_test_config(name) for name in selected]}

    def to_dict(self) -> Dict[str, Any]:
        """JSON-serializable representation consumed by the workflows."""
        return {
            "arch": self.arch.value,
            "toolchain": self.toolchain.to_dict(),
            "kernel": self.kernel,
            "run_veristat": self.run_veristat,
            "parallel_tests": self.parallel_tests,
            "build_release": self.build_release,
            "runs_on": self.runs_on,
            "tests": self.tests,
            "build_runs_on": self.build_runs_on,
        }
125+
126+
127+
def is_managed_repo() -> bool:
    """Return True when running inside one of the managed kernel-patches repos.

    Reads GITHUB_REPOSITORY_OWNER and GITHUB_REPOSITORY (set by GitHub
    Actions). Missing variables are treated as "not managed" rather than
    raising KeyError, so the script can also run outside of Actions.
    """
    owner = os.environ.get("GITHUB_REPOSITORY_OWNER", "")
    repo = os.environ.get("GITHUB_REPOSITORY", "")
    return owner == MANAGED_OWNER and repo in MANAGED_REPOS
132+
133+
134+
def set_output(name, value):
    """Write an output variable to the GitHub output file.

    Appends a "name=value" line to the file named by the GITHUB_OUTPUT
    environment variable, which is how GitHub Actions steps expose
    outputs to later steps.
    """
    out_path = os.getenv("GITHUB_OUTPUT")
    line = f"{name}={value}\n"
    with open(out_path, "a", encoding="utf-8") as out_file:
        out_file.write(line)
138+
139+
140+
def generate_test_config(test: str) -> Dict[str, Union[str, int]]:
    """Create the matrix configuration entry for the provided test name."""
    parallel = test.endswith("_parallel")
    # While in experimental mode, parallel jobs may get stuck
    # anywhere, including in user space where the kernel won't detect
    # a problem and panic. We add a second layer of (smaller) timeouts
    # here such that if we get stuck in a parallel run, we hit this
    # timeout and fail without affecting the overall job success (as
    # would be the case if we hit the job-wide timeout). For
    # non-experimental jobs, 360 is the default which will be
    # superseded by the overall workflow timeout (but we need to
    # specify something).
    timeout = 30 if parallel else 360
    return {
        "test": test,
        # Parallel runs are experimental: don't fail the job on their errors.
        "continue_on_error": parallel,
        "timeout_minutes": timeout,
    }
158+
159+
160+
if __name__ == "__main__":
    # Full matrix; trimmed below when running outside the managed repos.
    configs = [
        BuildConfig(
            arch=Arch.X86_64,
            toolchain=Toolchain(compiler=Compiler.GCC, version=DEFAULT_LLVM_VERSION),
            run_veristat=True,
            parallel_tests=True,
        ),
        BuildConfig(
            arch=Arch.X86_64,
            toolchain=Toolchain(compiler=Compiler.LLVM, version=DEFAULT_LLVM_VERSION),
            build_release=True,
        ),
        BuildConfig(
            arch=Arch.X86_64,
            toolchain=Toolchain(compiler=Compiler.LLVM, version=18),
            build_release=True,
        ),
        BuildConfig(
            arch=Arch.AARCH64,
            toolchain=Toolchain(compiler=Compiler.GCC, version=DEFAULT_LLVM_VERSION),
        ),
        BuildConfig(
            arch=Arch.S390X,
            toolchain=Toolchain(compiler=Compiler.GCC, version=DEFAULT_LLVM_VERSION),
        ),
    ]

    # Outside of managed repositories only run on x86_64
    if not is_managed_repo():
        configs = [config for config in configs if config.arch == Arch.X86_64]

    payload = {"include": [config.to_dict() for config in configs]}
    json_matrix = json.dumps(payload)
    # Pretty-print for the Actions log; emit the compact form as the output.
    print(json.dumps(payload, indent=4))
    set_output("build_matrix", json_matrix)
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,75 @@
1+
#!/usr/bin/env python3
2+
3+
import unittest
4+
from typing import Iterable, List
5+
6+
from ..veristat_compare import parse_table, VeristatFields
7+
8+
9+
def gen_csv_table(records: Iterable[str]) -> List[str]:
    """Prepend the veristat CSV header row to the given record lines."""
    header = ",".join(VeristatFields.headers())
    return [header] + list(records)
14+
15+
16+
class TestVeristatCompare(unittest.TestCase):
    """Unit tests for veristat_compare.parse_table.

    CSV columns (per gen_csv_table) are: file, prog, baseline verdict,
    new verdict, verdict match, baseline states, new states, states diff.
    """

    def test_parse_table_ignore_new_prog(self):
        # A program present only in the new run (baseline columns N/A)
        # is not a change and must be dropped from the comparison table.
        table = gen_csv_table(
            [
                "prog_file.bpf.o,prog_name,N/A,success,N/A,N/A,1,N/A",
            ]
        )
        veristat_info = parse_table(table)
        self.assertEqual(veristat_info.table, [])
        self.assertFalse(veristat_info.changes)
        self.assertFalse(veristat_info.new_failures)

    def test_parse_table_ignore_removed_prog(self):
        # A program removed since the baseline (new-run columns N/A)
        # is likewise ignored.
        table = gen_csv_table(
            [
                "prog_file.bpf.o,prog_name,success,N/A,N/A,1,N/A,N/A",
            ]
        )
        veristat_info = parse_table(table)
        self.assertEqual(veristat_info.table, [])
        self.assertFalse(veristat_info.changes)
        self.assertFalse(veristat_info.new_failures)

    def test_parse_table_new_failure(self):
        # success -> failure must be reported, flagged "(!!)", and set
        # both the changes and new_failures indicators.
        table = gen_csv_table(
            [
                "prog_file.bpf.o,prog_name,success,failure,MISMATCH,1,1,+0 (+0.00%)",
            ]
        )
        veristat_info = parse_table(table)
        self.assertEqual(
            veristat_info.table,
            [["prog_file.bpf.o", "prog_name", "success -> failure (!!)", "+0.00 %"]],
        )
        self.assertTrue(veristat_info.changes)
        self.assertTrue(veristat_info.new_failures)

    def test_parse_table_new_changes(self):
        # Verdict improvements and state-count changes count as changes
        # but not as new failures.
        table = gen_csv_table(
            [
                "prog_file.bpf.o,prog_name,failure,success,MISMATCH,0,0,+0 (+0.00%)",
                "prog_file.bpf.o,prog_name_increase,failure,failure,MATCH,1,2,+1 (+100.00%)",
                "prog_file.bpf.o,prog_name_decrease,success,success,MATCH,1,1,-1 (-100.00%)",
            ]
        )
        veristat_info = parse_table(table)
        self.assertEqual(
            veristat_info.table,
            [
                ["prog_file.bpf.o", "prog_name", "failure -> success", "+0.00 %"],
                ["prog_file.bpf.o", "prog_name_increase", "failure", "+100.00 %"],
                ["prog_file.bpf.o", "prog_name_decrease", "success", "-100.00 %"],
            ],
        )
        self.assertTrue(veristat_info.changes)
        self.assertFalse(veristat_info.new_failures)
72+
73+
74+
# Allow running this test module directly, in addition to test discovery.
if __name__ == "__main__":
    unittest.main()

.github/scripts/tmpfsify-workspace.sh

+21
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
#!/bin/bash
#
# Remount the GitHub workspace directory on a tmpfs to speed up I/O-heavy
# jobs. The original content is kept in "<workspace>.backup" and copied
# onto the freshly mounted tmpfs.

set -x -euo pipefail

TMPFS_SIZE=20 # GB
MEM_TOTAL=$(awk '/MemTotal/ {print int($2/1024)}' /proc/meminfo) # MB

# sanity check: total mem is at least double TMPFS_SIZE
# Use [[ ]] with a quoted expansion so an empty MEM_TOTAL fails cleanly
# instead of producing a malformed test expression.
if [[ "${MEM_TOTAL}" -lt $((TMPFS_SIZE * 1024 * 2)) ]]; then
  echo "tmpfsify-workspace.sh: will not allocate tmpfs, total memory is too low (${MEM_TOTAL}MB)"
  exit 0
fi

dir="$(basename "$GITHUB_WORKSPACE")"
cd "$(dirname "$GITHUB_WORKSPACE")"
mv "${dir}" "${dir}.backup"
mkdir "${dir}"
sudo mount -t tmpfs -o "size=${TMPFS_SIZE}G" tmpfs "${dir}"
rsync -a "${dir}.backup/" "${dir}"
cd -
21+

0 commit comments

Comments
 (0)