matrix.py
#!/usr/bin/env python3
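"""Generate the BPF CI build and test matrix.

Assembles a list of BuildConfig entries, serializes them to JSON, prints the
result for inspection, and exports it as the `build_matrix` output via
GITHUB_OUTPUT for consumption by GitHub Actions workflows.
"""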
import os
import dataclasses
import json

from enum import Enum
from typing import Any, Dict, List, Final, Set, Union

MANAGED_OWNER: Final[str] = "kernel-patches"
MANAGED_REPOS: Final[Set[str]] = {
    f"{MANAGED_OWNER}/bpf",
    f"{MANAGED_OWNER}/vmtest",
}

DEFAULT_SELF_HOSTED_RUNNER_TAGS: Final[List[str]] = ["self-hosted", "docker-noble-main"]
DEFAULT_RUNNER: Final[str] = "ubuntu-24.04"
DEFAULT_LLVM_VERSION: Final[int] = 17


class Arch(str, Enum):
    """
    CPU architecture supported by CI.
    """

    AARCH64 = "aarch64"
    S390X = "s390x"
    X86_64 = "x86_64"


class Compiler(str, Enum):
    GCC = "gcc"
    LLVM = "llvm"


@dataclasses.dataclass
class Toolchain:
    compiler: Compiler
    # This is relevant ONLY for LLVM and should not be required for GCC
    version: int

    @property
    def short_name(self) -> str:
        return str(self.compiler.value)

    @property
    def full_name(self) -> str:
        if self.compiler == Compiler.GCC:
            return self.short_name

        return f"{self.short_name}-{self.version}"

    def to_dict(self) -> Dict[str, Union[str, int]]:
        return {
            "name": self.short_name,
            "fullname": self.full_name,
            "version": self.version,
        }


@dataclasses.dataclass
class BuildConfig:
    arch: Arch
    toolchain: Toolchain
    kernel: str = "LATEST"
    run_veristat: bool = False
    parallel_tests: bool = False
    build_release: bool = False

    @property
    def runs_on(self) -> List[str]:
        if is_managed_repo():
            return DEFAULT_SELF_HOSTED_RUNNER_TAGS + [self.arch.value]
        else:
            return [DEFAULT_RUNNER]

    @property
    def build_runs_on(self) -> List[str]:
        if is_managed_repo():
            return ["codebuild"]
        else:
            return [DEFAULT_RUNNER]

    @property
    def tests(self) -> Dict[str, Any]:
        tests_list = [
            "test_progs",
            "test_progs_parallel",
            "test_progs_no_alu32",
            "test_progs_no_alu32_parallel",
            "test_verifier",
        ]

        if self.arch.value != "s390x":
            tests_list.append("test_maps")

        if self.toolchain.version >= 18:
            tests_list.append("test_progs_cpuv4")

        # if self.arch in [Arch.X86_64, Arch.AARCH64]:
        #     tests_list.append("sched_ext")

        # Don't run GCC BPF runner, because too many tests are failing
        # See: https://lore.kernel.org/bpf/[email protected]/
        # if self.arch == Arch.X86_64:
        #     tests_list.append("test_progs-bpf_gcc")

        if not self.parallel_tests:
            tests_list = [test for test in tests_list if not test.endswith("parallel")]

        return {"include": [generate_test_config(test) for test in tests_list]}

    def to_dict(self) -> Dict[str, Any]:
        return {
            "arch": self.arch.value,
            "toolchain": self.toolchain.to_dict(),
            "kernel": self.kernel,
            "run_veristat": self.run_veristat,
            "parallel_tests": self.parallel_tests,
            "build_release": self.build_release,
            "runs_on": self.runs_on,
            "tests": self.tests,
            "build_runs_on": self.build_runs_on,
        }


def is_managed_repo() -> bool:
    """Return True when running in one of the kernel-patches managed repositories."""
    return (
        os.environ["GITHUB_REPOSITORY_OWNER"] == MANAGED_OWNER
        and os.environ["GITHUB_REPOSITORY"] in MANAGED_REPOS
    )


def set_output(name, value):
    """Write an output variable to the GitHub output file."""
    with open(os.getenv("GITHUB_OUTPUT"), "a", encoding="utf-8") as file:
        file.write(f"{name}={value}\n")


def generate_test_config(test: str) -> Dict[str, Union[str, int]]:
    """Create the configuration for the provided test."""
    is_parallel = test.endswith("_parallel")
    config = {
        "test": test,
        "continue_on_error": is_parallel,
        # While in experimental mode, parallel jobs may get stuck
        # anywhere, including in user space where the kernel won't detect
        # a problem and panic. We add a second layer of (smaller) timeouts
        # here such that if we get stuck in a parallel run, we hit this
        # timeout and fail without affecting the overall job success (as
        # would be the case if we hit the job-wide timeout). For
        # non-experimental jobs, 360 is the default which will be
        # superseded by the overall workflow timeout (but we need to
        # specify something).
        "timeout_minutes": 30 if is_parallel else 360,
    }
    return config


if __name__ == "__main__":
    matrix = [
        BuildConfig(
            arch=Arch.X86_64,
            toolchain=Toolchain(compiler=Compiler.GCC, version=DEFAULT_LLVM_VERSION),
            run_veristat=True,
            parallel_tests=True,
        ),
        BuildConfig(
            arch=Arch.X86_64,
            toolchain=Toolchain(compiler=Compiler.LLVM, version=DEFAULT_LLVM_VERSION),
            build_release=True,
        ),
        BuildConfig(
            arch=Arch.X86_64,
            toolchain=Toolchain(compiler=Compiler.LLVM, version=18),
            build_release=True,
        ),
        BuildConfig(
            arch=Arch.AARCH64,
            toolchain=Toolchain(compiler=Compiler.GCC, version=DEFAULT_LLVM_VERSION),
        ),
        BuildConfig(
            arch=Arch.S390X,
            toolchain=Toolchain(compiler=Compiler.GCC, version=DEFAULT_LLVM_VERSION),
        ),
    ]

    # Outside of managed repositories, only run on x86_64
    if not is_managed_repo():
        matrix = [config for config in matrix if config.arch == Arch.X86_64]

    json_matrix = json.dumps({"include": [config.to_dict() for config in matrix]})
    print(json.dumps(json.loads(json_matrix), indent=4))
    set_output("build_matrix", json_matrix)
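
# Illustrative, abbreviated shape of the emitted `build_matrix` JSON for an
# unmanaged repository (only the x86_64 configs are kept); the values follow
# directly from the defaults and to_dict() methods above:
#
#   {
#     "include": [
#       {
#         "arch": "x86_64",
#         "toolchain": {"name": "gcc", "fullname": "gcc", "version": 17},
#         "kernel": "LATEST",
#         "run_veristat": true,
#         "parallel_tests": true,
#         "build_release": false,
#         "runs_on": ["ubuntu-24.04"],
#         "tests": {"include": [{"test": "test_progs",
#                                "continue_on_error": false,
#                                "timeout_minutes": 360}, ...]},
#         "build_runs_on": ["ubuntu-24.04"]
#       },
#       ...
#     ]
#   }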