Commit 9bb300a

kkdwivedi authored and Kernel Patches Daemon committed

selftests/bpf: Add tests for rqspinlock

Introduce selftests that trigger AA and ABBA deadlocks, and test the edge case where the held-locks table runs out of entries, since we then fall back to the timeout as the final line of defense. Also exercise the verifier's AA detection where applicable.

Signed-off-by: Kumar Kartikeya Dwivedi <[email protected]>

1 parent dd9d3f0 commit 9bb300a
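
For reference, these new cases plug into the standard BPF selftest runner. Assuming a built selftests tree under tools/testing/selftests/bpf, they would typically be invoked through test_progs' name filter, for example:

  ./test_progs -t res_spin_lock

The -t substring should match both the test_res_spin_lock_success and test_res_spin_lock_failure entry points added in this commit.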

File tree

4 files changed

+532
-0
lines changed

4 files changed

+532
-0
lines changed
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "res_spin_lock.skel.h"
+#include "res_spin_lock_fail.skel.h"
+
+void test_res_spin_lock_failure(void)
+{
+	RUN_TESTS(res_spin_lock_fail);
+}
+
+static volatile int skip;
+
+static void *spin_lock_thread(void *arg)
+{
+	int err, prog_fd = *(u32 *) arg;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 10000,
+	);
+
+	while (!READ_ONCE(skip)) {
+		err = bpf_prog_test_run_opts(prog_fd, &topts);
+		ASSERT_OK(err, "test_run");
+		ASSERT_OK(topts.retval, "test_run retval");
+	}
+	pthread_exit(arg);
+}
+
+void test_res_spin_lock_success(void)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 1,
+	);
+	struct res_spin_lock *skel;
+	pthread_t thread_id[16];
+	int prog_fd, i, err;
+	void *ret;
+
+	skel = res_spin_lock__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "res_spin_lock__open_and_load"))
+		return;
+	/* AA deadlock */
+	prog_fd = bpf_program__fd(skel->progs.res_spin_lock_test);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "error");
+	ASSERT_OK(topts.retval, "retval");
+
+	prog_fd = bpf_program__fd(skel->progs.res_spin_lock_test_held_lock_max);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "error");
+	ASSERT_OK(topts.retval, "retval");
+
+	/* Multi-threaded ABBA deadlock. */
+
+	prog_fd = bpf_program__fd(skel->progs.res_spin_lock_test_AB);
+	for (i = 0; i < 16; i++) {
+		int err;
+
+		err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd);
+		if (!ASSERT_OK(err, "pthread_create"))
+			goto end;
+	}
+
+	topts.retval = 0;
+	topts.repeat = 1000;
+	int fd = bpf_program__fd(skel->progs.res_spin_lock_test_BA);
+	while (!topts.retval && !err && !READ_ONCE(skel->bss->err)) {
+		err = bpf_prog_test_run_opts(fd, &topts);
+	}
+
+	WRITE_ONCE(skip, true);
+
+	for (i = 0; i < 16; i++) {
+		if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join"))
+			goto end;
+		if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd"))
+			goto end;
+	}
+
+	ASSERT_EQ(READ_ONCE(skel->bss->err), -EDEADLK, "timeout err");
+	ASSERT_OK(err, "err");
+	ASSERT_EQ(topts.retval, -EDEADLK, "timeout");
+end:
+	res_spin_lock__destroy(skel);
+	return;
+}

tools/testing/selftests/bpf/progs/irq.c

+53
@@ -11,6 +11,9 @@ extern void bpf_local_irq_save(unsigned long *) __weak __ksym;
 extern void bpf_local_irq_restore(unsigned long *) __weak __ksym;
 extern int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void *unsafe_ptr__ign, u64 flags) __weak __ksym;
 
+struct bpf_res_spin_lock lockA __hidden SEC(".data.A");
+struct bpf_res_spin_lock lockB __hidden SEC(".data.B");
+
 SEC("?tc")
 __failure __msg("arg#0 doesn't point to an irq flag on stack")
 int irq_save_bad_arg(struct __sk_buff *ctx)
@@ -510,4 +513,54 @@ int irq_sleepable_global_subprog_indirect(void *ctx)
 	return 0;
 }
 
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_ooo_lock_cond_inv(struct __sk_buff *ctx)
+{
+	unsigned long flags1, flags2;
+
+	if (bpf_res_spin_lock_irqsave(&lockA, &flags1))
+		return 0;
+	if (bpf_res_spin_lock_irqsave(&lockB, &flags2)) {
+		bpf_res_spin_unlock_irqrestore(&lockA, &flags1);
+		return 0;
+	}
+
+	bpf_res_spin_unlock_irqrestore(&lockB, &flags1);
+	bpf_res_spin_unlock_irqrestore(&lockA, &flags2);
+	return 0;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed")
+int irq_wrong_kfunc_class_1(struct __sk_buff *ctx)
+{
+	unsigned long flags1;
+
+	if (bpf_res_spin_lock_irqsave(&lockA, &flags1))
+		return 0;
+	/* For now, bpf_local_irq_restore is not allowed in critical section,
+	 * but this test ensures error will be caught with kfunc_class when it's
+	 * opened up. Tested by temporarily permitting this kfunc in critical
+	 * section.
+	 */
+	bpf_local_irq_restore(&flags1);
+	bpf_res_spin_unlock_irqrestore(&lockA, &flags1);
+	return 0;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed")
+int irq_wrong_kfunc_class_2(struct __sk_buff *ctx)
+{
+	unsigned long flags1, flags2;
+
+	bpf_local_irq_save(&flags1);
+	if (bpf_res_spin_lock_irqsave(&lockA, &flags2))
+		return 0;
+	bpf_local_irq_restore(&flags2);
+	bpf_res_spin_unlock_irqrestore(&lockA, &flags1);
+	return 0;
+}
+
 char _license[] SEC("license") = "GPL";
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define EDEADLK 35
+#define ETIMEDOUT 110
+
+struct arr_elem {
+	struct bpf_res_spin_lock lock;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 64);
+	__type(key, int);
+	__type(value, struct arr_elem);
+} arrmap SEC(".maps");
+
+struct bpf_res_spin_lock lockA __hidden SEC(".data.A");
+struct bpf_res_spin_lock lockB __hidden SEC(".data.B");
+
+SEC("tc")
+int res_spin_lock_test(struct __sk_buff *ctx)
+{
+	struct arr_elem *elem1, *elem2;
+	int r;
+
+	elem1 = bpf_map_lookup_elem(&arrmap, &(int){0});
+	if (!elem1)
+		return -1;
+	elem2 = bpf_map_lookup_elem(&arrmap, &(int){0});
+	if (!elem2)
+		return -1;
+
+	r = bpf_res_spin_lock(&elem1->lock);
+	if (r)
+		return r;
+	if (!bpf_res_spin_lock(&elem2->lock)) {
+		bpf_res_spin_unlock(&elem2->lock);
+		bpf_res_spin_unlock(&elem1->lock);
+		return -1;
+	}
+	bpf_res_spin_unlock(&elem1->lock);
+	return 0;
+}
+
+SEC("tc")
+int res_spin_lock_test_AB(struct __sk_buff *ctx)
+{
+	int r;
+
+	r = bpf_res_spin_lock(&lockA);
+	if (r)
+		return !r;
+	/* Only unlock if we took the lock. */
+	if (!bpf_res_spin_lock(&lockB))
+		bpf_res_spin_unlock(&lockB);
+	bpf_res_spin_unlock(&lockA);
+	return 0;
+}
+
+int err;
+
+SEC("tc")
+int res_spin_lock_test_BA(struct __sk_buff *ctx)
+{
+	int r;
+
+	r = bpf_res_spin_lock(&lockB);
+	if (r)
+		return !r;
+	if (!bpf_res_spin_lock(&lockA))
+		bpf_res_spin_unlock(&lockA);
+	else
+		err = -EDEADLK;
+	bpf_res_spin_unlock(&lockB);
+	return err ?: 0;
+}
+
+SEC("tc")
+int res_spin_lock_test_held_lock_max(struct __sk_buff *ctx)
+{
+	struct bpf_res_spin_lock *locks[48] = {};
+	struct arr_elem *e;
+	u64 time_beg, time;
+	int ret = 0, i;
+
+	_Static_assert(ARRAY_SIZE(((struct rqspinlock_held){}).locks) == 31,
+		       "RES_NR_HELD assumed to be 31");
+
+	for (i = 0; i < 34; i++) {
+		int key = i;
+
+		/* We cannot pass in i as it will get spilled/filled by the compiler and
+		 * loses bounds in verifier state.
+		 */
+		e = bpf_map_lookup_elem(&arrmap, &key);
+		if (!e)
+			return 1;
+		locks[i] = &e->lock;
+	}
+
+	for (; i < 48; i++) {
+		int key = i - 2;
+
+		/* We cannot pass in i as it will get spilled/filled by the compiler and
+		 * loses bounds in verifier state.
+		 */
+		e = bpf_map_lookup_elem(&arrmap, &key);
+		if (!e)
+			return 1;
+		locks[i] = &e->lock;
+	}
+
+	time_beg = bpf_ktime_get_ns();
+	for (i = 0; i < 34; i++) {
+		if (bpf_res_spin_lock(locks[i]))
+			goto end;
+	}
+
+	/* Trigger AA, after exhausting entries in the held lock table. This
+	 * time, only the timeout can save us, as AA detection won't succeed.
+	 */
+	if (!bpf_res_spin_lock(locks[34])) {
+		bpf_res_spin_unlock(locks[34]);
+		ret = 1;
+		goto end;
+	}
+
+end:
+	for (i = i - 1; i >= 0; i--)
+		bpf_res_spin_unlock(locks[i]);
+	time = bpf_ktime_get_ns() - time_beg;
+	/* Time spent should be easily above our limit (1/4 s), since AA
+	 * detection won't be expedited due to lack of held lock entry.
+	 */
+	return ret ?: (time > 1000000000 / 4 ? 0 : 1);
+}
+
+char _license[] SEC("license") = "GPL";
