Skip to content

Commit 8ea3c5e

Browse files
kkdwivedi (Kumar Kartikeya Dwivedi)
authored and
Kernel Patches Daemon
committed
bpf: Maintain FIFO property for rqspinlock unlock
Since out-of-order unlocks are unsupported for rqspinlock, and irqsave variants enforce strict FIFO ordering anyway, make the same change for normal non-irqsave variants, such that FIFO ordering is enforced. Two new verifier state fields (active_lock_id, active_lock_ptr) are used to denote the top of the stack, and prev_id and prev_ptr are ascertained whenever popping the topmost entry through an unlock. Take special care to make these fields part of the state comparison in refsafe. Signed-off-by: Kumar Kartikeya Dwivedi <[email protected]>
1 parent 6b6fd1a commit 8ea3c5e

File tree

2 files changed

+31
-5
lines changed

2 files changed

+31
-5
lines changed

include/linux/bpf_verifier.h

+3
Original file line numberDiff line numberDiff line change
@@ -268,6 +268,7 @@ struct bpf_reference_state {
268268
REF_TYPE_LOCK = (1 << 3),
269269
REF_TYPE_RES_LOCK = (1 << 4),
270270
REF_TYPE_RES_LOCK_IRQ = (1 << 5),
271+
REF_TYPE_LOCK_MASK = REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
271272
} type;
272273
/* Track each reference created with a unique id, even if the same
273274
* instruction creates the reference multiple times (eg, via CALL).
@@ -434,6 +435,8 @@ struct bpf_verifier_state {
434435
u32 active_locks;
435436
u32 active_preempt_locks;
436437
u32 active_irq_id;
438+
u32 active_lock_id;
439+
void *active_lock_ptr;
437440
bool active_rcu_lock;
438441

439442
bool speculative;

kernel/bpf/verifier.c

+28-5
Original file line numberDiff line numberDiff line change
@@ -1428,6 +1428,8 @@ static int copy_reference_state(struct bpf_verifier_state *dst, const struct bpf
14281428
dst->active_preempt_locks = src->active_preempt_locks;
14291429
dst->active_rcu_lock = src->active_rcu_lock;
14301430
dst->active_irq_id = src->active_irq_id;
1431+
dst->active_lock_id = src->active_lock_id;
1432+
dst->active_lock_ptr = src->active_lock_ptr;
14311433
return 0;
14321434
}
14331435

@@ -1527,6 +1529,8 @@ static int acquire_lock_state(struct bpf_verifier_env *env, int insn_idx, enum r
15271529
s->ptr = ptr;
15281530

15291531
state->active_locks++;
1532+
state->active_lock_id = id;
1533+
state->active_lock_ptr = ptr;
15301534
return 0;
15311535
}
15321536

@@ -1577,16 +1581,24 @@ static bool find_reference_state(struct bpf_verifier_state *state, int ptr_id)
15771581

15781582
static int release_lock_state(struct bpf_verifier_state *state, int type, int id, void *ptr)
15791583
{
1584+
void *prev_ptr = NULL;
1585+
u32 prev_id = 0;
15801586
int i;
15811587

15821588
for (i = 0; i < state->acquired_refs; i++) {
1583-
if (state->refs[i].type != type)
1584-
continue;
1585-
if (state->refs[i].id == id && state->refs[i].ptr == ptr) {
1589+
if (state->refs[i].type == type && state->refs[i].id == id &&
1590+
state->refs[i].ptr == ptr) {
15861591
release_reference_state(state, i);
15871592
state->active_locks--;
1593+
/* Reassign active lock (id, ptr). */
1594+
state->active_lock_id = prev_id;
1595+
state->active_lock_ptr = prev_ptr;
15881596
return 0;
15891597
}
1598+
if (state->refs[i].type & REF_TYPE_LOCK_MASK) {
1599+
prev_id = state->refs[i].id;
1600+
prev_ptr = state->refs[i].ptr;
1601+
}
15901602
}
15911603
return -EINVAL;
15921604
}
@@ -8342,6 +8354,14 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno, int flags)
83428354
type = REF_TYPE_RES_LOCK;
83438355
else
83448356
type = REF_TYPE_LOCK;
8357+
if (!find_lock_state(cur, type, reg->id, ptr)) {
8358+
verbose(env, "%s_unlock of different lock\n", lock_str);
8359+
return -EINVAL;
8360+
}
8361+
if (reg->id != cur->active_lock_id || ptr != cur->active_lock_ptr) {
8362+
verbose(env, "%s_unlock cannot be out of order\n", lock_str);
8363+
return -EINVAL;
8364+
}
83458365
if (release_lock_state(cur, type, reg->id, ptr)) {
83468366
verbose(env, "%s_unlock of different lock\n", lock_str);
83478367
return -EINVAL;
@@ -12534,8 +12554,7 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_
1253412554

1253512555
if (!env->cur_state->active_locks)
1253612556
return -EINVAL;
12537-
s = find_lock_state(env->cur_state, REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
12538-
id, ptr);
12557+
s = find_lock_state(env->cur_state, REF_TYPE_LOCK_MASK, id, ptr);
1253912558
if (!s) {
1254012559
verbose(env, "held lock and object are not in the same allocation\n");
1254112560
return -EINVAL;
@@ -18591,6 +18610,10 @@ static bool refsafe(struct bpf_verifier_state *old, struct bpf_verifier_state *c
1859118610
if (!check_ids(old->active_irq_id, cur->active_irq_id, idmap))
1859218611
return false;
1859318612

18613+
if (!check_ids(old->active_lock_id, cur->active_lock_id, idmap) ||
18614+
old->active_lock_ptr != cur->active_lock_ptr)
18615+
return false;
18616+
1859418617
for (i = 0; i < old->acquired_refs; i++) {
1859518618
if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) ||
1859618619
old->refs[i].type != cur->refs[i].type)

0 commit comments

Comments
 (0)