Skip to content

Commit 14c0427

Browse files
peilin-ye
authored and
Alexei Starovoitov
committed
bpf, x86: Support load-acquire and store-release instructions
Recently we introduced BPF load-acquire (BPF_LOAD_ACQ) and store-release (BPF_STORE_REL) instructions. For x86-64, simply implement them as regular BPF_LDX/BPF_STX loads and stores. The verifier always rejects misaligned load-acquires/store-releases (even if BPF_F_ANY_ALIGNMENT is set), so emitted MOV* instructions are guaranteed to be atomic. Arena accesses are supported. 8- and 16-bit load-acquires are zero-extending (i.e., MOVZBQ, MOVZWQ).

Rename emit_atomic{,_index}() to emit_atomic_rmw{,_index}() to make it clear that they only handle read-modify-write atomics, and extend their @atomic_op parameter from u8 to u32, since we are starting to use more than the lowest 8 bits of the 'imm' field.

Signed-off-by: Peilin Ye <[email protected]>
Link: https://lore.kernel.org/r/d22bb3c69f126af1d962b7314f3489eff606a3b7.1741049567.git.yepeilin@google.com
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 1bfe7f6 commit 14c0427

File tree

1 file changed

+82
-17
lines changed

1 file changed

+82
-17
lines changed

arch/x86/net/bpf_jit_comp.c

+82-17
Original file line numberDiff line numberDiff line change
@@ -1242,8 +1242,8 @@ static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
12421242
emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
12431243
}
12441244

1245-
static int emit_atomic(u8 **pprog, u8 atomic_op,
1246-
u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
1245+
static int emit_atomic_rmw(u8 **pprog, u32 atomic_op,
1246+
u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
12471247
{
12481248
u8 *prog = *pprog;
12491249

@@ -1283,8 +1283,9 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
12831283
return 0;
12841284
}
12851285

1286-
static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
1287-
u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1286+
static int emit_atomic_rmw_index(u8 **pprog, u32 atomic_op, u32 size,
1287+
u32 dst_reg, u32 src_reg, u32 index_reg,
1288+
int off)
12881289
{
12891290
u8 *prog = *pprog;
12901291

@@ -1297,7 +1298,7 @@ static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
12971298
EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
12981299
break;
12991300
default:
1300-
pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n");
1301+
pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
13011302
return -EFAULT;
13021303
}
13031304

@@ -1331,6 +1332,49 @@ static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
13311332
return 0;
13321333
}
13331334

1335+
static int emit_atomic_ld_st(u8 **pprog, u32 atomic_op, u32 dst_reg,
1336+
u32 src_reg, s16 off, u8 bpf_size)
1337+
{
1338+
switch (atomic_op) {
1339+
case BPF_LOAD_ACQ:
1340+
/* dst_reg = smp_load_acquire(src_reg + off16) */
1341+
emit_ldx(pprog, bpf_size, dst_reg, src_reg, off);
1342+
break;
1343+
case BPF_STORE_REL:
1344+
/* smp_store_release(dst_reg + off16, src_reg) */
1345+
emit_stx(pprog, bpf_size, dst_reg, src_reg, off);
1346+
break;
1347+
default:
1348+
pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
1349+
atomic_op);
1350+
return -EFAULT;
1351+
}
1352+
1353+
return 0;
1354+
}
1355+
1356+
static int emit_atomic_ld_st_index(u8 **pprog, u32 atomic_op, u32 size,
1357+
u32 dst_reg, u32 src_reg, u32 index_reg,
1358+
int off)
1359+
{
1360+
switch (atomic_op) {
1361+
case BPF_LOAD_ACQ:
1362+
/* dst_reg = smp_load_acquire(src_reg + idx_reg + off16) */
1363+
emit_ldx_index(pprog, size, dst_reg, src_reg, index_reg, off);
1364+
break;
1365+
case BPF_STORE_REL:
1366+
/* smp_store_release(dst_reg + idx_reg + off16, src_reg) */
1367+
emit_stx_index(pprog, size, dst_reg, src_reg, index_reg, off);
1368+
break;
1369+
default:
1370+
pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
1371+
atomic_op);
1372+
return -EFAULT;
1373+
}
1374+
1375+
return 0;
1376+
}
1377+
13341378
#define DONT_CLEAR 1
13351379

13361380
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
@@ -2113,6 +2157,13 @@ st: if (is_imm8(insn->off))
21132157
}
21142158
break;
21152159

2160+
case BPF_STX | BPF_ATOMIC | BPF_B:
2161+
case BPF_STX | BPF_ATOMIC | BPF_H:
2162+
if (!bpf_atomic_is_load_store(insn)) {
2163+
pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
2164+
return -EFAULT;
2165+
}
2166+
fallthrough;
21162167
case BPF_STX | BPF_ATOMIC | BPF_W:
21172168
case BPF_STX | BPF_ATOMIC | BPF_DW:
21182169
if (insn->imm == (BPF_AND | BPF_FETCH) ||
@@ -2148,10 +2199,10 @@ st: if (is_imm8(insn->off))
21482199
EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
21492200
add_2reg(0xC0, AUX_REG, real_src_reg));
21502201
/* Attempt to swap in new value */
2151-
err = emit_atomic(&prog, BPF_CMPXCHG,
2152-
real_dst_reg, AUX_REG,
2153-
insn->off,
2154-
BPF_SIZE(insn->code));
2202+
err = emit_atomic_rmw(&prog, BPF_CMPXCHG,
2203+
real_dst_reg, AUX_REG,
2204+
insn->off,
2205+
BPF_SIZE(insn->code));
21552206
if (WARN_ON(err))
21562207
return err;
21572208
/*
@@ -2166,17 +2217,35 @@ st: if (is_imm8(insn->off))
21662217
break;
21672218
}
21682219

2169-
err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
2170-
insn->off, BPF_SIZE(insn->code));
2220+
if (bpf_atomic_is_load_store(insn))
2221+
err = emit_atomic_ld_st(&prog, insn->imm, dst_reg, src_reg,
2222+
insn->off, BPF_SIZE(insn->code));
2223+
else
2224+
err = emit_atomic_rmw(&prog, insn->imm, dst_reg, src_reg,
2225+
insn->off, BPF_SIZE(insn->code));
21712226
if (err)
21722227
return err;
21732228
break;
21742229

2230+
case BPF_STX | BPF_PROBE_ATOMIC | BPF_B:
2231+
case BPF_STX | BPF_PROBE_ATOMIC | BPF_H:
2232+
if (!bpf_atomic_is_load_store(insn)) {
2233+
pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
2234+
return -EFAULT;
2235+
}
2236+
fallthrough;
21752237
case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
21762238
case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
21772239
start_of_ldx = prog;
2178-
err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code),
2179-
dst_reg, src_reg, X86_REG_R12, insn->off);
2240+
2241+
if (bpf_atomic_is_load_store(insn))
2242+
err = emit_atomic_ld_st_index(&prog, insn->imm,
2243+
BPF_SIZE(insn->code), dst_reg,
2244+
src_reg, X86_REG_R12, insn->off);
2245+
else
2246+
err = emit_atomic_rmw_index(&prog, insn->imm, BPF_SIZE(insn->code),
2247+
dst_reg, src_reg, X86_REG_R12,
2248+
insn->off);
21802249
if (err)
21812250
return err;
21822251
goto populate_extable;
@@ -3771,12 +3840,8 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
37713840
if (!in_arena)
37723841
return true;
37733842
switch (insn->code) {
3774-
case BPF_STX | BPF_ATOMIC | BPF_B:
3775-
case BPF_STX | BPF_ATOMIC | BPF_H:
37763843
case BPF_STX | BPF_ATOMIC | BPF_W:
37773844
case BPF_STX | BPF_ATOMIC | BPF_DW:
3778-
if (bpf_atomic_is_load_store(insn))
3779-
return false;
37803845
if (insn->imm == (BPF_AND | BPF_FETCH) ||
37813846
insn->imm == (BPF_OR | BPF_FETCH) ||
37823847
insn->imm == (BPF_XOR | BPF_FETCH))

0 commit comments

Comments
 (0)