@@ -1242,8 +1242,8 @@ static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
 	emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
 }
 
-static int emit_atomic(u8 **pprog, u8 atomic_op,
-                       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
+static int emit_atomic_rmw(u8 **pprog, u32 atomic_op,
+                           u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
 {
 	u8 *prog = *pprog;
 
@@ -1283,8 +1283,9 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
 	return 0;
 }
 
-static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
-                             u32 dst_reg, u32 src_reg, u32 index_reg, int off)
+static int emit_atomic_rmw_index(u8 **pprog, u32 atomic_op, u32 size,
+                                 u32 dst_reg, u32 src_reg, u32 index_reg,
+                                 int off)
 {
 	u8 *prog = *pprog;
 
@@ -1297,7 +1298,7 @@ static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
 		EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
 		break;
 	default:
-		pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n");
+		pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
 		return -EFAULT;
 	}
 
@@ -1331,6 +1332,49 @@ static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
 	return 0;
 }
 
+static int emit_atomic_ld_st(u8 **pprog, u32 atomic_op, u32 dst_reg,
+                             u32 src_reg, s16 off, u8 bpf_size)
+{
+	switch (atomic_op) {
+	case BPF_LOAD_ACQ:
+		/* dst_reg = smp_load_acquire(src_reg + off16) */
+		emit_ldx(pprog, bpf_size, dst_reg, src_reg, off);
+		break;
+	case BPF_STORE_REL:
+		/* smp_store_release(dst_reg + off16, src_reg) */
+		emit_stx(pprog, bpf_size, dst_reg, src_reg, off);
+		break;
+	default:
+		pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
+		       atomic_op);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int emit_atomic_ld_st_index(u8 **pprog, u32 atomic_op, u32 size,
+                                   u32 dst_reg, u32 src_reg, u32 index_reg,
+                                   int off)
+{
+	switch (atomic_op) {
+	case BPF_LOAD_ACQ:
+		/* dst_reg = smp_load_acquire(src_reg + idx_reg + off16) */
+		emit_ldx_index(pprog, size, dst_reg, src_reg, index_reg, off);
+		break;
+	case BPF_STORE_REL:
+		/* smp_store_release(dst_reg + idx_reg + off16, src_reg) */
+		emit_stx_index(pprog, size, dst_reg, src_reg, index_reg, off);
+		break;
+	default:
+		pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
+		       atomic_op);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
 #define DONT_CLEAR 1
 
 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
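Note: the two new helpers lower BPF_LOAD_ACQ/BPF_STORE_REL straight to the existing plain-MOV emitters (emit_ldx()/emit_stx() and, below, their R12-indexed variants). That is enough on x86-64 because its TSO memory model already gives ordinary loads acquire semantics and ordinary stores release semantics, the same reason smp_load_acquire()/smp_store_release() need no fence on this architecture. The acquire/release mode travels in insn->imm, which is what the switch above keys on. As a minimal, illustrative sketch (not part of this patch), a selftest-style caller could hand-encode the new instructions with the existing BPF_ATOMIC_OP() helper from include/linux/filter.h:

#include <linux/filter.h>

/* Sketch only: one 64-bit load-acquire and one store-release, both using
 * the BPF_STX | BPF_ATOMIC class with the mode carried in insn->imm.
 */
static const struct bpf_insn ld_st_sample[] = {
	/* r0 = load_acquire((u64 *)(r1 + 0)) */
	BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_1, 0),
	/* store_release((u64 *)(r1 + 8), r0) */
	BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_1, BPF_REG_0, 8),
	BPF_EXIT_INSN(),
};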
@@ -2113,6 +2157,13 @@ st: if (is_imm8(insn->off))
 			}
 			break;
 
+		case BPF_STX | BPF_ATOMIC | BPF_B:
+		case BPF_STX | BPF_ATOMIC | BPF_H:
+			if (!bpf_atomic_is_load_store(insn)) {
+				pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
+				return -EFAULT;
+			}
+			fallthrough;
 		case BPF_STX | BPF_ATOMIC | BPF_W:
 		case BPF_STX | BPF_ATOMIC | BPF_DW:
 			if (insn->imm == (BPF_AND | BPF_FETCH) ||
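Note: the new BPF_B/BPF_H cases accept 1- and 2-byte operands only for the load-acquire/store-release forms; all RMW atomics remain 4- or 8-byte. The gate is bpf_atomic_is_load_store(), a shared helper defined outside this file. A plausible sketch of it, assuming it simply inspects insn->imm:

/* Sketch only; the real helper lives in the shared BPF headers. */
static inline bool bpf_atomic_is_load_store(const struct bpf_insn *insn)
{
	switch (insn->imm) {
	case BPF_LOAD_ACQ:
	case BPF_STORE_REL:
		return true;
	default:
		return false;
	}
}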
@@ -2148,10 +2199,10 @@ st: if (is_imm8(insn->off))
 				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
 				      add_2reg(0xC0, AUX_REG, real_src_reg));
 				/* Attempt to swap in new value */
-				err = emit_atomic(&prog, BPF_CMPXCHG,
-						  real_dst_reg, AUX_REG,
-						  insn->off,
-						  BPF_SIZE(insn->code));
+				err = emit_atomic_rmw(&prog, BPF_CMPXCHG,
+						      real_dst_reg, AUX_REG,
+						      insn->off,
+						      BPF_SIZE(insn->code));
 				if (WARN_ON(err))
 					return err;
 				/*
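Note: this hunk only renames the call; the surrounding logic is unchanged. For BPF_AND/BPF_OR/BPF_XOR with BPF_FETCH there is no single x86 instruction that both applies the ALU op and returns the old value, so the JIT emulates them with a LOCK CMPXCHG retry loop, and the emit_atomic_rmw(&prog, BPF_CMPXCHG, ...) call above emits the compare-and-swap at the heart of that loop. In C terms, the emitted sequence behaves roughly like this sketch (illustrative, not the actual machine code):

/* Rough C equivalent of the fetch-XOR emulation on a 64-bit operand;
 * BPF_AND/BPF_OR with BPF_FETCH are handled the same way with a
 * different ALU operation.
 */
static u64 fetch_xor64(u64 *dst, u64 src)
{
	u64 old, new;

	do {
		old = READ_ONCE(*dst);	/* load current value */
		new = old ^ src;	/* apply the ALU op */
	} while (cmpxchg(dst, old, new) != old);	/* LOCK CMPXCHG; retry on race */

	return old;	/* BPF_FETCH: old value ends up back in src_reg */
}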
@@ -2166,17 +2217,35 @@ st: if (is_imm8(insn->off))
 				break;
 			}
 
-		err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
-				  insn->off, BPF_SIZE(insn->code));
+		if (bpf_atomic_is_load_store(insn))
+			err = emit_atomic_ld_st(&prog, insn->imm, dst_reg, src_reg,
+						insn->off, BPF_SIZE(insn->code));
+		else
+			err = emit_atomic_rmw(&prog, insn->imm, dst_reg, src_reg,
+					      insn->off, BPF_SIZE(insn->code));
 		if (err)
 			return err;
 		break;
 
+		case BPF_STX | BPF_PROBE_ATOMIC | BPF_B:
+		case BPF_STX | BPF_PROBE_ATOMIC | BPF_H:
+			if (!bpf_atomic_is_load_store(insn)) {
+				pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
+				return -EFAULT;
+			}
+			fallthrough;
 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
 			start_of_ldx = prog;
-			err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code),
-						dst_reg, src_reg, X86_REG_R12, insn->off);
+
+			if (bpf_atomic_is_load_store(insn))
+				err = emit_atomic_ld_st_index(&prog, insn->imm,
+							      BPF_SIZE(insn->code), dst_reg,
+							      src_reg, X86_REG_R12, insn->off);
+			else
+				err = emit_atomic_rmw_index(&prog, insn->imm, BPF_SIZE(insn->code),
+							    dst_reg, src_reg, X86_REG_R12,
+							    insn->off);
 			if (err)
 				return err;
 			goto populate_extable;
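Note: the BPF_PROBE_ATOMIC cases are the arena variants. The access is rebased through X86_REG_R12, which the prologue loads with the arena's base address, and start_of_ldx is recorded so that populate_extable can attach an exception-table fixup to the probed instruction; the new load-acquire/store-release forms reuse that machinery unchanged and only swap the emitter. As an informal sketch of what the *_index() path does at runtime (assumptions: 'arena_base' stands for the value kept in R12, 'uaddr' for the arena pointer in the BPF register):

/* Illustrative only, not the emitted code. */
static inline u64 arena_load_acquire64(u64 arena_base, u32 uaddr, s16 off)
{
	u64 *p = (u64 *)(arena_base + uaddr + off);

	return smp_load_acquire(p);	/* a plain MOV on x86-64 */
}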
@@ -3771,12 +3840,8 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
 	if (!in_arena)
 		return true;
 	switch (insn->code) {
-	case BPF_STX | BPF_ATOMIC | BPF_B:
-	case BPF_STX | BPF_ATOMIC | BPF_H:
 	case BPF_STX | BPF_ATOMIC | BPF_W:
 	case BPF_STX | BPF_ATOMIC | BPF_DW:
-		if (bpf_atomic_is_load_store(insn))
-			return false;
 		if (insn->imm == (BPF_AND | BPF_FETCH) ||
 		    insn->imm == (BPF_OR | BPF_FETCH) ||
 		    insn->imm == (BPF_XOR | BPF_FETCH))