
Commit 04fc3bb

puranjaymohan authored and Kernel Patches Daemon committed
arm64/cfi,bpf: Support kCFI + BPF on arm64
Currently, bpf_dispatcher_*_func() is marked with `__nocfi`, so calling BPF programs through this interface doesn't cause CFI warnings. When BPF programs are called directly from C, i.e. from BPF helpers or struct_ops, CFI warnings are generated.

Implement proper CFI prologues for the BPF programs and callbacks and drop __nocfi for arm64. Fix the trampoline generation code to emit a kCFI prologue when a struct_ops trampoline is being prepared.

Signed-off-by: Puranjay Mohan <[email protected]>
Co-Developed-by: Maxwell Bland <[email protected]>
Signed-off-by: Maxwell Bland <[email protected]>
Co-Developed-by: Sami Tolvanen <[email protected]>
Signed-off-by: Sami Tolvanen <[email protected]>
1 parent a72a9be commit 04fc3bb

File tree

3 files changed (+67, -3 lines):
  arch/arm64/include/asm/cfi.h
  arch/arm64/kernel/alternative.c
  arch/arm64/net/bpf_jit_comp.c
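For orientation: with CONFIG_CFI_CLANG (kCFI), every function that may be called indirectly is preceded by a 32-bit type hash, and each indirect call site loads the word just before the target's entry point and compares it against the hash of the expected prototype before branching. This patch makes JITed BPF programs and struct_ops trampolines carry such a hash word. The sketch below is only a conceptual C rendering of the caller-side check the compiler emits inline in assembly; cfi_checked_call() is a made-up name, not a kernel API.

#include <linux/types.h>
#include <linux/bug.h>

/* Conceptual sketch of a kCFI-instrumented indirect call (illustrative only). */
static void cfi_checked_call(void (*target)(void), u32 expected_hash)
{
	/* The type hash lives in the 4 bytes preceding the entry point. */
	u32 actual = *(const u32 *)((const char *)target - 4);

	if (actual != expected_hash)
		BUG();		/* real kCFI raises a BRK exception here */

	target();
}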

arch/arm64/include/asm/cfi.h (new file, +23)

@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_CFI_H
+#define _ASM_ARM64_CFI_H
+
+#ifdef CONFIG_CFI_CLANG
+#define __bpfcall
+static inline int cfi_get_offset(void)
+{
+	return 4;
+}
+#define cfi_get_offset cfi_get_offset
+extern u32 cfi_bpf_hash;
+extern u32 cfi_bpf_subprog_hash;
+extern u32 cfi_get_func_hash(void *func);
+#else
+#define cfi_bpf_hash 0U
+#define cfi_bpf_subprog_hash 0U
+static inline u32 cfi_get_func_hash(void *func)
+{
+	return 0;
+}
+#endif /* CONFIG_CFI_CLANG */
+#endif /* _ASM_ARM64_CFI_H */
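cfi_get_offset() returning 4 means the hash is a single 32-bit word placed immediately in front of the JITed code, so translating between the start of the read-only image and the callable entry point is plain pointer arithmetic. A minimal sketch of that relationship, assuming CONFIG_CFI_CLANG=y; entry_of()/image_of() are illustrative helpers, not part of this patch.

#include <asm/cfi.h>

/* Sketch only: how the 4-byte kCFI offset relates image start and entry. */
static void *entry_of(void *ro_image)
{
	return ro_image + cfi_get_offset();	/* skip the hash word */
}

static void *image_of(void *bpf_func)
{
	return bpf_func - cfi_get_offset();	/* back to the hash word */
}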

arch/arm64/kernel/alternative.c (+25)

@@ -8,11 +8,13 @@
 
 #define pr_fmt(fmt) "alternatives: " fmt
 
+#include <linux/cfi_types.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/elf.h>
 #include <asm/cacheflush.h>
 #include <asm/alternative.h>
+#include <asm/cfi.h>
 #include <asm/cpufeature.h>
 #include <asm/insn.h>
 #include <asm/module.h>
@@ -298,3 +300,26 @@ noinstr void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
 		updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
 }
 EXPORT_SYMBOL(alt_cb_patch_nops);
+
+#ifdef CONFIG_CFI_CLANG
+struct bpf_insn;
+
+/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
+extern unsigned int __bpf_prog_runX(const void *ctx,
+				    const struct bpf_insn *insn);
+DEFINE_CFI_TYPE(cfi_bpf_hash, __bpf_prog_runX);
+
+/* Must match bpf_callback_t */
+extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
+DEFINE_CFI_TYPE(cfi_bpf_subprog_hash, __bpf_callback_fn);
+
+u32 cfi_get_func_hash(void *func)
+{
+	u32 hash;
+
+	if (get_kernel_nofault(hash, func - cfi_get_offset()))
+		return 0;
+
+	return hash;
+}
+#endif /* CONFIG_CFI_CLANG */
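The two extern functions are never defined or called; they exist only so DEFINE_CFI_TYPE() (from <linux/cfi_types.h>) can capture the kCFI type hashes the compiler computes for the bpf_func_t and bpf_callback_t prototypes, mirroring the existing x86 code. cfi_get_func_hash() then reads back the hash stamped in front of an arbitrary kernel function. A hedged sketch of the intended use, with made-up names (stamp_expected_hash, image, orig_func), not code from this patch:

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/cfi.h>

/*
 * Illustrative only: a generated stub that will be called indirectly in
 * place of some kernel function must start with the same type hash that
 * callers of that function were compiled to expect.
 */
static int stamp_expected_hash(u32 *image, void *orig_func)
{
	u32 hash = cfi_get_func_hash(orig_func);

	if (!hash)
		return -EINVAL;	/* no readable kCFI hash before orig_func */

	image[0] = hash;	/* hash word goes first, code follows */
	return 0;
}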

arch/arm64/net/bpf_jit_comp.c (+19, -3)

@@ -17,6 +17,7 @@
 #include <asm/asm-extable.h>
 #include <asm/byteorder.h>
 #include <asm/cacheflush.h>
+#include <asm/cfi.h>
 #include <asm/debug-monitors.h>
 #include <asm/insn.h>
 #include <asm/text-patching.h>
@@ -164,6 +165,12 @@ static inline void emit_bti(u32 insn, struct jit_ctx *ctx)
 	emit(insn, ctx);
 }
 
+static inline void emit_kcfi(u32 hash, struct jit_ctx *ctx)
+{
+	if (IS_ENABLED(CONFIG_CFI_CLANG))
+		emit(hash, ctx);
+}
+
 /*
  * Kernel addresses in the vmalloc space use at most 48 bits, and the
  * remaining bits are guaranteed to be 0x1. So we can compose the address
@@ -474,7 +481,6 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 	const bool is_main_prog = !bpf_is_subprog(prog);
 	const u8 fp = bpf2a64[BPF_REG_FP];
 	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
-	const int idx0 = ctx->idx;
 	int cur_offset;
 
 	/*
@@ -500,6 +506,9 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 	 *
 	 */
 
+	emit_kcfi(is_main_prog ? cfi_bpf_hash : cfi_bpf_subprog_hash, ctx);
+	const int idx0 = ctx->idx;
+
 	/* bpf function may be invoked by 3 instruction types:
 	 * 1. bl, attached via freplace to bpf prog via short jump
 	 * 2. br, attached via freplace to bpf prog via long jump
@@ -2009,9 +2018,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		jit_data->ro_header = ro_header;
 	}
 
-	prog->bpf_func = (void *)ctx.ro_image;
+	prog->bpf_func = (void *)ctx.ro_image + cfi_get_offset();
 	prog->jited = 1;
-	prog->jited_len = prog_size;
+	prog->jited_len = prog_size - cfi_get_offset();
 
 	if (!prog->is_func || extra_pass) {
 		int i;
@@ -2271,6 +2280,12 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
 	/* return address locates above FP */
 	retaddr_off = stack_size + 8;
 
+	if (flags & BPF_TRAMP_F_INDIRECT) {
+		/*
+		 * Indirect call for bpf_struct_ops
+		 */
+		emit_kcfi(cfi_get_func_hash(func_addr), ctx);
+	}
 	/* bpf trampoline may be invoked by 3 instruction types:
 	 * 1. bl, attached to bpf prog or kernel function via short jump
 	 * 2. br, attached to bpf prog or kernel function via long jump
@@ -2790,6 +2805,7 @@ void bpf_jit_free(struct bpf_prog *prog)
 			   sizeof(jit_data->header->size));
 		kfree(jit_data);
 	}
+	prog->bpf_func -= cfi_get_offset();
 	hdr = bpf_jit_binary_pack_hdr(prog);
 	bpf_jit_binary_pack_free(hdr, NULL);
 	WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
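Taken together, the JIT changes give a JITed program image the layout sketched below: the hash word is emitted first, idx0 is captured only after it so all intra-program offsets are unchanged, prog->bpf_func and prog->jited_len describe only the callable code, and bpf_jit_free() moves bpf_func back to the image start before unpacking the header. The snippet is a sketch of that invariant derived from the hunks above, not code from the patch; layout_ok() and its arguments are hypothetical.

#include <linux/types.h>
#include <linux/filter.h>
#include <asm/cfi.h>

/*
 * Image layout with CONFIG_CFI_CLANG (cfi_get_offset() == 4):
 *
 *   ro_image + 0: u32 kCFI hash (cfi_bpf_hash or cfi_bpf_subprog_hash)
 *   ro_image + 4: first instruction  <- prog->bpf_func points here
 *   ...
 *   prog->jited_len counts only the bytes starting at prog->bpf_func.
 */
static bool layout_ok(void *ro_image, unsigned int prog_size,
		      const struct bpf_prog *prog)
{
	return (void *)prog->bpf_func == ro_image + cfi_get_offset() &&
	       prog->jited_len == prog_size - cfi_get_offset();
}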
