Skip to content

Commit 9b76d71

Browse files
committed
Merge tag 'riscv-for-linus-5.14-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V updates from Palmer Dabbelt:
 "We have a handful of new features for 5.14:

  - Support for transparent huge pages.

  - Support for generic PCI resources mapping.

  - Support for the mem= kernel parameter.

  - Support for KFENCE.

  - A handful of fixes to avoid W+X mappings in the kernel.

  - Support for VMAP_STACK based overflow detection.

  - An optimized copy_{to,from}_user"

* tag 'riscv-for-linus-5.14-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (37 commits)
  riscv: xip: Fix duplicate included asm/pgtable.h
  riscv: Fix PTDUMP output now BPF region moved back to module region
  riscv: __asm_copy_to-from_user: Optimize unaligned memory access and pipeline stall
  riscv: add VMAP_STACK overflow detection
  riscv: ptrace: add argn syntax
  riscv: mm: fix build errors caused by mk_pmd()
  riscv: Introduce structure that group all variables regarding kernel mapping
  riscv: Map the kernel with correct permissions the first time
  riscv: Introduce set_kernel_memory helper
  riscv: Enable KFENCE for riscv64
  RISC-V: Use asm-generic for {in,out}{bwlq}
  riscv: add ASID-based tlbflushing methods
  riscv: pass the mm_struct to __sbi_tlb_flush_range
  riscv: Add mem kernel parameter support
  riscv: Simplify xip and !xip kernel address conversion macros
  riscv: Remove CONFIG_PHYS_RAM_BASE_FIXED
  riscv: Only initialize swiotlb when necessary
  riscv: fix typo in init.c
  riscv: Cleanup unused functions
  riscv: mm: Use better bitmap_zalloc()
  ...
2 parents 1459718 + 1958e5a commit 9b76d71

37 files changed (+901, -360 lines changed)

arch/riscv/Kconfig

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -65,11 +65,14 @@ config RISCV
6565
select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
6666
select HAVE_ARCH_KASAN if MMU && 64BIT
6767
select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
68+
select HAVE_ARCH_KFENCE if MMU && 64BIT
6869
select HAVE_ARCH_KGDB if !XIP_KERNEL
6970
select HAVE_ARCH_KGDB_QXFER_PKT
7071
select HAVE_ARCH_MMAP_RND_BITS if MMU
7172
select HAVE_ARCH_SECCOMP_FILTER
7273
select HAVE_ARCH_TRACEHOOK
74+
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU
75+
select HAVE_ARCH_VMAP_STACK if MMU && 64BIT
7376
select HAVE_ASM_MODVERSIONS
7477
select HAVE_CONTEXT_TRACKING
7578
select HAVE_DEBUG_KMEMLEAK
@@ -83,11 +86,14 @@ config RISCV
8386
select HAVE_KPROBES if !XIP_KERNEL
8487
select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
8588
select HAVE_KRETPROBES if !XIP_KERNEL
89+
select HAVE_MOVE_PMD
90+
select HAVE_MOVE_PUD
8691
select HAVE_PCI
8792
select HAVE_PERF_EVENTS
8893
select HAVE_PERF_REGS
8994
select HAVE_PERF_USER_STACK_DUMP
9095
select HAVE_REGS_AND_STACK_ACCESS_API
96+
select HAVE_FUNCTION_ARG_ACCESS_API
9197
select HAVE_STACKPROTECTOR
9298
select HAVE_SYSCALL_TRACEPOINTS
9399
select IRQ_DOMAIN
@@ -488,13 +494,8 @@ config STACKPROTECTOR_PER_TASK
488494
def_bool y
489495
depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
490496

491-
config PHYS_RAM_BASE_FIXED
492-
bool "Explicitly specified physical RAM address"
493-
default n
494-
495497
config PHYS_RAM_BASE
496498
hex "Platform Physical RAM address"
497-
depends on PHYS_RAM_BASE_FIXED
498499
default "0x80000000"
499500
help
500501
This is the physical address of RAM in the system. It has to be
@@ -507,7 +508,6 @@ config XIP_KERNEL
507508
# This prevents XIP from being enabled by all{yes,mod}config, which
508509
# fail to build since XIP doesn't support large kernels.
509510
depends on !COMPILE_TEST
510-
select PHYS_RAM_BASE_FIXED
511511
help
512512
Execute-In-Place allows the kernel to run from non-volatile storage
513513
directly addressable by the CPU, such as NOR flash. This saves RAM

arch/riscv/include/asm/asm-prototypes.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,4 +25,7 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
2525
DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
2626
DECLARE_DO_ERROR_INFO(do_trap_break);
2727

28+
asmlinkage unsigned long get_overflow_stack(void);
29+
asmlinkage void handle_bad_stack(struct pt_regs *regs);
30+
2831
#endif /* _ASM_RISCV_PROTOTYPES_H */

arch/riscv/include/asm/io.h

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -52,19 +52,6 @@
5252
#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
5353
#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
5454

55-
#define inb(c) ({ u8 __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
56-
#define inw(c) ({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
57-
#define inl(c) ({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
58-
59-
#define outb(v,c) ({ __io_pbw(); writeb_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
60-
#define outw(v,c) ({ __io_pbw(); writew_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
61-
#define outl(v,c) ({ __io_pbw(); writel_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
62-
63-
#ifdef CONFIG_64BIT
64-
#define inq(c) ({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(c)); __io_par(__v); __v; })
65-
#define outq(v,c) ({ __io_pbw(); writeq_cpu((v),(void*)(c)); __io_paw(); })
66-
#endif
67-
6855
/*
6956
* Accesses from a single hart to a single I/O address must be ordered. This
7057
* allows us to use the raw read macros, but we still need to fence before and

arch/riscv/include/asm/kfence.h

Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
3+
#ifndef _ASM_RISCV_KFENCE_H
4+
#define _ASM_RISCV_KFENCE_H
5+
6+
#include <linux/kfence.h>
7+
#include <linux/pfn.h>
8+
#include <asm-generic/pgalloc.h>
9+
#include <asm/pgtable.h>
10+
11+
static inline int split_pmd_page(unsigned long addr)
12+
{
13+
int i;
14+
unsigned long pfn = PFN_DOWN(__pa((addr & PMD_MASK)));
15+
pmd_t *pmd = pmd_off_k(addr);
16+
pte_t *pte = pte_alloc_one_kernel(&init_mm);
17+
18+
if (!pte)
19+
return -ENOMEM;
20+
21+
for (i = 0; i < PTRS_PER_PTE; i++)
22+
set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL));
23+
set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE));
24+
25+
flush_tlb_kernel_range(addr, addr + PMD_SIZE);
26+
return 0;
27+
}
28+
29+
static inline bool arch_kfence_init_pool(void)
30+
{
31+
int ret;
32+
unsigned long addr;
33+
pmd_t *pmd;
34+
35+
for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
36+
addr += PAGE_SIZE) {
37+
pmd = pmd_off_k(addr);
38+
39+
if (pmd_leaf(*pmd)) {
40+
ret = split_pmd_page(addr);
41+
if (ret)
42+
return false;
43+
}
44+
}
45+
46+
return true;
47+
}
48+
49+
static inline bool kfence_protect_page(unsigned long addr, bool protect)
50+
{
51+
pte_t *pte = virt_to_kpte(addr);
52+
53+
if (protect)
54+
set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
55+
else
56+
set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
57+
58+
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
59+
60+
return true;
61+
}
62+
63+
#endif /* _ASM_RISCV_KFENCE_H */

arch/riscv/include/asm/kprobes.h

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -29,18 +29,11 @@ struct prev_kprobe {
2929
unsigned int status;
3030
};
3131

32-
/* Single step context for kprobe */
33-
struct kprobe_step_ctx {
34-
unsigned long ss_pending;
35-
unsigned long match_addr;
36-
};
37-
3832
/* per-cpu kprobe control block */
3933
struct kprobe_ctlblk {
4034
unsigned int kprobe_status;
4135
unsigned long saved_status;
4236
struct prev_kprobe prev_kprobe;
43-
struct kprobe_step_ctx ss_ctx;
4437
};
4538

4639
void arch_remove_kprobe(struct kprobe *p);

arch/riscv/include/asm/mmu_context.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,8 @@ static inline int init_new_context(struct task_struct *tsk,
3333
return 0;
3434
}
3535

36+
DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
37+
3638
#include <asm-generic/mmu_context.h>
3739

3840
#endif /* _ASM_RISCV_MMU_CONTEXT_H */

arch/riscv/include/asm/page.h

Lines changed: 40 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -37,16 +37,6 @@
3737

3838
#ifndef __ASSEMBLY__
3939

40-
#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
41-
#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
42-
43-
/* align addr on a size boundary - adjust address up/down if needed */
44-
#define _ALIGN_UP(addr, size) (((addr)+((size)-1))&(~((size)-1)))
45-
#define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1)))
46-
47-
/* align addr on a size boundary - adjust address up if needed */
48-
#define _ALIGN(addr, size) _ALIGN_UP(addr, size)
49-
5040
#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
5141
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
5242

@@ -89,59 +79,68 @@ typedef struct page *pgtable_t;
8979
#endif
9080

9181
#ifdef CONFIG_MMU
92-
extern unsigned long va_pa_offset;
93-
#ifdef CONFIG_64BIT
94-
extern unsigned long va_kernel_pa_offset;
95-
#endif
96-
#ifdef CONFIG_XIP_KERNEL
97-
extern unsigned long va_kernel_xip_pa_offset;
98-
#endif
9982
extern unsigned long pfn_base;
10083
#define ARCH_PFN_OFFSET (pfn_base)
10184
#else
102-
#define va_pa_offset 0
103-
#ifdef CONFIG_64BIT
104-
#define va_kernel_pa_offset 0
105-
#endif
10685
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
10786
#endif /* CONFIG_MMU */
10887

109-
extern unsigned long kernel_virt_addr;
110-
88+
struct kernel_mapping {
89+
unsigned long virt_addr;
90+
uintptr_t phys_addr;
91+
uintptr_t size;
92+
/* Offset between linear mapping virtual address and kernel load address */
93+
unsigned long va_pa_offset;
11194
#ifdef CONFIG_64BIT
112-
#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
95+
/* Offset between kernel mapping virtual address and kernel load address */
96+
unsigned long va_kernel_pa_offset;
97+
#endif
98+
unsigned long va_kernel_xip_pa_offset;
11399
#ifdef CONFIG_XIP_KERNEL
100+
uintptr_t xiprom;
101+
uintptr_t xiprom_sz;
102+
#endif
103+
};
104+
105+
extern struct kernel_mapping kernel_map;
106+
107+
#ifdef CONFIG_64BIT
108+
#define is_kernel_mapping(x) \
109+
((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
110+
#define is_linear_mapping(x) \
111+
((x) >= PAGE_OFFSET && (x) < kernel_map.virt_addr)
112+
113+
#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
114114
#define kernel_mapping_pa_to_va(y) ({ \
115115
unsigned long _y = y; \
116116
(_y >= CONFIG_PHYS_RAM_BASE) ? \
117-
(void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
118-
(void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
117+
(void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET) : \
118+
(void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset); \
119119
})
120-
#else
121-
#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset))
122-
#endif
123120
#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
124121

125-
#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
126-
#ifdef CONFIG_XIP_KERNEL
122+
#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
127123
#define kernel_mapping_va_to_pa(y) ({ \
128124
unsigned long _y = y; \
129-
(_y < kernel_virt_addr + XIP_OFFSET) ? \
130-
((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
131-
((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
125+
(_y < kernel_map.virt_addr + XIP_OFFSET) ? \
126+
((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : \
127+
((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
132128
})
133-
#else
134-
#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset)
135-
#endif
129+
136130
#define __va_to_pa_nodebug(x) ({ \
137131
unsigned long _x = x; \
138-
(_x < kernel_virt_addr) ? \
132+
is_linear_mapping(_x) ? \
139133
linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
140134
})
141135
#else
142-
#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
143-
#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
144-
#endif
136+
#define is_kernel_mapping(x) \
137+
((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
138+
#define is_linear_mapping(x) \
139+
((x) >= PAGE_OFFSET)
140+
141+
#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + kernel_map.va_pa_offset))
142+
#define __va_to_pa_nodebug(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
143+
#endif /* CONFIG_64BIT */
145144

146145
#ifdef CONFIG_DEBUG_VIRTUAL
147146
extern phys_addr_t __virt_to_phys(unsigned long x);

arch/riscv/include/asm/pci.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,8 @@
1818
/* RISC-V shim does not initialize PCI bus */
1919
#define pcibios_assign_all_busses() 1
2020

21+
#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
22+
2123
extern int isa_dma_bridge_buggy;
2224

2325
#ifdef CONFIG_PCI

arch/riscv/include/asm/pgtable-64.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -46,8 +46,7 @@ static inline int pud_bad(pud_t pud)
4646
#define pud_leaf pud_leaf
4747
static inline int pud_leaf(pud_t pud)
4848
{
49-
return pud_present(pud) &&
50-
(pud_val(pud) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
49+
return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF);
5150
}
5251

5352
static inline void set_pud(pud_t *pudp, pud_t pud)
@@ -80,6 +79,8 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
8079
return pmd_val(pmd) >> _PAGE_PFN_SHIFT;
8180
}
8281

82+
#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot)
83+
8384
#define pmd_ERROR(e) \
8485
pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
8586

arch/riscv/include/asm/pgtable-bits.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,5 +39,10 @@
3939
#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
4040
_PAGE_WRITE | _PAGE_EXEC | \
4141
_PAGE_USER | _PAGE_GLOBAL))
42+
/*
43+
* when all of R/W/X are zero, the PTE is a pointer to the next level
44+
* of the page table; otherwise, it is a leaf PTE.
45+
*/
46+
#define _PAGE_LEAF (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
4247

4348
#endif /* _ASM_RISCV_PGTABLE_BITS_H */

0 commit comments

Comments (0)