
Commit

Add mm/ modifications
Signed-off-by: ShivamKumarJha <[email protected]>
ShivamKumarJha committed Mar 30, 2021
1 parent 2ec729d commit 83a3b45
Showing 12 changed files with 587 additions and 50 deletions.
9 changes: 0 additions & 9 deletions mm/Kconfig
@@ -518,15 +518,6 @@ config CMA_DEBUGFS
help
Turns on the DebugFS interface for CMA.

-config CMA_ALLOW_WRITE_DEBUGFS
-	depends on CMA_DEBUGFS
-	bool "Allow CMA debugfs write"
-	help
-	  Say 'y' here to allow the CMA debugfs write.
-	  CMA debugfs write could be risky as it allows
-	  cma allocation, so it is not recommended to
-	  enable this option on any production device.
-
config CMA_AREAS
int "Maximum count of the CMA areas"
depends on CMA
10 changes: 0 additions & 10 deletions mm/cma_debug.c
@@ -74,7 +74,6 @@ static int cma_maxchunk_get(void *data, u64 *val)
}
DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");

-#ifdef CONFIG_CMA_ALLOW_WRITE_DEBUGFS
static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
spin_lock(&cma->mem_head_lock);
@@ -133,13 +132,8 @@ static int cma_free_write(void *data, u64 val)

return cma_free_mem(cma, pages);
}
-#else
-#define cma_free_write NULL
-#endif

DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");

-#ifdef CONFIG_CMA_ALLOW_WRITE_DEBUGFS
static int cma_alloc_mem(struct cma *cma, int count)
{
struct cma_mem *mem;
@@ -170,10 +164,6 @@ static int cma_alloc_write(void *data, u64 val)

return cma_alloc_mem(cma, pages);
}
-#else
-#define cma_alloc_write NULL
-#endif

DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");

static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
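Note: with the Kconfig guard gone, the write handlers above are always built in. For context, they are exposed per CMA area as write-only debugfs files; a minimal sketch of that registration, modeled on mainline cma_debug.c (the mainline function also creates several read-only stats files, omitted here):

/*
 * Sketch (assumes the mainline cma_debug.c layout): each CMA area gets
 * a debugfs directory with "alloc" and "free" files whose writes land
 * in cma_alloc_write()/cma_free_write() via the fops defined above.
 */
static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
{
	struct dentry *tmp;
	char name[16];

	scnprintf(name, sizeof(name), "cma-%s", cma->name);
	tmp = debugfs_create_dir(name, root_dentry);

	/* 0200 = root write-only; writing N allocates/frees N pages */
	debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);
	debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);
}

This is exactly what the removed help text warned about: any root user can now trigger CMA allocations from userspace, which the dropped option deliberately kept out of production builds.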
24 changes: 18 additions & 6 deletions mm/gup.c
@@ -161,13 +161,12 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
}

/*
- * FOLL_FORCE can write to even unwritable pte's, but only
- * after we've gone through a COW cycle and they are dirty.
+ * FOLL_FORCE or a forced COW break can write even to unwritable pte's,
+ * but only after we've gone through a COW cycle and they are dirty.
*/
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
-	return pte_write(pte) ||
-		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+	return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte));
}

/*
@@ -833,12 +832,18 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
goto out;
}
if (is_vm_hugetlb_page(vma)) {
+				if (should_force_cow_break(vma, foll_flags))
+					foll_flags |= FOLL_WRITE;
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i,
-						gup_flags, nonblocking);
+						foll_flags, nonblocking);
continue;
}
}

+		if (should_force_cow_break(vma, foll_flags))
+			foll_flags |= FOLL_WRITE;

retry:
/*
* If we have a pending SIGKILL, don't keep faulting pages and
@@ -2435,10 +2440,17 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
if (unlikely(!access_ok((void __user *)start, len)))
return -EFAULT;

+	/*
+	 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
+	 * because get_user_pages() may need to cause an early COW in
+	 * order to avoid confusing the normal COW routines. So only
+	 * targets that are already writable are safe to do by just
+	 * looking at the page tables.
+	 */
if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
gup_fast_permitted(start, end)) {
local_irq_disable();
-		gup_pgd_range(addr, end, gup_flags, pages, &nr);
+		gup_pgd_range(addr, end, gup_flags | FOLL_WRITE, pages, &nr);
local_irq_enable();
ret = nr;
}
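Note: should_force_cow_break() is called above but not defined in this hunk. In the mainline commit this change tracks (17839856fd58, "gup: document and work around 'COW can break either way' issue") it is a one-line predicate; reproduced here as a reference sketch:

/*
 * Break COW early for GUP pins of private (COW) mappings, even for
 * reads: a long-lived reference to a still-shared COW page can end up
 * pointing at the wrong page after a later fault copies it. FOLL_GET
 * identifies callers that actually take such references.
 */
static inline bool should_force_cow_break(struct vm_area_struct *vma,
					  unsigned int flags)
{
	return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET);
}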
7 changes: 3 additions & 4 deletions mm/huge_memory.c
@@ -1454,13 +1454,12 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
}

/*
- * FOLL_FORCE can write to even unwritable pmd's, but only
- * after we've gone through a COW cycle and they are dirty.
+ * FOLL_FORCE or a forced COW break can write even to unwritable pmd's,
+ * but only after we've gone through a COW cycle and they are dirty.
*/
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
-	return pmd_write(pmd) ||
-		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+	return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd));
}

struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
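Note: both rewritten predicates rely on FOLL_COW, the internal GUP flag set once a COW cycle has broken the page for this walker. For reference, from include/linux/mm.h in kernels of this vintage (comment paraphrased):

#define FOLL_COW	0x4000	/* internal GUP flag: a COW cycle has run, so a
				 * dirty but non-writable pte/pmd may be
				 * written through */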
40 changes: 40 additions & 0 deletions mm/kasan/quarantine.c
@@ -4,6 +4,7 @@
*
* Author: Alexander Potapenko <[email protected]>
* Copyright (C) 2016 Google, Inc.
+ * Copyright (C) 2021 XiaoMi, Inc.
*
* Based on code by Dmitry Chernenkov.
*
@@ -29,6 +30,7 @@
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/cpuhotplug.h>

#include "../slab.h"
#include "kasan.h"
@@ -43,6 +45,7 @@ struct qlist_head {
struct qlist_node *head;
struct qlist_node *tail;
size_t bytes;
+	bool offline;
};

#define QLIST_INIT { NULL, NULL, 0 }
@@ -187,6 +190,10 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
local_irq_save(flags);

q = this_cpu_ptr(&cpu_quarantine);
+	if (q->offline) {
+		local_irq_restore(flags);
+		return;
+	}
qlist_put(q, &info->quarantine_link, cache->size);
if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
qlist_move_all(q, &temp);
@@ -327,3 +334,36 @@ void quarantine_remove_cache(struct kmem_cache *cache)

synchronize_srcu(&remove_cache_srcu);
}

+static int kasan_cpu_online(unsigned int cpu)
+{
+	this_cpu_ptr(&cpu_quarantine)->offline = false;
+	return 0;
+}
+
+static int kasan_cpu_offline(unsigned int cpu)
+{
+	struct qlist_head *q;
+
+	q = this_cpu_ptr(&cpu_quarantine);
+	/* Ensure the ordering between the writing to q->offline and
+	 * qlist_free_all. Otherwise, cpu_quarantine may be corrupted
+	 * by interrupt.
+	 */
+	WRITE_ONCE(q->offline, true);
+	barrier();
+	qlist_free_all(q, NULL);
+	return 0;
+}
+
+static int __init kasan_cpu_quarantine_init(void)
+{
+	int ret = 0;
+
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
+				kasan_cpu_online, kasan_cpu_offline);
+	if (ret < 0)
+		pr_err("kasan cpu quarantine register failed [%d]\n", ret);
+	return ret;
+}
+late_initcall(kasan_cpu_quarantine_init);
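Note on the hotplug wiring: CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state, so kasan_cpu_online() runs on every CPU as it comes up (and on all CPUs already online when the state is registered), while kasan_cpu_offline() runs on a CPU as it goes down. The offline flag closes a same-CPU race, sketched below: once the dying CPU starts draining its quarantine, an interrupt that frees a KASAN-tracked object must not re-add entries to the list being freed.

/*
 * Sketch of the pairing (a same-CPU IRQ race, not an SMP race, which
 * is why WRITE_ONCE() plus a compiler barrier() is sufficient):
 *
 *   CPU going down                     interrupt on that CPU
 *   --------------                     ---------------------
 *   WRITE_ONCE(q->offline, true);
 *   barrier();
 *   qlist_free_all(q, NULL);           quarantine_put():
 *                                        sees q->offline, bails out,
 *                                        object is freed immediately
 */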
25 changes: 25 additions & 0 deletions mm/ksm.c
@@ -2660,6 +2660,31 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
goto again;
}

+bool reuse_ksm_page(struct page *page,
+		    struct vm_area_struct *vma,
+		    unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
+	    WARN_ON(!page_mapped(page)) ||
+	    WARN_ON(!PageLocked(page))) {
+		dump_page(page, "reuse_ksm_page");
+		return false;
+	}
+#endif
+
+	if (PageSwapCache(page) || !page_stable_node(page))
+		return false;
+	/* Prohibit parallel get_ksm_page() */
+	if (!page_ref_freeze(page, 1))
+		return false;
+
+	page_move_anon_rmap(page, vma);
+	page->index = linear_page_index(vma, address);
+	page_ref_unfreeze(page, 1);
+
+	return true;
+}
#ifdef CONFIG_MIGRATION
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
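Note: reuse_ksm_page() has no caller in this commit; it is meant to be used by the write-protect fault path. A condensed sketch of the intended call site in mm/memory.c:do_wp_page(), modeled on the mainline commit that introduced the helper (52d1e606ee73, "mm: reuse only-pte-mapped KSM page in do_wp_page()"), with the page-locking preamble elided:

	if (PageKsm(vmf->page)) {
		bool reused = reuse_ksm_page(vmf->page, vmf->vma,
					     vmf->address);

		unlock_page(vmf->page);
		if (!reused)
			goto copy;	/* still shared: fall back to a COW copy */
		/* the page is now exclusively ours: reuse it in place */
		wp_page_reuse(vmf);
		return VM_FAULT_WRITE;
	}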
3 changes: 2 additions & 1 deletion mm/madvise.c
@@ -1280,7 +1280,8 @@ SYSCALL_DEFINE6(process_madvise, int, which, pid_t, upid,
goto put_pid;
}

-	if (!process_madvise_behavior_valid(behavior)) {
+	if (task->mm != current->mm &&
+	    !process_madvise_behavior_valid(behavior)) {
ret = -EINVAL;
goto release_task;
}
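Note: the effect of this change is that the behavior whitelist is only enforced when the caller targets another process; a task calling process_madvise() on itself may now pass any madvise behavior through. For reference, the check being bypassed looks like this in trees of this era (sketch; the exact set of accepted hints in this tree is an assumption):

static bool process_madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_COLD:
	case MADV_PAGEOUT:
		return true;
	default:
		return false;
	}
}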
1 change: 0 additions & 1 deletion mm/memblock.c
@@ -1630,7 +1630,6 @@ phys_addr_t __init_memblock memblock_end_of_DRAM(void)

return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
-EXPORT_SYMBOL_GPL(memblock_end_of_DRAM);

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
[4 additional changed files not rendered]
