page: Add a very basic LRU queue
Add a very basic LRU queue. Right now it does no LRU (i.e., it just takes
the first page in the queue), but it's a good first step and survives
basic memory reclaim tests (dd if=bigfile of=/dev/null, for instance).

Signed-off-by: Pedro Falcato <[email protected]>
heatd committed Jun 19, 2024
1 parent f1be491 commit 0d76c13
Showing 11 changed files with 478 additions and 135 deletions.
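The reclaim side of the queue is not among the hunks shown below, but the commit message spells out the current policy: reclaim simply takes the first page in the queue, with no reference tracking yet. A minimal sketch of that policy, written against the page_lru struct this commit adds and the list/spinlock helpers already used in the diff, could look like the following (the function name is hypothetical, and list_is_empty() is assumed to be the emptiness check provided by onyx/list.h):

/* Hypothetical sketch, not part of this commit: pop the head of the LRU queue.
 * This mirrors the "just take the first page" behaviour the commit message
 * describes; there is no second-chance/reference-bit handling yet. */
static struct page *lru_pop_head(struct page_lru *lru)
{
    struct page *page = NULL;

    spin_lock(&lru->lock);
    if (!list_is_empty(&lru->lru_list)) /* assumed helper from <onyx/list.h> */
    {
        page = container_of(list_first_element(&lru->lru_list), struct page, lru_node);
        list_remove(&page->lru_node);
        __atomic_and_fetch(&page->flags, ~PAGE_FLAG_LRU, __ATOMIC_RELEASE);
    }
    spin_unlock(&lru->lock);

    return page;
}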
37 changes: 37 additions & 0 deletions kernel/include/onyx/mm/page_lru.h
@@ -0,0 +1,37 @@
/*
* Copyright (c) 2024 Pedro Falcato
* This file is part of Onyx, and is released under the terms of the MIT License
* check LICENSE at the root directory for more information
*
* SPDX-License-Identifier: MIT
*/

#ifndef _ONYX_MM_PAGE_LRU_H
#define _ONYX_MM_PAGE_LRU_H

#include <onyx/list.h>
#include <onyx/spinlock.h>

struct page;

struct page_lru
{
/* Very simple single LRU list (for the CLOCK algorithm) */
struct list_head lru_list;
struct spinlock lock;
};

CONSTEXPR static inline void page_lru_init(struct page_lru *lru)
{
INIT_LIST_HEAD(&lru->lru_list);
spinlock_init(&lru->lock);
}

__BEGIN_CDECLS

void page_add_lru(struct page *page);
void page_remove_lru(struct page *page);

__END_CDECLS

#endif
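The lru_list comment above says the single list is meant for the CLOCK algorithm, and the commit also adds a PAGE_FLAG_REFERENCED bit to page.h, but nothing in this commit uses it yet. As a rough sketch only (helper names beyond the list and flag primitives visible in the diff are assumptions), a CLOCK/second-chance pass over this structure might eventually look like this:

/* Hypothetical CLOCK (second-chance) scan - not implemented by this commit.
 * Referenced pages get their bit cleared and are rotated to the tail;
 * the first unreferenced page found is returned as the eviction candidate. */
static struct page *lru_clock_scan(struct page_lru *lru, unsigned int max_scan)
{
    struct page *victim = NULL;

    spin_lock(&lru->lock);
    while (max_scan-- && !list_is_empty(&lru->lru_list)) /* list_is_empty() assumed */
    {
        struct page *page =
            container_of(list_first_element(&lru->lru_list), struct page, lru_node);

        if (page_flag_set(page, PAGE_FLAG_REFERENCED))
        {
            /* Second chance: clear the reference bit and move the page to the tail */
            __atomic_and_fetch(&page->flags, ~PAGE_FLAG_REFERENCED, __ATOMIC_RELAXED);
            list_remove(&page->lru_node);
            list_add_tail(&page->lru_node, &lru->lru_list);
            continue;
        }

        victim = page;
        break;
    }
    spin_unlock(&lru->lock);

    return victim;
}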
68 changes: 68 additions & 0 deletions kernel/include/onyx/mm/page_node.h
@@ -0,0 +1,68 @@
/*
* Copyright (c) 2017 - 2024 Pedro Falcato
* This file is part of Onyx, and is released under the terms of the MIT License
* check LICENSE at the root directory for more information
*
* SPDX-License-Identifier: MIT
*/

#ifndef _ONYX_MM_PAGE_NODE_H
#define _ONYX_MM_PAGE_NODE_H

#include <onyx/list.h>
#include <onyx/mm/page_zone.h>
#include <onyx/spinlock.h>

struct page_node
{
struct spinlock node_lock;
struct list_head cpu_list_node;
unsigned long used_pages;
unsigned long total_pages;
struct page_zone zones[NR_ZONES];

#ifdef __cplusplus
struct page_zone *pick_zone(unsigned long page);

constexpr page_node() : node_lock{}, cpu_list_node{}, used_pages{}, total_pages{}
{
spinlock_init(&node_lock);
page_zone_init(&zones[0], "DMA32", 0, UINT32_MAX);
page_zone_init(&zones[1], "Normal", (u64) UINT32_MAX + 1, UINT64_MAX);
}

void init()
{
INIT_LIST_HEAD(&cpu_list_node);
}

void add_region(unsigned long base, size_t size);
struct page *alloc_order(unsigned int order, unsigned long flags);
struct page *allocate_pages(unsigned long nr_pages, unsigned long flags);
struct page *alloc_page(unsigned long flags);
void free_page(struct page *p);

template <typename Callable>
bool for_every_zone(Callable c)
{
for (auto &zone : zones)
{
if (!c(&zone))
return false;
}

return true;
}
#endif
};

/* ugh */

__BEGIN_CDECLS
extern struct page_node main_node;

#define for_zones_in_node(node, zone) \
for (zone = node->zones; zone < node->zones + NR_ZONES; zone++)

__END_CDECLS
#endif
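For reference, the for_zones_in_node macro above (and its C++ counterpart for_every_zone) is just a zone iterator. An illustrative use, not taken from this commit, would be summing page totals across a node's zones:

/* Illustrative only: iterate every zone of a node with for_zones_in_node
 * and accumulate the total page count. */
static unsigned long node_total_pages(struct page_node *node)
{
    struct page_zone *zone;
    unsigned long total = 0;

    for_zones_in_node(node, zone)
        total += zone->total_pages;

    return total;
}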
111 changes: 111 additions & 0 deletions kernel/include/onyx/mm/page_zone.h
@@ -0,0 +1,111 @@
/*
* Copyright (c) 2017 - 2024 Pedro Falcato
* This file is part of Onyx, and is released under the terms of the MIT License
* check LICENSE at the root directory for more information
*
* SPDX-License-Identifier: MIT
*/

#ifndef _ONYX_MM_PAGE_ZONE_H
#define _ONYX_MM_PAGE_ZONE_H

#include <onyx/list.h>
#include <onyx/mm/page_lru.h>
#include <onyx/page.h>

#ifndef CONFIG_SMP_NR_CPUS
#define CONFIG_SMP_NR_CPUS 64
#endif

#define PAGEALLOC_NR_ORDERS 14

struct page_pcpu_data
{
struct list_head page_list;
unsigned long nr_pages;
unsigned long nr_fast_path;
unsigned long nr_slow_path;
unsigned long nr_queue_reclaims;
long pagestats[PAGE_STATS_MAX];

#ifdef __cplusplus
constexpr page_pcpu_data() : nr_pages{}, nr_fast_path{}, nr_slow_path{}, nr_queue_reclaims{}
{
INIT_LIST_HEAD(&page_list);
for (auto &stat : pagestats)
stat = 0;
}

/**
* @brief Allocate from pcpu state.
* IRQs must be disabled
* @return Allocated struct page, or nullptr
*/
__attribute__((always_inline)) struct page *alloc()
{
if (nr_pages == 0) [[unlikely]]
return nullptr;

struct page *page = container_of(list_first_element(&page_list), struct page,
page_allocator_node.list_node);
list_remove(&page->page_allocator_node.list_node);

nr_pages--;

return page;
}

/**
* @brief Free to pcpu state
* IRQs must be disabled
* @param page Page to free
*/
__attribute__((always_inline)) void free(struct page *page)
{
list_add_tail(&page->page_allocator_node.list_node, &page_list);
nr_pages++;
}
#endif

} __align_cache;

struct page_zone
{
const char *name;
unsigned long start;
unsigned long end;
unsigned long min_watermark;
unsigned long low_watermark;
unsigned long high_watermark;
struct list_head pages[PAGEALLOC_NR_ORDERS];
unsigned long total_pages;
long used_pages;
unsigned long splits;
unsigned long merges;
struct page_lru zone_lru;
struct spinlock lock;
struct page_pcpu_data pcpu[CONFIG_SMP_NR_CPUS] __align_cache;
};

#ifdef __cplusplus
constexpr void page_zone_init(page_zone *zone, const char *name, unsigned long start,
unsigned long end)
{
zone->name = name;
zone->start = start;
zone->end = end;
zone->high_watermark = zone->min_watermark = zone->low_watermark = 0;
spinlock_init(&zone->lock);
for (auto &order : zone->pages)
{
INIT_LIST_HEAD(&order);
}

zone->total_pages = 0;
zone->used_pages = 0;
zone->merges = zone->splits = 0;
page_lru_init(&zone->zone_lru);
}
#endif

#endif
2 changes: 2 additions & 0 deletions kernel/include/onyx/mm/vm_object.h
@@ -274,6 +274,8 @@ void vm_obj_clean_page(struct vm_object *obj, struct page *page);

void vm_obj_reassign_mapping(struct vm_object *vm_obj, struct vm_area_struct *vma);

+bool vm_obj_remove_page(struct vm_object *obj, struct page *page);

__END_CDECLS

#endif
8 changes: 8 additions & 0 deletions kernel/include/onyx/page.h
@@ -71,6 +71,8 @@ __BEGIN_CDECLS
#define PAGE_FLAG_UPTODATE (1 << 8)
#define PAGE_FLAG_WRITEBACK (1 << 9)
#define PAGE_FLAG_READAHEAD (1 << 10)
+#define PAGE_FLAG_LRU (1 << 11)
+#define PAGE_FLAG_REFERENCED (1 << 12)

struct vm_object;

@@ -100,6 +102,8 @@ struct CAPABILITY("page") page
struct page *next_virtual_region;
} next_un;
};

+    struct list_head lru_node;
};

unsigned long priv;
@@ -486,6 +490,7 @@ void page_drain_pcpu();
enum page_stat
{
NR_FILE = 0,
+    NR_SHARED,
NR_ANON,
NR_DIRTY,
NR_WRITEBACK,
@@ -504,6 +509,9 @@ void dec_page_stat(struct page *page, enum page_stat stat);

void page_accumulate_stats(unsigned long pages[PAGE_STATS_MAX]);

+struct page_lru;
+struct page_lru *page_to_page_lru(struct page *page);

__END_CDECLS

#endif
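page_to_page_lru() is declared here, but its definition is in one of the changed files that did not load in this view. Since struct page_zone now embeds a zone_lru, a plausible shape is the one below; page_to_zone() is an assumed lookup helper, not something confirmed by the visible hunks:

/* Hypothetical sketch of page_to_page_lru(): return the LRU of the zone that
 * owns the page. page_to_zone() is assumed (e.g. a lookup of the page's
 * physical address against each zone's start/end range). */
struct page_lru *page_to_page_lru(struct page *page)
{
    struct page_zone *zone = page_to_zone(page);
    return &zone->zone_lru;
}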
4 changes: 2 additions & 2 deletions kernel/kernel/fs/filemap.cpp
@@ -11,6 +11,7 @@
#include <onyx/filemap.h>
#include <onyx/gen/trace_filemap.h>
#include <onyx/mm/amap.h>
+#include <onyx/mm/page_lru.h>
#include <onyx/page.h>
#include <onyx/pagecache.h>
#include <onyx/readahead.h>
@@ -63,6 +64,7 @@ int filemap_find_page(struct inode *ino, size_t pgoff, unsigned int flags, struc
{
inc_page_stat(p, NR_FILE);
page_ref(p);
+        page_add_lru(p);
}

p = p2;
@@ -660,8 +662,6 @@ int filemap_private_fault(struct vm_pf_context *ctx)
* 'adopts' our reference. This works because amaps are inherently region-specific, and we have
* the address_space locked.
*/
-    if (!newp)
-        page_unref(page);

return 0;
enomem:
2 changes: 1 addition & 1 deletion kernel/kernel/mm/Makefile
@@ -1,4 +1,4 @@
-mm-y:= bootmem.o page.o pagealloc.o vm_object.o vm.o vmalloc.o reclaim.o amap.o anon.o mincore.o
+mm-y:= bootmem.o page.o pagealloc.o vm_object.o vm.o vmalloc.o reclaim.o amap.o anon.o mincore.o page_lru.o
mm-$(CONFIG_KUNIT)+= vm_tests.o

ifeq ($(CONFIG_KASAN), y)
30 changes: 30 additions & 0 deletions kernel/kernel/mm/page_lru.c
@@ -0,0 +1,30 @@
/*
* Copyright (c) 2024 Pedro Falcato
* This file is part of Onyx, and is released under the terms of the MIT License
* check LICENSE at the root directory for more information
*
* SPDX-License-Identifier: MIT
*/
#include <onyx/mm/page_lru.h>
#include <onyx/page.h>

void page_add_lru(struct page *page)
{
DCHECK(!page_flag_set(page, PAGE_FLAG_LRU));
DCHECK(page->owner != NULL);
struct page_lru *lru = page_to_page_lru(page);
spin_lock(&lru->lock);
list_add_tail(&page->lru_node, &lru->lru_list);
page_test_set_flag(page, PAGE_FLAG_LRU);
spin_unlock(&lru->lock);
}

void page_remove_lru(struct page *page)
{
DCHECK(page_flag_set(page, PAGE_FLAG_LRU));
struct page_lru *lru = page_to_page_lru(page);
spin_lock(&lru->lock);
list_remove(&page->lru_node);
__atomic_and_fetch(&page->flags, ~PAGE_FLAG_LRU, __ATOMIC_RELEASE);
spin_unlock(&lru->lock);
}
(3 more changed files not shown)
