Skip to content

Commit 9b62e02

Browse files
committed
Merge tag 'mm-hotfixes-stable-2024-05-25-09-13' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "16 hotfixes, 11 of which are cc:stable. A few nilfs2 fixes, the
  remainder are for MM: a couple of selftests fixes, various singletons
  fixing various issues in various parts"

* tag 'mm-hotfixes-stable-2024-05-25-09-13' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/ksm: fix possible UAF of stable_node
  mm/memory-failure: fix handling of dissolved but not taken off from buddy pages
  mm: /proc/pid/smaps_rollup: avoid skipping vma after getting mmap_lock again
  nilfs2: fix potential hang in nilfs_detach_log_writer()
  nilfs2: fix unexpected freezing of nilfs_segctor_sync()
  nilfs2: fix use-after-free of timer for log writer thread
  selftests/mm: fix build warnings on ppc64
  arm64: patching: fix handling of execmem addresses
  selftests/mm: compaction_test: fix bogus test success and reduce probability of OOM-killer invocation
  selftests/mm: compaction_test: fix incorrect write of zero to nr_hugepages
  selftests/mm: compaction_test: fix bogus test success on Aarch64
  mailmap: update email address for Satya Priya
  mm/huge_memory: don't unpoison huge_zero_folio
  kasan, fortify: properly rename memintrinsics
  lib: add version into /proc/allocinfo output
  mm/vmalloc: fix vmalloc which may return null if called with __GFP_NOFAIL
2 parents a0db36e + 90e8234 commit 9b62e02

File tree

13 files changed

+187
-69
lines changed

13 files changed

+187
-69
lines changed

.mailmap

+1-1
Original file line number | Diff line number | Diff line change
@@ -572,7 +572,7 @@ Sarangdhar Joshi <[email protected]>
572572
Sascha Hauer <[email protected]>
573573
574574
Sathishkumar Muruganandam <[email protected]> <[email protected]>
575-
575+
576576
S.Çağlar Onur <[email protected]>
577577
578578
Sean Christopherson <[email protected]> <[email protected]>

Documentation/filesystems/proc.rst

+3-2
Original file line number | Diff line number | Diff line change
@@ -961,13 +961,14 @@ Provides information about memory allocations at all locations in the code
961961
base. Each allocation in the code is identified by its source file, line
962962
number, module (if originates from a loadable module) and the function calling
963963
the allocation. The number of bytes allocated and number of calls at each
964-
location are reported.
964+
location are reported. The first line indicates the version of the file, the
965+
second line is the header listing fields in the file.
965966

966967
Example output.
967968

968969
::
969970

970-
> sort -rn /proc/allocinfo
971+
> tail -n +3 /proc/allocinfo | sort -rn
971972
127664128 31168 mm/page_ext.c:270 func:alloc_page_ext
972973
56373248 4737 mm/slub.c:2259 func:alloc_slab_page
973974
14880768 3633 mm/readahead.c:247 func:page_cache_ra_unbounded

arch/arm64/kernel/patching.c

+1-1
Original file line number | Diff line number | Diff line change
@@ -36,7 +36,7 @@ static void __kprobes *patch_map(void *addr, int fixmap)
3636

3737
if (image)
3838
page = phys_to_page(__pa_symbol(addr));
39-
else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
39+
else if (IS_ENABLED(CONFIG_EXECMEM))
4040
page = vmalloc_to_page(addr);
4141
else
4242
return addr;

fs/nilfs2/segment.c

+50-13
Original file line number | Diff line number | Diff line change
@@ -2118,8 +2118,10 @@ static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
21182118
{
21192119
spin_lock(&sci->sc_state_lock);
21202120
if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2121-
sci->sc_timer.expires = jiffies + sci->sc_interval;
2122-
add_timer(&sci->sc_timer);
2121+
if (sci->sc_task) {
2122+
sci->sc_timer.expires = jiffies + sci->sc_interval;
2123+
add_timer(&sci->sc_timer);
2124+
}
21232125
sci->sc_state |= NILFS_SEGCTOR_COMMIT;
21242126
}
21252127
spin_unlock(&sci->sc_state_lock);
@@ -2166,19 +2168,36 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
21662168
struct nilfs_segctor_wait_request wait_req;
21672169
int err = 0;
21682170

2169-
spin_lock(&sci->sc_state_lock);
21702171
init_wait(&wait_req.wq);
21712172
wait_req.err = 0;
21722173
atomic_set(&wait_req.done, 0);
2174+
init_waitqueue_entry(&wait_req.wq, current);
2175+
2176+
/*
2177+
* To prevent a race issue where completion notifications from the
2178+
* log writer thread are missed, increment the request sequence count
2179+
* "sc_seq_request" and insert a wait queue entry using the current
2180+
* sequence number into the "sc_wait_request" queue at the same time
2181+
* within the lock section of "sc_state_lock".
2182+
*/
2183+
spin_lock(&sci->sc_state_lock);
21732184
wait_req.seq = ++sci->sc_seq_request;
2185+
add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
21742186
spin_unlock(&sci->sc_state_lock);
21752187

2176-
init_waitqueue_entry(&wait_req.wq, current);
2177-
add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2178-
set_current_state(TASK_INTERRUPTIBLE);
21792188
wake_up(&sci->sc_wait_daemon);
21802189

21812190
for (;;) {
2191+
set_current_state(TASK_INTERRUPTIBLE);
2192+
2193+
/*
2194+
* Synchronize only while the log writer thread is alive.
2195+
* Leave flushing out after the log writer thread exits to
2196+
* the cleanup work in nilfs_segctor_destroy().
2197+
*/
2198+
if (!sci->sc_task)
2199+
break;
2200+
21822201
if (atomic_read(&wait_req.done)) {
21832202
err = wait_req.err;
21842203
break;
@@ -2194,15 +2213,15 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
21942213
return err;
21952214
}
21962215

2197-
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2216+
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
21982217
{
21992218
struct nilfs_segctor_wait_request *wrq, *n;
22002219
unsigned long flags;
22012220

22022221
spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
22032222
list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
22042223
if (!atomic_read(&wrq->done) &&
2205-
nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2224+
(force || nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))) {
22062225
wrq->err = err;
22072226
atomic_set(&wrq->done, 1);
22082227
}
@@ -2320,10 +2339,21 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
23202339
*/
23212340
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
23222341
{
2342+
bool thread_is_alive;
2343+
23232344
spin_lock(&sci->sc_state_lock);
23242345
sci->sc_seq_accepted = sci->sc_seq_request;
2346+
thread_is_alive = (bool)sci->sc_task;
23252347
spin_unlock(&sci->sc_state_lock);
2326-
del_timer_sync(&sci->sc_timer);
2348+
2349+
/*
2350+
* This function does not race with the log writer thread's
2351+
* termination. Therefore, deleting sc_timer, which should not be
2352+
* done after the log writer thread exits, can be done safely outside
2353+
* the area protected by sc_state_lock.
2354+
*/
2355+
if (thread_is_alive)
2356+
del_timer_sync(&sci->sc_timer);
23272357
}
23282358

23292359
/**
@@ -2340,7 +2370,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
23402370
if (mode == SC_LSEG_SR) {
23412371
sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
23422372
sci->sc_seq_done = sci->sc_seq_accepted;
2343-
nilfs_segctor_wakeup(sci, err);
2373+
nilfs_segctor_wakeup(sci, err, false);
23442374
sci->sc_flush_request = 0;
23452375
} else {
23462376
if (mode == SC_FLUSH_FILE)
@@ -2349,7 +2379,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
23492379
sci->sc_flush_request &= ~FLUSH_DAT_BIT;
23502380

23512381
/* re-enable timer if checkpoint creation was not done */
2352-
if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2382+
if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && sci->sc_task &&
23532383
time_before(jiffies, sci->sc_timer.expires))
23542384
add_timer(&sci->sc_timer);
23552385
}
@@ -2539,6 +2569,7 @@ static int nilfs_segctor_thread(void *arg)
25392569
int timeout = 0;
25402570

25412571
sci->sc_timer_task = current;
2572+
timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
25422573

25432574
/* start sync. */
25442575
sci->sc_task = current;
@@ -2606,6 +2637,7 @@ static int nilfs_segctor_thread(void *arg)
26062637
end_thread:
26072638
/* end sync. */
26082639
sci->sc_task = NULL;
2640+
timer_shutdown_sync(&sci->sc_timer);
26092641
wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
26102642
spin_unlock(&sci->sc_state_lock);
26112643
return 0;
@@ -2669,7 +2701,6 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
26692701
INIT_LIST_HEAD(&sci->sc_gc_inodes);
26702702
INIT_LIST_HEAD(&sci->sc_iput_queue);
26712703
INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
2672-
timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
26732704

26742705
sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
26752706
sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
@@ -2723,6 +2754,13 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
27232754
|| sci->sc_seq_request != sci->sc_seq_done);
27242755
spin_unlock(&sci->sc_state_lock);
27252756

2757+
/*
2758+
* Forcibly wake up tasks waiting in nilfs_segctor_sync(), which can
2759+
* be called from delayed iput() via nilfs_evict_inode() and can race
2760+
* with the above log writer thread termination.
2761+
*/
2762+
nilfs_segctor_wakeup(sci, 0, true);
2763+
27262764
if (flush_work(&sci->sc_iput_work))
27272765
flag = true;
27282766

@@ -2748,7 +2786,6 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
27482786

27492787
down_write(&nilfs->ns_segctor_sem);
27502788

2751-
timer_shutdown_sync(&sci->sc_timer);
27522789
kfree(sci);
27532790
}
27542791

fs/proc/task_mmu.c

+7-2
Original file line number | Diff line number | Diff line change
@@ -970,12 +970,17 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
970970
break;
971971

972972
/* Case 1 and 2 above */
973-
if (vma->vm_start >= last_vma_end)
973+
if (vma->vm_start >= last_vma_end) {
974+
smap_gather_stats(vma, &mss, 0);
975+
last_vma_end = vma->vm_end;
974976
continue;
977+
}
975978

976979
/* Case 4 above */
977-
if (vma->vm_end > last_vma_end)
980+
if (vma->vm_end > last_vma_end) {
978981
smap_gather_stats(vma, &mss, last_vma_end);
982+
last_vma_end = vma->vm_end;
983+
}
979984
}
980985
} for_each_vma(vmi, vma);
981986

include/linux/fortify-string.h

+18-4
Original file line number | Diff line number | Diff line change
@@ -75,17 +75,30 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("
7575
__ret; \
7676
})
7777

78-
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
78+
#if defined(__SANITIZE_ADDRESS__)
79+
80+
#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
81+
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
82+
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
83+
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
84+
#elif defined(CONFIG_KASAN_GENERIC)
85+
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__asan_memset);
86+
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memmove);
87+
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memcpy);
88+
#else /* CONFIG_KASAN_SW_TAGS */
89+
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__hwasan_memset);
90+
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memmove);
91+
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memcpy);
92+
#endif
93+
7994
extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
8095
extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
81-
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
82-
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
83-
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
8496
extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
8597
extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
8698
extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
8799
extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
88100
extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
101+
89102
#else
90103

91104
#if defined(__SANITIZE_MEMORY__)
@@ -110,6 +123,7 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size)
110123
#define __underlying_strlen __builtin_strlen
111124
#define __underlying_strncat __builtin_strncat
112125
#define __underlying_strncpy __builtin_strncpy
126+
113127
#endif
114128

115129
/**

lib/alloc_tag.c

+32-15
Original file line number | Diff line number | Diff line change
@@ -16,47 +16,60 @@ EXPORT_SYMBOL(_shared_alloc_tag);
1616
DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
1717
mem_alloc_profiling_key);
1818

19+
struct allocinfo_private {
20+
struct codetag_iterator iter;
21+
bool print_header;
22+
};
23+
1924
static void *allocinfo_start(struct seq_file *m, loff_t *pos)
2025
{
21-
struct codetag_iterator *iter;
26+
struct allocinfo_private *priv;
2227
struct codetag *ct;
2328
loff_t node = *pos;
2429

25-
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
26-
m->private = iter;
27-
if (!iter)
30+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
31+
m->private = priv;
32+
if (!priv)
2833
return NULL;
2934

35+
priv->print_header = (node == 0);
3036
codetag_lock_module_list(alloc_tag_cttype, true);
31-
*iter = codetag_get_ct_iter(alloc_tag_cttype);
32-
while ((ct = codetag_next_ct(iter)) != NULL && node)
37+
priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
38+
while ((ct = codetag_next_ct(&priv->iter)) != NULL && node)
3339
node--;
3440

35-
return ct ? iter : NULL;
41+
return ct ? priv : NULL;
3642
}
3743

3844
static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
3945
{
40-
struct codetag_iterator *iter = (struct codetag_iterator *)arg;
41-
struct codetag *ct = codetag_next_ct(iter);
46+
struct allocinfo_private *priv = (struct allocinfo_private *)arg;
47+
struct codetag *ct = codetag_next_ct(&priv->iter);
4248

4349
(*pos)++;
4450
if (!ct)
4551
return NULL;
4652

47-
return iter;
53+
return priv;
4854
}
4955

5056
static void allocinfo_stop(struct seq_file *m, void *arg)
5157
{
52-
struct codetag_iterator *iter = (struct codetag_iterator *)m->private;
58+
struct allocinfo_private *priv = (struct allocinfo_private *)m->private;
5359

54-
if (iter) {
60+
if (priv) {
5561
codetag_lock_module_list(alloc_tag_cttype, false);
56-
kfree(iter);
62+
kfree(priv);
5763
}
5864
}
5965

66+
static void print_allocinfo_header(struct seq_buf *buf)
67+
{
68+
/* Output format version, so we can change it. */
69+
seq_buf_printf(buf, "allocinfo - version: 1.0\n");
70+
seq_buf_printf(buf, "# <size> <calls> <tag info>\n");
71+
}
72+
6073
static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
6174
{
6275
struct alloc_tag *tag = ct_to_alloc_tag(ct);
@@ -71,13 +84,17 @@ static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
7184

7285
static int allocinfo_show(struct seq_file *m, void *arg)
7386
{
74-
struct codetag_iterator *iter = (struct codetag_iterator *)arg;
87+
struct allocinfo_private *priv = (struct allocinfo_private *)arg;
7588
char *bufp;
7689
size_t n = seq_get_buf(m, &bufp);
7790
struct seq_buf buf;
7891

7992
seq_buf_init(&buf, bufp, n);
80-
alloc_tag_to_text(&buf, iter->ct);
93+
if (priv->print_header) {
94+
print_allocinfo_header(&buf);
95+
priv->print_header = false;
96+
}
97+
alloc_tag_to_text(&buf, priv->iter.ct);
8198
seq_commit(m, seq_buf_used(&buf));
8299
return 0;
83100
}

mm/ksm.c

+2-1
Original file line number | Diff line number | Diff line change
@@ -2153,7 +2153,6 @@ static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
21532153

21542154
INIT_HLIST_HEAD(&stable_node_dup->hlist);
21552155
stable_node_dup->kpfn = kpfn;
2156-
folio_set_stable_node(kfolio, stable_node_dup);
21572156
stable_node_dup->rmap_hlist_len = 0;
21582157
DO_NUMA(stable_node_dup->nid = nid);
21592158
if (!need_chain) {
@@ -2172,6 +2171,8 @@ static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
21722171
stable_node_chain_add_dup(stable_node_dup, stable_node);
21732172
}
21742173

2174+
folio_set_stable_node(kfolio, stable_node_dup);
2175+
21752176
return stable_node_dup;
21762177
}
21772178

mm/memory-failure.c

+9-2
Original file line number | Diff line number | Diff line change
@@ -1221,7 +1221,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)
12211221
* subpages.
12221222
*/
12231223
folio_put(folio);
1224-
if (__page_handle_poison(p) >= 0) {
1224+
if (__page_handle_poison(p) > 0) {
12251225
page_ref_inc(p);
12261226
res = MF_RECOVERED;
12271227
} else {
@@ -2091,7 +2091,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
20912091
*/
20922092
if (res == 0) {
20932093
folio_unlock(folio);
2094-
if (__page_handle_poison(p) >= 0) {
2094+
if (__page_handle_poison(p) > 0) {
20952095
page_ref_inc(p);
20962096
res = MF_RECOVERED;
20972097
} else {
@@ -2546,6 +2546,13 @@ int unpoison_memory(unsigned long pfn)
25462546
goto unlock_mutex;
25472547
}
25482548

2549+
if (is_huge_zero_folio(folio)) {
2550+
unpoison_pr_info("Unpoison: huge zero page is not supported %#lx\n",
2551+
pfn, &unpoison_rs);
2552+
ret = -EOPNOTSUPP;
2553+
goto unlock_mutex;
2554+
}
2555+
25492556
if (!PageHWPoison(p)) {
25502557
unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
25512558
pfn, &unpoison_rs);

0 commit comments

Comments
 (0)