Skip to content

Commit a8cc743

Browse files
committed
Merge tag 'mm-hotfixes-stable-2024-11-03-10-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton: "17 hotfixes. 9 are cc:stable. 13 are MM and 4 are non-MM. The usual collection of singletons - please see the changelogs" * tag 'mm-hotfixes-stable-2024-11-03-10-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: mm: multi-gen LRU: use {ptep,pmdp}_clear_young_notify() mm: multi-gen LRU: remove MM_LEAF_OLD and MM_NONLEAF_TOTAL stats mm, mmap: limit THP alignment of anonymous mappings to PMD-aligned sizes mm: shrinker: avoid memleak in alloc_shrinker_info .mailmap: update e-mail address for Eugen Hristev vmscan,migrate: fix page count imbalance on node stats when demoting pages mailmap: update Jarkko's email addresses mm: allow set/clear page_type again nilfs2: fix potential deadlock with newly created symlinks Squashfs: fix variable overflow in squashfs_readpage_block kasan: remove vmalloc_percpu test tools/mm: -Werror fixes in page-types/slabinfo mm, swap: avoid over reclaim of full clusters mm: fix PSWPIN counter for large folios swap-in mm: avoid VM_BUG_ON when try to map an anon large folio to zero page. mm/codetag: fix null pointer check logic for ref and tag mm/gup: stop leaking pinned pages in low memory conditions
2 parents d5aaa0b + 1d4832b commit a8cc743

18 files changed: +159 additions, −143 deletions

.mailmap

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -199,7 +199,8 @@ Elliot Berman <[email protected]> <[email protected]>
199199
Enric Balletbo i Serra <[email protected]> <[email protected]>
200200
Enric Balletbo i Serra <[email protected]> <[email protected]>
201201
202-
202+
203+
203204
Evgeniy Polyakov <[email protected]>
204205
205206
@@ -282,7 +283,7 @@ Jan Glauber <[email protected]> <[email protected]>
282283
283284
284285
285-
Jarkko Sakkinen <[email protected]> <jarkko.sakkinen@tuni.fi>
286+
Jarkko Sakkinen <[email protected]> <jarkko.sakkinen@parity.io>
286287
287288
288289

fs/nilfs2/namei.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -157,6 +157,9 @@ static int nilfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
157157
/* slow symlink */
158158
inode->i_op = &nilfs_symlink_inode_operations;
159159
inode_nohighmem(inode);
160+
mapping_set_gfp_mask(inode->i_mapping,
161+
mapping_gfp_constraint(inode->i_mapping,
162+
~__GFP_FS));
160163
inode->i_mapping->a_ops = &nilfs_aops;
161164
err = page_symlink(inode, symname, l);
162165
if (err)

fs/squashfs/file_direct.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,8 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
3030
int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
3131
loff_t start_index = folio->index & ~mask;
3232
loff_t end_index = start_index | mask;
33-
int i, n, pages, bytes, res = -ENOMEM;
33+
loff_t index;
34+
int i, pages, bytes, res = -ENOMEM;
3435
struct page **page, *last_page;
3536
struct squashfs_page_actor *actor;
3637
void *pageaddr;
@@ -45,9 +46,9 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
4546
return res;
4647

4748
/* Try to grab all the pages covered by the Squashfs block */
48-
for (i = 0, n = start_index; n <= end_index; n++) {
49-
page[i] = (n == folio->index) ? target_page :
50-
grab_cache_page_nowait(target_page->mapping, n);
49+
for (i = 0, index = start_index; index <= end_index; index++) {
50+
page[i] = (index == folio->index) ? target_page :
51+
grab_cache_page_nowait(target_page->mapping, index);
5152

5253
if (page[i] == NULL)
5354
continue;

include/linux/alloc_tag.h

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -135,31 +135,35 @@ static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
135135
#endif
136136

137137
/* Caller should verify both ref and tag to be valid */
138-
static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
138+
static inline bool __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
139139
{
140140
alloc_tag_add_check(ref, tag);
141141
if (!ref || !tag)
142-
return;
142+
return false;
143143

144144
ref->ct = &tag->ct;
145+
return true;
145146
}
146147

147-
static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
148+
static inline bool alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
148149
{
149-
__alloc_tag_ref_set(ref, tag);
150+
if (unlikely(!__alloc_tag_ref_set(ref, tag)))
151+
return false;
152+
150153
/*
151154
* We need in increment the call counter every time we have a new
152155
* allocation or when we split a large allocation into smaller ones.
153156
* Each new reference for every sub-allocation needs to increment call
154157
* counter because when we free each part the counter will be decremented.
155158
*/
156159
this_cpu_inc(tag->counters->calls);
160+
return true;
157161
}
158162

159163
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
160164
{
161-
alloc_tag_ref_set(ref, tag);
162-
this_cpu_add(tag->counters->bytes, bytes);
165+
if (likely(alloc_tag_ref_set(ref, tag)))
166+
this_cpu_add(tag->counters->bytes, bytes);
163167
}
164168

165169
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)

include/linux/mmzone.h

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -458,9 +458,7 @@ struct lru_gen_folio {
458458

459459
enum {
460460
MM_LEAF_TOTAL, /* total leaf entries */
461-
MM_LEAF_OLD, /* old leaf entries */
462461
MM_LEAF_YOUNG, /* young leaf entries */
463-
MM_NONLEAF_TOTAL, /* total non-leaf entries */
464462
MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */
465463
MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */
466464
NR_MM_STATS
@@ -557,7 +555,7 @@ struct lru_gen_memcg {
557555

558556
void lru_gen_init_pgdat(struct pglist_data *pgdat);
559557
void lru_gen_init_lruvec(struct lruvec *lruvec);
560-
void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
558+
bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
561559

562560
void lru_gen_init_memcg(struct mem_cgroup *memcg);
563561
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
@@ -576,8 +574,9 @@ static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
576574
{
577575
}
578576

579-
static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
577+
static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
580578
{
579+
return false;
581580
}
582581

583582
static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)

include/linux/page-flags.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -975,12 +975,16 @@ static __always_inline bool folio_test_##fname(const struct folio *folio) \
975975
} \
976976
static __always_inline void __folio_set_##fname(struct folio *folio) \
977977
{ \
978+
if (folio_test_##fname(folio)) \
979+
return; \
978980
VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \
979981
folio); \
980982
folio->page.page_type = (unsigned int)PGTY_##lname << 24; \
981983
} \
982984
static __always_inline void __folio_clear_##fname(struct folio *folio) \
983985
{ \
986+
if (folio->page.page_type == UINT_MAX) \
987+
return; \
984988
VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
985989
folio->page.page_type = UINT_MAX; \
986990
}
@@ -993,11 +997,15 @@ static __always_inline int Page##uname(const struct page *page) \
993997
} \
994998
static __always_inline void __SetPage##uname(struct page *page) \
995999
{ \
1000+
if (Page##uname(page)) \
1001+
return; \
9961002
VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \
9971003
page->page_type = (unsigned int)PGTY_##lname << 24; \
9981004
} \
9991005
static __always_inline void __ClearPage##uname(struct page *page) \
10001006
{ \
1007+
if (page->page_type == UINT_MAX) \
1008+
return; \
10011009
VM_BUG_ON_PAGE(!Page##uname(page), page); \
10021010
page->page_type = UINT_MAX; \
10031011
}

include/linux/swap.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -335,6 +335,7 @@ struct swap_info_struct {
335335
* list.
336336
*/
337337
struct work_struct discard_work; /* discard worker */
338+
struct work_struct reclaim_work; /* reclaim worker */
338339
struct list_head discard_clusters; /* discard clusters list */
339340
struct plist_node avail_lists[]; /*
340341
* entries in swap_avail_heads, one

mm/gup.c

Lines changed: 19 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -2394,20 +2394,25 @@ static int migrate_longterm_unpinnable_folios(
23942394
}
23952395

23962396
/*
2397-
* Check whether all folios are *allowed* to be pinned indefinitely (longterm).
2397+
* Check whether all folios are *allowed* to be pinned indefinitely (long term).
23982398
* Rather confusingly, all folios in the range are required to be pinned via
23992399
* FOLL_PIN, before calling this routine.
24002400
*
2401-
* If any folios in the range are not allowed to be pinned, then this routine
2402-
* will migrate those folios away, unpin all the folios in the range and return
2403-
* -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then
2404-
* call this routine again.
2401+
* Return values:
24052402
*
2406-
* If an error other than -EAGAIN occurs, this indicates a migration failure.
2407-
* The caller should give up, and propagate the error back up the call stack.
2408-
*
2409-
* If everything is OK and all folios in the range are allowed to be pinned,
2403+
* 0: if everything is OK and all folios in the range are allowed to be pinned,
24102404
* then this routine leaves all folios pinned and returns zero for success.
2405+
*
2406+
* -EAGAIN: if any folios in the range are not allowed to be pinned, then this
2407+
* routine will migrate those folios away, unpin all the folios in the range. If
2408+
* migration of the entire set of folios succeeds, then -EAGAIN is returned. The
2409+
* caller should re-pin the entire range with FOLL_PIN and then call this
2410+
* routine again.
2411+
*
2412+
* -ENOMEM, or any other -errno: if an error *other* than -EAGAIN occurs, this
2413+
* indicates a migration failure. The caller should give up, and propagate the
2414+
* error back up the call stack. The caller does not need to unpin any folios in
2415+
* that case, because this routine will do the unpinning.
24112416
*/
24122417
static long check_and_migrate_movable_folios(unsigned long nr_folios,
24132418
struct folio **folios)
@@ -2425,10 +2430,8 @@ static long check_and_migrate_movable_folios(unsigned long nr_folios,
24252430
}
24262431

24272432
/*
2428-
* This routine just converts all the pages in the @pages array to folios and
2429-
* calls check_and_migrate_movable_folios() to do the heavy lifting.
2430-
*
2431-
* Please see the check_and_migrate_movable_folios() documentation for details.
2433+
* Return values and behavior are the same as those for
2434+
* check_and_migrate_movable_folios().
24322435
*/
24332436
static long check_and_migrate_movable_pages(unsigned long nr_pages,
24342437
struct page **pages)
@@ -2437,8 +2440,10 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
24372440
long i, ret;
24382441

24392442
folios = kmalloc_array(nr_pages, sizeof(*folios), GFP_KERNEL);
2440-
if (!folios)
2443+
if (!folios) {
2444+
unpin_user_pages(pages, nr_pages);
24412445
return -ENOMEM;
2446+
}
24422447

24432448
for (i = 0; i < nr_pages; i++)
24442449
folios[i] = page_folio(pages[i]);

mm/kasan/kasan_test_c.c

Lines changed: 0 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -1810,32 +1810,6 @@ static void vm_map_ram_tags(struct kunit *test)
18101810
free_pages((unsigned long)p_ptr, 1);
18111811
}
18121812

1813-
static void vmalloc_percpu(struct kunit *test)
1814-
{
1815-
char __percpu *ptr;
1816-
int cpu;
1817-
1818-
/*
1819-
* This test is specifically crafted for the software tag-based mode,
1820-
* the only tag-based mode that poisons percpu mappings.
1821-
*/
1822-
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
1823-
1824-
ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
1825-
1826-
for_each_possible_cpu(cpu) {
1827-
char *c_ptr = per_cpu_ptr(ptr, cpu);
1828-
1829-
KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
1830-
KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);
1831-
1832-
/* Make sure that in-bounds accesses don't crash the kernel. */
1833-
*c_ptr = 0;
1834-
}
1835-
1836-
free_percpu(ptr);
1837-
}
1838-
18391813
/*
18401814
* Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
18411815
* KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
@@ -2023,7 +1997,6 @@ static struct kunit_case kasan_kunit_test_cases[] = {
20231997
KUNIT_CASE(vmalloc_oob),
20241998
KUNIT_CASE(vmap_tags),
20251999
KUNIT_CASE(vm_map_ram_tags),
2026-
KUNIT_CASE(vmalloc_percpu),
20272000
KUNIT_CASE(match_all_not_assigned),
20282001
KUNIT_CASE(match_all_ptr_tag),
20292002
KUNIT_CASE(match_all_mem_tag),

mm/migrate.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -206,7 +206,8 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
206206
pte_t newpte;
207207
void *addr;
208208

209-
VM_BUG_ON_PAGE(PageCompound(page), page);
209+
if (PageCompound(page))
210+
return false;
210211
VM_BUG_ON_PAGE(!PageAnon(page), page);
211212
VM_BUG_ON_PAGE(!PageLocked(page), page);
212213
VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);
@@ -1177,7 +1178,7 @@ static void migrate_folio_done(struct folio *src,
11771178
* not accounted to NR_ISOLATED_*. They can be recognized
11781179
* as __folio_test_movable
11791180
*/
1180-
if (likely(!__folio_test_movable(src)))
1181+
if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION)
11811182
mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
11821183
folio_is_file_lru(src), -folio_nr_pages(src));
11831184

mm/mmap.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -900,7 +900,8 @@ __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
900900

901901
if (get_area) {
902902
addr = get_area(file, addr, len, pgoff, flags);
903-
} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
903+
} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)
904+
&& IS_ALIGNED(len, PMD_SIZE)) {
904905
/* Ensures that larger anonymous mappings are THP aligned. */
905906
addr = thp_get_unmapped_area_vmflags(file, addr, len,
906907
pgoff, flags, vm_flags);

mm/page_io.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -570,7 +570,7 @@ static void swap_read_folio_bdev_sync(struct folio *folio,
570570
* attempt to access it in the page fault retry time check.
571571
*/
572572
get_task_struct(current);
573-
count_vm_event(PSWPIN);
573+
count_vm_events(PSWPIN, folio_nr_pages(folio));
574574
submit_bio_wait(&bio);
575575
__end_swap_bio_read(&bio);
576576
put_task_struct(current);
@@ -585,7 +585,7 @@ static void swap_read_folio_bdev_async(struct folio *folio,
585585
bio->bi_iter.bi_sector = swap_folio_sector(folio);
586586
bio->bi_end_io = end_swap_bio_read;
587587
bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
588-
count_vm_event(PSWPIN);
588+
count_vm_events(PSWPIN, folio_nr_pages(folio));
589589
submit_bio(bio);
590590
}
591591

mm/rmap.c

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -885,13 +885,10 @@ static bool folio_referenced_one(struct folio *folio,
885885
return false;
886886
}
887887

888-
if (pvmw.pte) {
889-
if (lru_gen_enabled() &&
890-
pte_young(ptep_get(pvmw.pte))) {
891-
lru_gen_look_around(&pvmw);
888+
if (lru_gen_enabled() && pvmw.pte) {
889+
if (lru_gen_look_around(&pvmw))
892890
referenced++;
893-
}
894-
891+
} else if (pvmw.pte) {
895892
if (ptep_clear_flush_young_notify(vma, address,
896893
pvmw.pte))
897894
referenced++;

mm/shrinker.c

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -76,19 +76,21 @@ void free_shrinker_info(struct mem_cgroup *memcg)
7676

7777
int alloc_shrinker_info(struct mem_cgroup *memcg)
7878
{
79-
struct shrinker_info *info;
8079
int nid, ret = 0;
8180
int array_size = 0;
8281

8382
mutex_lock(&shrinker_mutex);
8483
array_size = shrinker_unit_size(shrinker_nr_max);
8584
for_each_node(nid) {
86-
info = kvzalloc_node(sizeof(*info) + array_size, GFP_KERNEL, nid);
85+
struct shrinker_info *info = kvzalloc_node(sizeof(*info) + array_size,
86+
GFP_KERNEL, nid);
8787
if (!info)
8888
goto err;
8989
info->map_nr_max = shrinker_nr_max;
90-
if (shrinker_unit_alloc(info, NULL, nid))
90+
if (shrinker_unit_alloc(info, NULL, nid)) {
91+
kvfree(info);
9192
goto err;
93+
}
9294
rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
9395
}
9496
mutex_unlock(&shrinker_mutex);

Comments (0)