Merge tag 'mm-hotfixes-stable-2024-11-03-10-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "17 hotfixes.  9 are cc:stable.  13 are MM and 4 are non-MM.

  The usual collection of singletons - please see the changelogs"

* tag 'mm-hotfixes-stable-2024-11-03-10-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm: multi-gen LRU: use {ptep,pmdp}_clear_young_notify()
  mm: multi-gen LRU: remove MM_LEAF_OLD and MM_NONLEAF_TOTAL stats
  mm, mmap: limit THP alignment of anonymous mappings to PMD-aligned sizes
  mm: shrinker: avoid memleak in alloc_shrinker_info
  .mailmap: update e-mail address for Eugen Hristev
  vmscan,migrate: fix page count imbalance on node stats when demoting pages
  mailmap: update Jarkko's email addresses
  mm: allow set/clear page_type again
  nilfs2: fix potential deadlock with newly created symlinks
  Squashfs: fix variable overflow in squashfs_readpage_block
  kasan: remove vmalloc_percpu test
  tools/mm: -Werror fixes in page-types/slabinfo
  mm, swap: avoid over reclaim of full clusters
  mm: fix PSWPIN counter for large folios swap-in
  mm: avoid VM_BUG_ON when try to map an anon large folio to zero page.
  mm/codetag: fix null pointer check logic for ref and tag
  mm/gup: stop leaking pinned pages in low memory conditions
torvalds committed Nov 3, 2024
2 parents d5aaa0b + 1d4832b commit a8cc743
Showing 18 changed files with 159 additions and 143 deletions.
5 changes: 3 additions & 2 deletions .mailmap
@@ -199,7 +199,8 @@ Elliot Berman <quic_eberman@quicinc.com> <eberman@codeaurora.org>
Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>
Enric Balletbo i Serra <eballetbo@kernel.org> <eballetbo@iseebcn.com>
Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
Eugen Hristev <eugen.hristev@collabora.com> <eugen.hristev@microchip.com>
Eugen Hristev <eugen.hristev@linaro.org> <eugen.hristev@microchip.com>
Eugen Hristev <eugen.hristev@linaro.org> <eugen.hristev@collabora.com>
Evgeniy Polyakov <johnpol@2ka.mipt.ru>
Ezequiel Garcia <ezequiel@vanguardiasur.com.ar> <ezequiel@collabora.com>
Faith Ekstrand <faith.ekstrand@collabora.com> <jason@jlekstrand.net>
@@ -282,7 +283,7 @@ Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
Jan Kuliga <jtkuliga.kdev@gmail.com> <jankul@alatek.krakow.pl>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@linux.intel.com>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko@profian.com>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@tuni.fi>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@parity.io>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
3 changes: 3 additions & 0 deletions fs/nilfs2/namei.c
@@ -157,6 +157,9 @@ static int nilfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
/* slow symlink */
inode->i_op = &nilfs_symlink_inode_operations;
inode_nohighmem(inode);
mapping_set_gfp_mask(inode->i_mapping,
mapping_gfp_constraint(inode->i_mapping,
~__GFP_FS));
inode->i_mapping->a_ops = &nilfs_aops;
err = page_symlink(inode, symname, l);
if (err)
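
The nilfs2 hunk above avoids the deadlock by masking __GFP_FS out of the symlink mapping's allocation mask before page_symlink() touches the page cache. A minimal usage sketch of the two helpers involved, shown out of context and only to illustrate the masking (mapping_gfp_constraint() ANDs the mapping's current mask with the constraint):

	gfp_t gfp = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS);

	/* drops only __GFP_FS and keeps every other flag, so page-cache
	 * allocations for this mapping cannot recurse into fs reclaim */
	mapping_set_gfp_mask(inode->i_mapping, gfp);
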
9 changes: 5 additions & 4 deletions fs/squashfs/file_direct.c
@@ -30,7 +30,8 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
loff_t start_index = folio->index & ~mask;
loff_t end_index = start_index | mask;
int i, n, pages, bytes, res = -ENOMEM;
loff_t index;
int i, pages, bytes, res = -ENOMEM;
struct page **page, *last_page;
struct squashfs_page_actor *actor;
void *pageaddr;
@@ -45,9 +46,9 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
return res;

/* Try to grab all the pages covered by the Squashfs block */
for (i = 0, n = start_index; n <= end_index; n++) {
page[i] = (n == folio->index) ? target_page :
grab_cache_page_nowait(target_page->mapping, n);
for (i = 0, index = start_index; index <= end_index; index++) {
page[i] = (index == folio->index) ? target_page :
grab_cache_page_nowait(target_page->mapping, index);

if (page[i] == NULL)
continue;
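
For context on the overflow fixed above: the old loop counter n was a plain int while start_index and end_index are loff_t page indexes, so the loop misbehaved once a file's page index no longer fit in 32 bits. A standalone userspace sketch of where that threshold lies (assuming 4 KiB pages; not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t file_size  = 16LL << 40;	/* a 16 TiB file */
	int64_t page_index = file_size >> 12;	/* last page index with 4 KiB pages */

	/* 2^32 pages: past INT32_MAX, so a 32-bit index would wrap */
	printf("page index %lld vs INT32_MAX %d\n",
	       (long long)page_index, INT32_MAX);
	return 0;
}
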
16 changes: 10 additions & 6 deletions include/linux/alloc_tag.h
@@ -135,31 +135,35 @@ static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
#endif

/* Caller should verify both ref and tag to be valid */
static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
static inline bool __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
alloc_tag_add_check(ref, tag);
if (!ref || !tag)
return;
return false;

ref->ct = &tag->ct;
return true;
}

static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
static inline bool alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
__alloc_tag_ref_set(ref, tag);
if (unlikely(!__alloc_tag_ref_set(ref, tag)))
return false;

/*
* We need in increment the call counter every time we have a new
* allocation or when we split a large allocation into smaller ones.
* Each new reference for every sub-allocation needs to increment call
* counter because when we free each part the counter will be decremented.
*/
this_cpu_inc(tag->counters->calls);
return true;
}

static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
alloc_tag_ref_set(ref, tag);
this_cpu_add(tag->counters->bytes, bytes);
if (likely(alloc_tag_ref_set(ref, tag)))
this_cpu_add(tag->counters->bytes, bytes);
}

static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
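
The behavioural point of the alloc_tag.h hunk is that the calls and bytes counters are only touched once a reference has actually been recorded, so a NULL ref or tag can no longer skew the accounting. A simplified, self-contained sketch of that guard pattern (illustrative types, not the kernel structures):

#include <stdbool.h>
#include <stddef.h>

struct counters { long calls; long bytes; };

static bool record_ref(const void **ref, const void *tag)
{
	if (!ref || !tag)
		return false;		/* nothing recorded */
	*ref = tag;
	return true;
}

static void account_alloc(const void **ref, const void *tag,
			  struct counters *c, size_t bytes)
{
	if (!record_ref(ref, tag))
		return;			/* keep calls and bytes in step */
	c->calls++;
	c->bytes += bytes;
}
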
7 changes: 3 additions & 4 deletions include/linux/mmzone.h
@@ -458,9 +458,7 @@ struct lru_gen_folio {

enum {
MM_LEAF_TOTAL, /* total leaf entries */
MM_LEAF_OLD, /* old leaf entries */
MM_LEAF_YOUNG, /* young leaf entries */
MM_NONLEAF_TOTAL, /* total non-leaf entries */
MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */
MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */
NR_MM_STATS
@@ -557,7 +555,7 @@ struct lru_gen_memcg {

void lru_gen_init_pgdat(struct pglist_data *pgdat);
void lru_gen_init_lruvec(struct lruvec *lruvec);
void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);

void lru_gen_init_memcg(struct mem_cgroup *memcg);
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
@@ -576,8 +574,9 @@ static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
{
}

static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
{
return false;
}

static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
8 changes: 8 additions & 0 deletions include/linux/page-flags.h
@@ -975,12 +975,16 @@ static __always_inline bool folio_test_##fname(const struct folio *folio) \
} \
static __always_inline void __folio_set_##fname(struct folio *folio) \
{ \
if (folio_test_##fname(folio)) \
return; \
VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \
folio); \
folio->page.page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __folio_clear_##fname(struct folio *folio) \
{ \
if (folio->page.page_type == UINT_MAX) \
return; \
VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
folio->page.page_type = UINT_MAX; \
}
@@ -993,11 +997,15 @@ static __always_inline int Page##uname(const struct page *page) \
} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
if (Page##uname(page)) \
return; \
VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \
page->page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
if (page->page_type == UINT_MAX) \
return; \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
page->page_type = UINT_MAX; \
}
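
The page-flags.h change makes the non-atomic set/clear helpers tolerate repeated calls: setting a page type that is already set, or clearing one that is already cleared, becomes a no-op instead of hitting VM_BUG_ON. A condensed sketch of the resulting behaviour (UINT_MAX meaning "no type set", as in the real macros):

#include <limits.h>

#define PGTY_SKETCH 0x0f000000u		/* illustrative type value */

static void set_type(unsigned int *page_type)
{
	if (*page_type == PGTY_SKETCH)	/* already set: silently accept */
		return;
	/* otherwise the field must still be UINT_MAX before we claim it */
	*page_type = PGTY_SKETCH;
}

static void clear_type(unsigned int *page_type)
{
	if (*page_type == UINT_MAX)	/* already clear: silently accept */
		return;
	*page_type = UINT_MAX;
}
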
1 change: 1 addition & 0 deletions include/linux/swap.h
@@ -335,6 +335,7 @@ struct swap_info_struct {
* list.
*/
struct work_struct discard_work; /* discard worker */
struct work_struct reclaim_work; /* reclaim worker */
struct list_head discard_clusters; /* discard clusters list */
struct plist_node avail_lists[]; /*
* entries in swap_avail_heads, one
33 changes: 19 additions & 14 deletions mm/gup.c
@@ -2394,20 +2394,25 @@ static int migrate_longterm_unpinnable_folios(
}

/*
* Check whether all folios are *allowed* to be pinned indefinitely (longterm).
* Check whether all folios are *allowed* to be pinned indefinitely (long term).
* Rather confusingly, all folios in the range are required to be pinned via
* FOLL_PIN, before calling this routine.
*
* If any folios in the range are not allowed to be pinned, then this routine
* will migrate those folios away, unpin all the folios in the range and return
* -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then
* call this routine again.
* Return values:
*
* If an error other than -EAGAIN occurs, this indicates a migration failure.
* The caller should give up, and propagate the error back up the call stack.
*
* If everything is OK and all folios in the range are allowed to be pinned,
* 0: if everything is OK and all folios in the range are allowed to be pinned,
* then this routine leaves all folios pinned and returns zero for success.
*
* -EAGAIN: if any folios in the range are not allowed to be pinned, then this
* routine will migrate those folios away, unpin all the folios in the range. If
* migration of the entire set of folios succeeds, then -EAGAIN is returned. The
* caller should re-pin the entire range with FOLL_PIN and then call this
* routine again.
*
* -ENOMEM, or any other -errno: if an error *other* than -EAGAIN occurs, this
* indicates a migration failure. The caller should give up, and propagate the
* error back up the call stack. The caller does not need to unpin any folios in
* that case, because this routine will do the unpinning.
*/
static long check_and_migrate_movable_folios(unsigned long nr_folios,
struct folio **folios)
@@ -2425,10 +2430,8 @@ static long check_and_migrate_movable_folios(unsigned long nr_folios,
}

/*
* This routine just converts all the pages in the @pages array to folios and
* calls check_and_migrate_movable_folios() to do the heavy lifting.
*
* Please see the check_and_migrate_movable_folios() documentation for details.
* Return values and behavior are the same as those for
* check_and_migrate_movable_folios().
*/
static long check_and_migrate_movable_pages(unsigned long nr_pages,
struct page **pages)
@@ -2437,8 +2440,10 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
long i, ret;

folios = kmalloc_array(nr_pages, sizeof(*folios), GFP_KERNEL);
if (!folios)
if (!folios) {
unpin_user_pages(pages, nr_pages);
return -ENOMEM;
}

for (i = 0; i < nr_pages; i++)
folios[i] = page_folio(pages[i]);
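
The reworked comment above spells out a pin/check/retry contract for callers. A hypothetical caller sketch of that contract (pin_folios_with_foll_pin() is a placeholder, not a kernel API; the real user is the long-term GUP path):

static long pin_longterm(unsigned long nr_folios, struct folio **folios)
{
	long ret;

	do {
		/* re-pin the whole range with FOLL_PIN on every attempt */
		ret = pin_folios_with_foll_pin(nr_folios, folios);
		if (ret < 0)
			return ret;

		/*
		 * 0       -> every folio may stay pinned long term, done.
		 * -EAGAIN -> unpinnable folios were migrated away and all
		 *            folios were unpinned; pin and check again.
		 * other   -> migration failed; the folios are already
		 *            unpinned, so just propagate the error.
		 */
		ret = check_and_migrate_movable_folios(nr_folios, folios);
	} while (ret == -EAGAIN);

	return ret;
}
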
27 changes: 0 additions & 27 deletions mm/kasan/kasan_test_c.c
@@ -1810,32 +1810,6 @@ static void vm_map_ram_tags(struct kunit *test)
free_pages((unsigned long)p_ptr, 1);
}

static void vmalloc_percpu(struct kunit *test)
{
char __percpu *ptr;
int cpu;

/*
* This test is specifically crafted for the software tag-based mode,
* the only tag-based mode that poisons percpu mappings.
*/
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);

for_each_possible_cpu(cpu) {
char *c_ptr = per_cpu_ptr(ptr, cpu);

KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);

/* Make sure that in-bounds accesses don't crash the kernel. */
*c_ptr = 0;
}

free_percpu(ptr);
}

/*
* Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
* KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
@@ -2023,7 +1997,6 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(vmalloc_oob),
KUNIT_CASE(vmap_tags),
KUNIT_CASE(vm_map_ram_tags),
KUNIT_CASE(vmalloc_percpu),
KUNIT_CASE(match_all_not_assigned),
KUNIT_CASE(match_all_ptr_tag),
KUNIT_CASE(match_all_mem_tag),
5 changes: 3 additions & 2 deletions mm/migrate.c
@@ -206,7 +206,8 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
pte_t newpte;
void *addr;

VM_BUG_ON_PAGE(PageCompound(page), page);
if (PageCompound(page))
return false;
VM_BUG_ON_PAGE(!PageAnon(page), page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);
@@ -1177,7 +1178,7 @@ static void migrate_folio_done(struct folio *src,
* not accounted to NR_ISOLATED_*. They can be recognized
* as __folio_test_movable
*/
if (likely(!__folio_test_movable(src)))
if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION)
mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
folio_is_file_lru(src), -folio_nr_pages(src));

3 changes: 2 additions & 1 deletion mm/mmap.c
@@ -900,7 +900,8 @@ __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,

if (get_area) {
addr = get_area(file, addr, len, pgoff, flags);
} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)
&& IS_ALIGNED(len, PMD_SIZE)) {
/* Ensures that larger anonymous mappings are THP aligned. */
addr = thp_get_unmapped_area_vmflags(file, addr, len,
pgoff, flags, vm_flags);
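
The mmap.c hunk narrows the THP-alignment heuristic: the PMD-aligned search is only worthwhile when the mapping length is itself a multiple of PMD_SIZE; otherwise the mapping cannot be fully backed by huge pages and the extra alignment only fragments the address space. A tiny standalone illustration of the new condition (assuming 2 MiB PMDs, as on x86-64):

#include <stdbool.h>
#include <stdint.h>

#define PMD_SIZE_SKETCH (2UL * 1024 * 1024)	/* assumption: 2 MiB PMDs */

static bool want_thp_alignment(uint64_t len, bool thp_enabled)
{
	/* mirrors IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && IS_ALIGNED(len, PMD_SIZE) */
	return thp_enabled && (len % PMD_SIZE_SKETCH) == 0;
}
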
4 changes: 2 additions & 2 deletions mm/page_io.c
@@ -570,7 +570,7 @@ static void swap_read_folio_bdev_sync(struct folio *folio,
* attempt to access it in the page fault retry time check.
*/
get_task_struct(current);
count_vm_event(PSWPIN);
count_vm_events(PSWPIN, folio_nr_pages(folio));
submit_bio_wait(&bio);
__end_swap_bio_read(&bio);
put_task_struct(current);
@@ -585,7 +585,7 @@ static void swap_read_folio_bdev_async(struct folio *folio,
bio->bi_iter.bi_sector = swap_folio_sector(folio);
bio->bi_end_io = end_swap_bio_read;
bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
count_vm_event(PSWPIN);
count_vm_events(PSWPIN, folio_nr_pages(folio));
submit_bio(bio);
}

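
The page_io.c change makes PSWPIN count base pages rather than bios, which matters once a large folio is swapped in as a single I/O. A worked example, assuming 4 KiB base pages: a 64 KiB folio has folio_nr_pages() == 16, so count_vm_events(PSWPIN, 16) is charged for that one bio, where the old count_vm_event(PSWPIN) would have added just 1.
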
9 changes: 3 additions & 6 deletions mm/rmap.c
@@ -885,13 +885,10 @@ static bool folio_referenced_one(struct folio *folio,
return false;
}

if (pvmw.pte) {
if (lru_gen_enabled() &&
pte_young(ptep_get(pvmw.pte))) {
lru_gen_look_around(&pvmw);
if (lru_gen_enabled() && pvmw.pte) {
if (lru_gen_look_around(&pvmw))
referenced++;
}

} else if (pvmw.pte) {
if (ptep_clear_flush_young_notify(vma, address,
pvmw.pte))
referenced++;
8 changes: 5 additions & 3 deletions mm/shrinker.c
@@ -76,19 +76,21 @@ void free_shrinker_info(struct mem_cgroup *memcg)

int alloc_shrinker_info(struct mem_cgroup *memcg)
{
struct shrinker_info *info;
int nid, ret = 0;
int array_size = 0;

mutex_lock(&shrinker_mutex);
array_size = shrinker_unit_size(shrinker_nr_max);
for_each_node(nid) {
info = kvzalloc_node(sizeof(*info) + array_size, GFP_KERNEL, nid);
struct shrinker_info *info = kvzalloc_node(sizeof(*info) + array_size,
GFP_KERNEL, nid);
if (!info)
goto err;
info->map_nr_max = shrinker_nr_max;
if (shrinker_unit_alloc(info, NULL, nid))
if (shrinker_unit_alloc(info, NULL, nid)) {
kvfree(info);
goto err;
}
rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
}
mutex_unlock(&shrinker_mutex);
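
The shrinker.c hunk plugs a leak in the error path: when shrinker_unit_alloc() fails, the info allocated in that same loop iteration has not yet been published via rcu_assign_pointer(), so the common err path cannot find it and it must be freed on the spot. A generic, self-contained sketch of that leak class (hypothetical helpers, not kernel code):

#include <stdlib.h>

#define N 8

static struct info { void *units; } *table[N];

static int alloc_table(void)
{
	int i;

	for (i = 0; i < N; i++) {
		struct info *info = calloc(1, sizeof(*info));

		if (!info)
			goto err;
		info->units = calloc(1, 64);
		if (!info->units) {
			free(info);	/* the fix: this iteration's info is not
					 * in table[] yet, so the err path below
					 * would never see it */
			goto err;
		}
		table[i] = info;	/* publish only fully set-up entries */
	}
	return 0;

err:
	while (i-- > 0) {		/* unwind the published iterations */
		free(table[i]->units);
		free(table[i]);
		table[i] = NULL;
	}
	return -1;
}
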