Skip to content

Commit 429ed0a

Browse files
VMoola authored and gregkh committed
mm/khugepaged: convert hpage_collapse_scan_pmd() to use folios
[ Upstream commit 5c07ebb ] Replaces 5 calls to compound_head(), and removes 1385 bytes of kernel text. Link: https://lkml.kernel.org/r/20231020183331.10770-3-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com> Reviewed-by: Rik van Riel <riel@surriel.com> Reviewed-by: Yang Shi <shy828301@gmail.com> Cc: Kefeng Wang <wangkefeng.wang@huawei.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Stable-dep-of: 394bfac ("mm/khugepaged: fix the address passed to notifier on testing young") Signed-off-by: Sasha Levin <sashal@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 2bae1bf commit 429ed0a

File tree

1 file changed: +10 additions, -10 deletions

mm/khugepaged.c

Lines changed: 10 additions & 10 deletions
```diff
@@ -1140,6 +1140,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	int result = SCAN_FAIL, referenced = 0;
 	int none_or_zero = 0, shared = 0;
 	struct page *page = NULL;
+	struct folio *folio = NULL;
 	unsigned long _address;
 	spinlock_t *ptl;
 	int node = NUMA_NO_NODE, unmapped = 0;
@@ -1221,29 +1222,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 			}
 		}
 
-		page = compound_head(page);
-
+		folio = page_folio(page);
 		/*
 		 * Record which node the original page is from and save this
 		 * information to cc->node_load[].
 		 * Khugepaged will allocate hugepage from the node has the max
 		 * hit record.
 		 */
-		node = page_to_nid(page);
+		node = folio_nid(folio);
 		if (hpage_collapse_scan_abort(node, cc)) {
 			result = SCAN_SCAN_ABORT;
 			goto out_unmap;
 		}
 		cc->node_load[node]++;
-		if (!PageLRU(page)) {
+		if (!folio_test_lru(folio)) {
 			result = SCAN_PAGE_LRU;
 			goto out_unmap;
 		}
-		if (PageLocked(page)) {
+		if (folio_test_locked(folio)) {
 			result = SCAN_PAGE_LOCK;
 			goto out_unmap;
 		}
-		if (!PageAnon(page)) {
+		if (!folio_test_anon(folio)) {
 			result = SCAN_PAGE_ANON;
 			goto out_unmap;
 		}
@@ -1265,7 +1265,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		 * has excessive GUP pins (i.e. 512). Anyway the same check
 		 * will be done again later the risk seems low.
 		 */
-		if (!is_refcount_suitable(page)) {
+		if (!is_refcount_suitable(&folio->page)) {
 			result = SCAN_PAGE_COUNT;
 			goto out_unmap;
 		}
@@ -1275,8 +1275,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		 * enough young pte to justify collapsing the page
 		 */
 		if (cc->is_khugepaged &&
-		    (pte_young(pteval) || page_is_young(page) ||
-		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
+		    (pte_young(pteval) || folio_test_young(folio) ||
+		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
 								     address)))
 			referenced++;
 	}
@@ -1298,7 +1298,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		*mmap_locked = false;
 	}
 out:
-	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
+	trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
 				     none_or_zero, result, unmapped);
 	return result;
 }
```

0 commit comments

Comments (0)