@@ -1140,6 +1140,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	int result = SCAN_FAIL, referenced = 0;
 	int none_or_zero = 0, shared = 0;
 	struct page *page = NULL;
+	struct folio *folio = NULL;
 	unsigned long _address;
 	spinlock_t *ptl;
 	int node = NUMA_NO_NODE, unmapped = 0;
@@ -1221,29 +1222,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		}
 	}
 
-	page = compound_head(page);
-
+	folio = page_folio(page);
 	/*
 	 * Record which node the original page is from and save this
 	 * information to cc->node_load[].
 	 * Khugepaged will allocate hugepage from the node has the max
 	 * hit record.
 	 */
-	node = page_to_nid(page);
+	node = folio_nid(folio);
 	if (hpage_collapse_scan_abort(node, cc)) {
 		result = SCAN_SCAN_ABORT;
 		goto out_unmap;
 	}
 	cc->node_load[node]++;
-	if (!PageLRU(page)) {
+	if (!folio_test_lru(folio)) {
 		result = SCAN_PAGE_LRU;
 		goto out_unmap;
 	}
-	if (PageLocked(page)) {
+	if (folio_test_locked(folio)) {
 		result = SCAN_PAGE_LOCK;
 		goto out_unmap;
 	}
-	if (!PageAnon(page)) {
+	if (!folio_test_anon(folio)) {
 		result = SCAN_PAGE_ANON;
 		goto out_unmap;
 	}
@@ -1265,7 +1265,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	 * has excessive GUP pins (i.e. 512). Anyway the same check
 	 * will be done again later the risk seems low.
 	 */
-	if (!is_refcount_suitable(page)) {
+	if (!is_refcount_suitable(&folio->page)) {
 		result = SCAN_PAGE_COUNT;
 		goto out_unmap;
 	}
@@ -1275,8 +1275,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		 * enough young pte to justify collapsing the page
 		 */
 		if (cc->is_khugepaged &&
-		    (pte_young(pteval) || page_is_young(page) ||
-		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
+		    (pte_young(pteval) || folio_test_young(folio) ||
+		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;
	}
@@ -1298,7 +1298,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		*mmap_locked = false;
 	}
 out:
-	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
+	trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
				     none_or_zero, result, unmapped);
 	return result;
 }
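
For reference, the conversion above follows the standard page -> folio pattern: page_folio() replaces compound_head() (a folio describes the whole compound page, so looking one up from a tail page yields the head), folio_nid() replaces page_to_nid(), and page_is_young()/PageReferenced() and the other Page*() tests become their folio_test_*() counterparts. A minimal sketch of the same pattern, assuming kernel context; the helper name page_is_collapse_candidate() is illustrative and not a real kernel function:

#include <linux/mm.h>		/* page_folio(), folio_nid() */
#include <linux/page-flags.h>	/* folio_test_*() helpers */

static bool page_is_collapse_candidate(struct page *page)
{
	/*
	 * page_folio() maps any page, head or tail, to its containing
	 * folio, which is why the separate compound_head() call could
	 * be dropped in the diff above.
	 */
	struct folio *folio = page_folio(page);

	/*
	 * Each folio_test_*() helper checks the same flag that the old
	 * Page*() macro checked on the head page.
	 */
	return folio_test_lru(folio) &&
	       !folio_test_locked(folio) &&
	       folio_test_anon(folio);
}

Callees that still take a struct page, such as is_refcount_suitable() and the trace_mm_khugepaged_scan_pmd() tracepoint here, are passed the head page as &folio->page; those round trips can be dropped once the callees themselves are converted to folios.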