Commit d0ce0e4

sidkumar99 authored and akpm00 committed
mm/hugetlb: convert hugetlb fault paths to use alloc_hugetlb_folio()
Change alloc_huge_page() to alloc_hugetlb_folio() by changing all callers to handle the now folio return type of the function. In this conversion, alloc_huge_page_vma() is also changed to alloc_hugetlb_folio_vma() and hugepage_add_new_anon_rmap() is changed to take in a folio directly. Many additions of '&folio->page' are cleaned up in subsequent patches.

hugetlbfs_fallocate() is also refactored to use the RCU + page_cache_next_miss() API.

Link: https://lkml.kernel.org/r/20230125170537.96973-5-sidhartha.kumar@oracle.com
Suggested-by: Mike Kravetz <mike.kravetz@oracle.com>
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent ea8e72f commit d0ce0e4
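
For readers skimming the diff, here is a minimal before/after sketch of the caller pattern this conversion targets. It is assembled from the calls visible in the hunks below; identifiers such as vma and addr are placeholders, and it is not a verbatim excerpt of the patch.

        /* Before: alloc_huge_page() handed back a struct page. */
        struct page *page = alloc_huge_page(vma, addr, 0);
        if (IS_ERR(page))
                return PTR_ERR(page);
        __SetPageUptodate(page);
        /* ... use the page ... */
        put_page(page);

        /* After: alloc_hugetlb_folio() hands back a struct folio. */
        struct folio *folio = alloc_hugetlb_folio(vma, addr, 0);
        if (IS_ERR(folio))
                return PTR_ERR(folio);
        __folio_mark_uptodate(folio);
        /* ... use the folio, or &folio->page where a page is still needed ... */
        folio_put(folio);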

File tree

6 files changed, +133 −130 lines


fs/hugetlbfs/inode.c

Lines changed: 21 additions & 19 deletions
@@ -819,8 +819,9 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                  * This is supposed to be the vaddr where the page is being
                  * faulted in, but we have no vaddr here.
                  */
-                struct page *page;
+                struct folio *folio;
                 unsigned long addr;
+                bool present;
 
                 cond_resched();
 
@@ -844,48 +845,49 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                 mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
                 /* See if already present in mapping to avoid alloc/free */
-                page = find_get_page(mapping, index);
-                if (page) {
-                        put_page(page);
+                rcu_read_lock();
+                present = page_cache_next_miss(mapping, index, 1) != index;
+                rcu_read_unlock();
+                if (present) {
                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                         hugetlb_drop_vma_policy(&pseudo_vma);
                         continue;
                 }
 
                 /*
-                 * Allocate page without setting the avoid_reserve argument.
+                 * Allocate folio without setting the avoid_reserve argument.
                  * There certainly are no reserves associated with the
                  * pseudo_vma. However, there could be shared mappings with
                  * reserves for the file at the inode level. If we fallocate
-                 * pages in these areas, we need to consume the reserves
+                 * folios in these areas, we need to consume the reserves
                  * to keep reservation accounting consistent.
                  */
-                page = alloc_huge_page(&pseudo_vma, addr, 0);
+                folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
                 hugetlb_drop_vma_policy(&pseudo_vma);
-                if (IS_ERR(page)) {
+                if (IS_ERR(folio)) {
                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-                        error = PTR_ERR(page);
+                        error = PTR_ERR(folio);
                         goto out;
                 }
-                clear_huge_page(page, addr, pages_per_huge_page(h));
-                __SetPageUptodate(page);
-                error = hugetlb_add_to_page_cache(page, mapping, index);
+                clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
+                __folio_mark_uptodate(folio);
+                error = hugetlb_add_to_page_cache(&folio->page, mapping, index);
                 if (unlikely(error)) {
-                        restore_reserve_on_error(h, &pseudo_vma, addr, page);
-                        put_page(page);
+                        restore_reserve_on_error(h, &pseudo_vma, addr, &folio->page);
+                        folio_put(folio);
                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                         goto out;
                 }
 
                 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
-                SetHPageMigratable(page);
+                folio_set_hugetlb_migratable(folio);
                 /*
-                 * unlock_page because locked by hugetlb_add_to_page_cache()
-                 * put_page() due to reference from alloc_huge_page()
+                 * folio_unlock because locked by hugetlb_add_to_page_cache()
+                 * folio_put() due to reference from alloc_hugetlb_folio()
                  */
-                unlock_page(page);
-                put_page(page);
+                folio_unlock(folio);
+                folio_put(folio);
         }
 
         if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
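
A note on the new presence check above (a reading aid, not part of the commit): page_cache_next_miss(mapping, index, 1) scans at most one slot starting at index and returns the index of the first gap it finds, so a return value different from index means the slot at index is already occupied. The old and new checks are equivalent, but the new one only peeks at the page cache under rcu_read_lock() instead of taking and immediately dropping a page reference:

        /* Old: take a reference just to test for presence, then drop it. */
        page = find_get_page(mapping, index);
        if (page) {
                put_page(page);
                /* already in the page cache, skip the allocation */
        }

        /* New: look for a gap at 'index' under RCU; no reference is taken. */
        rcu_read_lock();
        present = page_cache_next_miss(mapping, index, 1) != index;
        rcu_read_unlock();
        if (present) {
                /* already in the page cache, skip the allocation */
        }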

include/linux/hugetlb.h

Lines changed: 4 additions & 4 deletions
@@ -717,11 +717,11 @@ struct huge_bootmem_page {
 };
 
 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
-struct page *alloc_huge_page(struct vm_area_struct *vma,
+struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
                                 unsigned long addr, int avoid_reserve);
 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
                                 nodemask_t *nmask, gfp_t gfp_mask);
-struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
+struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
                                 unsigned long address);
 int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
                         pgoff_t idx);
@@ -1033,7 +1033,7 @@ static inline int isolate_or_dissolve_huge_page(struct page *page,
         return -ENOMEM;
 }
 
-static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
+static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
                                            unsigned long addr,
                                            int avoid_reserve)
 {
@@ -1047,7 +1047,7 @@ alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
         return NULL;
 }
 
-static inline struct page *alloc_huge_page_vma(struct hstate *h,
+static inline struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
                                                struct vm_area_struct *vma,
                                                unsigned long address)
 {

include/linux/rmap.h

Lines changed: 1 addition & 1 deletion
@@ -203,7 +203,7 @@ void page_remove_rmap(struct page *, struct vm_area_struct *,
 
 void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
                 unsigned long address, rmap_t flags);
-void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
+void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
                 unsigned long address);
 
 static inline void __page_dup_rmap(struct page *page, bool compound)
