 void pte_frag_destroy(void *pte_frag)
 {
 	int count;
-	struct page *page;
+	struct ptdesc *ptdesc;

-	page = virt_to_page(pte_frag);
+	ptdesc = virt_to_ptdesc(pte_frag);
 	/* drop all the pending references */
 	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
 	/* We allow PTE_FRAG_NR fragments from a PTE page */
-	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
-		pgtable_pte_page_dtor(page);
-		__free_page(page);
+	if (atomic_sub_and_test(PTE_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
+		pagetable_pte_dtor(ptdesc);
+		pagetable_free(ptdesc);
 	}
 }

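The arithmetic in pte_frag_destroy() is the heart of the fragment scheme: the low bits of the pte_frag cursor encode the index of the next unused fragment, so PTE_FRAG_NR - count is the number of fragments never handed out, whose pre-charged references are dropped in a single atomic_sub_and_test(). Below is a minimal userspace sketch of that bookkeeping, using illustrative values only (the real PAGE_SHIFT, PTE_FRAG_SIZE_SHIFT and PTE_FRAG_NR depend on the powerpc configuration, e.g. 64K pages carved into 4K fragments):

#include <assert.h>
#include <stdio.h>

/* Illustrative values only: 64K pages split into 4K PTE fragments. */
#define PAGE_SHIFT		16
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define PTE_FRAG_SIZE_SHIFT	12
#define PTE_FRAG_NR		(PAGE_SIZE >> PTE_FRAG_SIZE_SHIFT)	/* 16 */

int main(void)
{
	/* Pretend the cached cursor points at fragment 5 of some page. */
	unsigned long pte_frag = 0x10000000UL + (5UL << PTE_FRAG_SIZE_SHIFT);

	/* Same expression as pte_frag_destroy(): next unused fragment's index. */
	int count = (pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;

	/*
	 * Fragments 0..4 were handed out and own one reference each
	 * (returned via pte_fragment_free()); the references for the
	 * fragments never handed out are dropped in one go on destroy.
	 */
	printf("handed out: %d, dropped on destroy: %lu\n",
	       count, PTE_FRAG_NR - count);
	assert(count == 5 && PTE_FRAG_NR - count == 11);
	return 0;
}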
@@ -55,25 +55,25 @@ static pte_t *get_pte_from_cache(struct mm_struct *mm)
 static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 {
 	void *ret = NULL;
-	struct page *page;
+	struct ptdesc *ptdesc;

 	if (!kernel) {
-		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
-		if (!page)
+		ptdesc = pagetable_alloc(PGALLOC_GFP | __GFP_ACCOUNT, 0);
+		if (!ptdesc)
 			return NULL;
-		if (!pgtable_pte_page_ctor(page)) {
-			__free_page(page);
+		if (!pagetable_pte_ctor(ptdesc)) {
+			pagetable_free(ptdesc);
 			return NULL;
 		}
 	} else {
-		page = alloc_page(PGALLOC_GFP);
-		if (!page)
+		ptdesc = pagetable_alloc(PGALLOC_GFP, 0);
+		if (!ptdesc)
 			return NULL;
 	}

-	atomic_set(&page->pt_frag_refcount, 1);
+	atomic_set(&ptdesc->pt_frag_refcount, 1);

-	ret = page_address(page);
+	ret = ptdesc_address(ptdesc);
 	/*
 	 * if we support only one fragment just return the
 	 * allocated page.
@@ -82,12 +82,12 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 		return ret;
 	spin_lock(&mm->page_table_lock);
 	/*
-	 * If we find pgtable_page set, we return
+	 * If we find ptdesc_page set, we return
 	 * the allocated page with single fragment
 	 * count.
 	 */
 	if (likely(!pte_frag_get(&mm->context))) {
-		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
+		atomic_set(&ptdesc->pt_frag_refcount, PTE_FRAG_NR);
 		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
 	}
 	spin_unlock(&mm->page_table_lock);
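The hunk header above points at get_pte_from_cache(), the consumer of the cursor that __alloc_for_ptecache() publishes under mm->page_table_lock. For context, here is a simplified sketch of that consumer (same file, untouched by this patch; details can drift across kernel versions): each hit advances the cursor by PTE_FRAG_SIZE, and once the cursor wraps to a page boundary the cached page is exhausted and the cache is cleared.

static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	if (PTE_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = pte_frag_get(&mm->context);
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/* All fragments handed out: stop caching this page. */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		pte_frag_set(&mm->context, pte_frag);
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}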
@@ -108,28 +108,28 @@ pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel)

 static void pte_free_now(struct rcu_head *head)
 {
-	struct page *page;
+	struct ptdesc *ptdesc;

-	page = container_of(head, struct page, rcu_head);
-	pgtable_pte_page_dtor(page);
-	__free_page(page);
+	ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
+	pagetable_pte_dtor(ptdesc);
+	pagetable_free(ptdesc);
 }

 void pte_fragment_free(unsigned long *table, int kernel)
 {
-	struct page *page = virt_to_page(table);
+	struct ptdesc *ptdesc = virt_to_ptdesc(table);

-	if (PageReserved(page))
-		return free_reserved_page(page);
+	if (pagetable_is_reserved(ptdesc))
+		return free_reserved_ptdesc(ptdesc);

-	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
-	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
+	BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
+	if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
 		if (kernel)
-			__free_page(page);
-		else if (TestClearPageActive(page))
-			call_rcu(&page->rcu_head, pte_free_now);
+			pagetable_free(ptdesc);
+		else if (folio_test_clear_active(ptdesc_folio(ptdesc)))
+			call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
 		else
-			pte_free_now(&page->rcu_head);
+			pte_free_now(&ptdesc->pt_rcu_head);
 	}
 }

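The RCU leg in pte_fragment_free() exists so that lockless page-table walkers cannot race with the final free: when the folio's active flag is found set, the destructor is deferred past a grace period via call_rcu() on the new pt_rcu_head (which replaces the bare page->rcu_head). A hedged sketch of the producer side that sets that flag, modeled on pte_free_defer() from the same file (the exact form varies by kernel version):

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct folio *folio = virt_to_folio(pgtable);

	/* Tag the folio so the final put goes through call_rcu(). */
	folio_set_active(folio);
	pte_fragment_free((unsigned long *)pgtable, 0);
}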