Skip to content

Commit 188cac5

Browse files
davidhildenbrand authored and akpm00 committed
mm/hugetlb: enforce that PMD PT sharing has split PMD PT locks
Sharing page tables between processes but falling back to per-MM page table locks cannot possibly work. So, let's make sure that we do have split PMD locks by adding a new Kconfig option and letting that depend on CONFIG_SPLIT_PMD_PTLOCKS.

Link: https://lkml.kernel.org/r/20240726150728.3159964-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 394290c commit 188cac5

File tree

3 files changed

+10
-7
lines changed

3 files changed

+10
-7
lines changed

fs/Kconfig

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -288,6 +288,10 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
288288
depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
289289
depends on SPARSEMEM_VMEMMAP
290290

291+
config HUGETLB_PMD_PAGE_TABLE_SHARING
292+
def_bool HUGETLB_PAGE
293+
depends on ARCH_WANT_HUGE_PMD_SHARE && SPLIT_PMD_PTLOCKS
294+
291295
config ARCH_HAS_GIGANTIC_PAGE
292296
bool
293297

include/linux/hugetlb.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1251,7 +1251,7 @@ static inline __init void hugetlb_cma_reserve(int order)
12511251
}
12521252
#endif
12531253

1254-
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
1254+
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
12551255
static inline bool hugetlb_pmd_shared(pte_t *pte)
12561256
{
12571257
return page_count(virt_to_page(pte)) > 1;
@@ -1287,8 +1287,7 @@ bool __vma_private_lock(struct vm_area_struct *vma);
12871287
static inline pte_t *
12881288
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
12891289
{
1290-
#if defined(CONFIG_HUGETLB_PAGE) && \
1291-
defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
1290+
#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
12921291
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
12931292

12941293
/*

mm/hugetlb.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7211,7 +7211,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
72117211
return 0;
72127212
}
72137213

7214-
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
7214+
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
72157215
static unsigned long page_table_shareable(struct vm_area_struct *svma,
72167216
struct vm_area_struct *vma,
72177217
unsigned long addr, pgoff_t idx)
@@ -7373,7 +7373,7 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
73737373
return 1;
73747374
}
73757375

7376-
#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
7376+
#else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
73777377

73787378
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
73797379
unsigned long addr, pud_t *pud)
@@ -7396,7 +7396,7 @@ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
73967396
{
73977397
return false;
73987398
}
7399-
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
7399+
#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
74007400

74017401
#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
74027402
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -7494,7 +7494,7 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
74947494
/* See description above. Architectures can provide their own version. */
74957495
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
74967496
{
7497-
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
7497+
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
74987498
if (huge_page_size(h) == PMD_SIZE)
74997499
return PUD_SIZE - PMD_SIZE;
75007500
#endif

0 commit comments

Comments
 (0)