
Commit 3b0fc8b

davidhildenbrand authored and mehmetb0 committed
mm/hugetlb: enforce that PMD PT sharing has split PMD PT locks
BugLink: https://bugs.launchpad.net/bugs/2106770

[ Upstream commit 188cac5 ]

Sharing page tables between processes but falling back to per-MM page
table locks cannot possibly work. So, let's make sure that we do have
split PMD locks by adding a new Kconfig option and letting that depend
on CONFIG_SPLIT_PMD_PTLOCKS.

Link: https://lkml.kernel.org/r/20240726150728.3159964-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Stable-dep-of: 59d9094 ("mm: hugetlb: independent PMD page table shared count")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Manuel Diewald <manuel.diewald@canonical.com>
Signed-off-by: Mehmet Basaran <mehmet.basaran@canonical.com>
1 parent e178482 commit 3b0fc8b
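The change boils down to one derived Kconfig symbol: HUGETLB_PMD_PAGE_TABLE_SHARING takes the value of HUGETLB_PAGE (def_bool), but only when both ARCH_WANT_HUGE_PMD_SHARE and SPLIT_PMD_PTLOCKS are set, so PMD page table sharing can no longer be built without split PMD PT locks. Below is a minimal user-space sketch of that derivation, illustrative only and not kernel code; the helper name and the example configurations are invented for this note.

/*
 * Illustrative model (assumption: not kernel code) of how the new symbol's
 * value follows from "def_bool HUGETLB_PAGE" guarded by
 * "depends on ARCH_WANT_HUGE_PMD_SHARE && SPLIT_PMD_PTLOCKS".
 */
#include <stdbool.h>
#include <stdio.h>

static bool hugetlb_pmd_page_table_sharing(bool hugetlb_page,
					    bool arch_want_huge_pmd_share,
					    bool split_pmd_ptlocks)
{
	/* "depends on" gates the symbol entirely ... */
	if (!(arch_want_huge_pmd_share && split_pmd_ptlocks))
		return false;
	/* ... and "def_bool" supplies its value. */
	return hugetlb_page;
}

int main(void)
{
	/* Arch wants PMD sharing but lacks split PMD PT locks: sharing stays off. */
	printf("%d\n", hugetlb_pmd_page_table_sharing(true, true, false)); /* prints 0 */
	/* All prerequisites met: sharing is enabled. */
	printf("%d\n", hugetlb_pmd_page_table_sharing(true, true, true));  /* prints 1 */
	return 0;
}

Kernel code then tests the single combined symbol, as the hunks below do, instead of open-coding the CONFIG_HUGETLB_PAGE && CONFIG_ARCH_WANT_HUGE_PMD_SHARE combination at each site.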

3 files changed: +10 −7 lines


fs/Kconfig

Lines changed: 4 additions & 0 deletions
@@ -289,6 +289,10 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 	depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
 	depends on SPARSEMEM_VMEMMAP
 
+config HUGETLB_PMD_PAGE_TABLE_SHARING
+	def_bool HUGETLB_PAGE
+	depends on ARCH_WANT_HUGE_PMD_SHARE && SPLIT_PMD_PTLOCKS
+
 config ARCH_HAS_GIGANTIC_PAGE
 	bool

include/linux/hugetlb.h

Lines changed: 2 additions & 3 deletions
@@ -1242,7 +1242,7 @@ static inline __init void hugetlb_cma_reserve(int order)
 }
 #endif
 
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
 static inline bool hugetlb_pmd_shared(pte_t *pte)
 {
 	return page_count(virt_to_page(pte)) > 1;
@@ -1278,8 +1278,7 @@ bool __vma_private_lock(struct vm_area_struct *vma);
 static inline pte_t *
 hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
 {
-#if defined(CONFIG_HUGETLB_PAGE) && \
-	defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
+#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
 	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
 	/*

mm/hugetlb.c

Lines changed: 4 additions & 4 deletions
@@ -7199,7 +7199,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
 	return 0;
 }
 
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
 static unsigned long page_table_shareable(struct vm_area_struct *svma,
 				struct vm_area_struct *vma,
 				unsigned long addr, pgoff_t idx)
@@ -7361,7 +7361,7 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 	return 1;
 }
 
-#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+#else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
 
 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 		      unsigned long addr, pud_t *pud)
@@ -7384,7 +7384,7 @@ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
 {
 	return false;
 }
-#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
 
 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -7482,7 +7482,7 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
 /* See description above. Architectures can provide their own version. */
 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
 {
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
 	if (huge_page_size(h) == PMD_SIZE)
 		return PUD_SIZE - PMD_SIZE;
 #endif
