
Commit 7b15c27

Merge tag 'core-mm-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull tlb gather updates from Ingo Molnar:
 "These fix MM (soft-)dirty bit management in the procfs code & clean
  up the TLB gather API"

* tag 'core-mm-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/ldt: Use tlb_gather_mmu_fullmm() when freeing LDT page-tables
  tlb: arch: Remove empty __tlb_remove_tlb_entry() stubs
  tlb: mmu_gather: Remove start/end arguments from tlb_gather_mmu()
  tlb: mmu_gather: Introduce tlb_gather_mmu_fullmm()
  tlb: mmu_gather: Remove unused start/end arguments from tlb_finish_mmu()
  mm: proc: Invalidate TLB after clearing soft-dirty page state
2 parents 9eef023 + 8cf55f2 commit 7b15c27
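
For orientation, a minimal before/after sketch of the ranged calling convention this series converts. The prototypes match the include/linux/mm_types.h change below; the surrounding locals (mm, vma, start, end) and the unmap_vmas() call stand in for whatever queues pages on the gather:

	struct mmu_gather tlb;

	/* Before this series: the range was passed both at gather and at finish. */
	tlb_gather_mmu(&tlb, mm, start, end);
	unmap_vmas(&tlb, vma, start, end);
	tlb_finish_mmu(&tlb, start, end);

	/* After this series: the gather tracks the flushed range itself, so callers only name the mm. */
	tlb_gather_mmu(&tlb, mm);
	unmap_vmas(&tlb, vma, start, end);
	tlb_finish_mmu(&tlb);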

File tree

14 files changed: 60 additions & 65 deletions


arch/ia64/include/asm/tlb.h

Lines changed: 2 additions & 2 deletions
@@ -23,7 +23,7 @@
  * unmapping a portion of the virtual address space, these hooks are called according to
  * the following template:
  *
- * tlb <- tlb_gather_mmu(mm, start, end);	// start unmap for address space MM
+ * tlb <- tlb_gather_mmu(mm);			// start unmap for address space MM
  * {
  *   for each vma that needs a shootdown do {
  *     tlb_start_vma(tlb, vma);
@@ -36,7 +36,7 @@
  *     tlb_end_vma(tlb, vma);
  *   }
  * }
- * tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
+ * tlb_finish_mmu(tlb);			// finish unmap for address space MM
  */
 #include <linux/mm.h>
 #include <linux/pagemap.h>
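
In caller terms, the template above maps onto roughly this shape under the new API (a sketch only; the loop body stands in for whatever queues pages through the tlb_remove_*() hooks, and tlb_start_vma()/tlb_end_vma() are per-arch macros that may be no-ops):

	struct mmu_gather tlb;
	struct vm_area_struct *vma;

	tlb_gather_mmu(&tlb, mm);			/* start unmap for address space mm */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		tlb_start_vma(&tlb, vma);
		/* unmap this vma's pages, queueing them on the gather */
		tlb_end_vma(&tlb, vma);
	}
	tlb_finish_mmu(&tlb);				/* final TLB invalidate, free queued pages */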

arch/sparc/include/asm/tlb_64.h

Lines changed: 0 additions & 1 deletion
@@ -24,7 +24,6 @@ void flush_tlb_pending(void);
 
 #define tlb_start_vma(tlb, vma) do { } while (0)
 #define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 #define tlb_flush(tlb) flush_tlb_pending()
 
 /*

arch/x86/include/asm/tlb.h

Lines changed: 0 additions & 1 deletion
@@ -4,7 +4,6 @@
 
 #define tlb_start_vma(tlb, vma) do { } while (0)
 #define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 
 #define tlb_flush tlb_flush
 static inline void tlb_flush(struct mmu_gather *tlb);

arch/x86/kernel/ldt.c

Lines changed: 8 additions & 2 deletions
@@ -398,9 +398,15 @@ static void free_ldt_pgtables(struct mm_struct *mm)
 	if (!boot_cpu_has(X86_FEATURE_PTI))
 		return;
 
-	tlb_gather_mmu(&tlb, mm, start, end);
+	/*
+	 * Although free_pgd_range() is intended for freeing user
+	 * page-tables, it also works out for kernel mappings on x86.
+	 * We use tlb_gather_mmu_fullmm() to avoid confusing the
+	 * range-tracking logic in __tlb_adjust_range().
+	 */
+	tlb_gather_mmu_fullmm(&tlb, mm);
 	free_pgd_range(&tlb, start, end, start, end);
-	tlb_finish_mmu(&tlb, start, end);
+	tlb_finish_mmu(&tlb);
 #endif
 }

fs/exec.c

Lines changed: 2 additions & 2 deletions
@@ -708,7 +708,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		return -ENOMEM;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, old_start, old_end);
+	tlb_gather_mmu(&tlb, mm);
 	if (new_end > old_start) {
 		/*
 		 * when the old and new regions overlap clear from new_end.
@@ -725,7 +725,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		free_pgd_range(&tlb, old_start, old_end, new_end,
 			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 	}
-	tlb_finish_mmu(&tlb, old_start, old_end);
+	tlb_finish_mmu(&tlb);
 
 	/*
 	 * Shrink the vma to just the new range. Always succeeds.

fs/proc/task_mmu.c

Lines changed: 5 additions & 4 deletions
@@ -1210,7 +1210,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	enum clear_refs_types type;
-	struct mmu_gather tlb;
 	int itype;
 	int rv;
 
@@ -1249,7 +1248,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 			goto out_unlock;
 		}
 
-		tlb_gather_mmu(&tlb, mm, 0, -1);
 		if (type == CLEAR_REFS_SOFT_DIRTY) {
 			for (vma = mm->mmap; vma; vma = vma->vm_next) {
 				if (!(vma->vm_flags & VM_SOFTDIRTY))
@@ -1258,15 +1256,18 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 				vma_set_page_prot(vma);
 			}
 
+			inc_tlb_flush_pending(mm);
 			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
 						0, NULL, mm, 0, -1UL);
 			mmu_notifier_invalidate_range_start(&range);
 		}
 		walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
 				&cp);
-		if (type == CLEAR_REFS_SOFT_DIRTY)
+		if (type == CLEAR_REFS_SOFT_DIRTY) {
 			mmu_notifier_invalidate_range_end(&range);
-		tlb_finish_mmu(&tlb, 0, -1);
+			flush_tlb_mm(mm);
+			dec_tlb_flush_pending(mm);
+		}
 out_unlock:
 		mmap_write_unlock(mm);
 out_mm:
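
Stripped of the procfs plumbing, the soft-dirty path now follows this flush pattern instead of carrying a full mmu_gather (a sketch; clear_soft_dirty_ptes() is a hypothetical placeholder for the clear_refs page walk above):

	inc_tlb_flush_pending(mm);		/* let concurrent PTE walkers see a flush is pending */
	mmu_notifier_invalidate_range_start(&range);
	clear_soft_dirty_ptes(mm);		/* placeholder: write-protect PTEs, clear soft-dirty bits */
	mmu_notifier_invalidate_range_end(&range);
	flush_tlb_mm(mm);			/* invalidate stale writable TLB entries */
	dec_tlb_flush_pending(mm);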

include/asm-generic/tlb.h

Lines changed: 4 additions & 2 deletions
@@ -46,7 +46,9 @@
  *
  * The mmu_gather API consists of:
  *
- *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
+ *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
+ *
+ *    start and finish a mmu_gather
  *
  *    Finish in particular will issue a (final) TLB invalidate and free
  *    all (remaining) queued pages.
@@ -91,7 +93,7 @@
  *
  *  - mmu_gather::fullmm
  *
- *    A flag set by tlb_gather_mmu() to indicate we're going to free
+ *    A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
  *    the entire mm; this allows a number of optimizations.
  *
  *    - We can ignore tlb_{start,end}_vma(); because we don't
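
The new fullmm entry point makes the whole-address-space case explicit: instead of signalling it through a 0..-1 range (as the removed clear_refs_write() call did), teardown code selects the mmu_gather::fullmm optimizations up front. A sketch, with free_all_pgtables() as a hypothetical placeholder for the caller's actual teardown:

	struct mmu_gather tlb;

	tlb_gather_mmu_fullmm(&tlb, mm);	/* sets mmu_gather::fullmm */
	free_all_pgtables(&tlb, mm);		/* placeholder: unmap/free, queue pages on the gather */
	tlb_finish_mmu(&tlb);			/* full-mm TLB invalidate, free queued pages */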

include/linux/mm_types.h

Lines changed: 3 additions & 4 deletions
@@ -588,10 +588,9 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 }
 
 struct mmu_gather;
-extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-				unsigned long start, unsigned long end);
-extern void tlb_finish_mmu(struct mmu_gather *tlb,
-				unsigned long start, unsigned long end);
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
+extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
+extern void tlb_finish_mmu(struct mmu_gather *tlb);
 
 static inline void init_tlb_flush_pending(struct mm_struct *mm)
 {

mm/hugetlb.c

Lines changed: 2 additions & 16 deletions
@@ -4008,25 +4008,11 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
-	struct mm_struct *mm;
 	struct mmu_gather tlb;
-	unsigned long tlb_start = start;
-	unsigned long tlb_end = end;
 
-	/*
-	 * If shared PMDs were possibly used within this vma range, adjust
-	 * start/end for worst case tlb flushing.
-	 * Note that we can not be sure if PMDs are shared until we try to
-	 * unmap pages. However, we want to make sure TLB flushing covers
-	 * the largest possible range.
-	 */
-	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
-
-	mm = vma->vm_mm;
-
-	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
+	tlb_gather_mmu(&tlb, vma->vm_mm);
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
-	tlb_finish_mmu(&tlb, tlb_start, tlb_end);
+	tlb_finish_mmu(&tlb);
 }
 
 /*

mm/madvise.c

Lines changed: 6 additions & 6 deletions
@@ -506,9 +506,9 @@ static long madvise_cold(struct vm_area_struct *vma,
 		return -EINVAL;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
+	tlb_gather_mmu(&tlb, mm);
 	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
-	tlb_finish_mmu(&tlb, start_addr, end_addr);
+	tlb_finish_mmu(&tlb);
 
 	return 0;
 }
@@ -558,9 +558,9 @@ static long madvise_pageout(struct vm_area_struct *vma,
 		return 0;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
+	tlb_gather_mmu(&tlb, mm);
 	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
-	tlb_finish_mmu(&tlb, start_addr, end_addr);
+	tlb_finish_mmu(&tlb);
 
 	return 0;
 }
@@ -723,7 +723,7 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 				range.start, range.end);
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, range.start, range.end);
+	tlb_gather_mmu(&tlb, mm);
 	update_hiwater_rss(mm);
 
 	mmu_notifier_invalidate_range_start(&range);
@@ -732,7 +732,7 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 			&madvise_free_walk_ops, &tlb);
 	tlb_end_vma(&tlb, vma);
 	mmu_notifier_invalidate_range_end(&range);
-	tlb_finish_mmu(&tlb, range.start, range.end);
+	tlb_finish_mmu(&tlb);
 
 	return 0;
 }
