riscv: Use accessors to page table entries instead of direct dereference
mainline inclusion
from mainline-v6.8-rc1
link:deepin-community#398

As very well explained in commit 20a004e ("arm64: mm: Use
READ_ONCE/WRITE_ONCE when accessing page tables"), an architecture whose
page table walker can modify the PTE in parallel must use the
READ_ONCE()/WRITE_ONCE() macros to avoid any compiler transformation.

So apply that to riscv, which is such an architecture.
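
For context, here is a minimal sketch (not part of the patch) of why the accessor matters: a plain dereference of a live page table entry lets the compiler reload or tear the load, so two uses of *ptep may observe different values if another walker updates the entry concurrently, whereas ptep_get() wraps READ_ONCE() and returns a single consistent snapshot. The helper names below are hypothetical.

#include <linux/pgtable.h>

/* Racy: the compiler may reload or split the plain *ptep loads, so the
 * two checks can observe two different PTE values if the entry is
 * rewritten concurrently (e.g. by a hardware A/D-bit update). */
static bool pte_usable_racy(pte_t *ptep)
{
	return pte_present(*ptep) && pte_young(*ptep);
}

/* Safe: ptep_get() uses READ_ONCE(), so the entry is loaded exactly once
 * and both checks operate on the same snapshot. */
static bool pte_usable_once(pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);

	return pte_present(pte) && pte_young(pte);
}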

Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Acked-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20231213203001.179237-5-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Signed-off-by: Han Gao <gaohan@iscas.ac.cn>
(cherry picked from commit edf9556)
Signed-off-by: Wentao Guan <guanwentao@uniontech.com>
Alexandre Ghiti authored and opsiff committed Sep 4, 2024
1 parent a03b800 commit 416066b
Showing 10 changed files with 128 additions and 113 deletions.
4 changes: 2 additions & 2 deletions arch/riscv/include/asm/kfence.h
@@ -18,9 +18,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
pte_t *pte = virt_to_kpte(addr);

if (protect)
-set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~_PAGE_PRESENT));
else
-set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));

flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

16 changes: 2 additions & 14 deletions arch/riscv/include/asm/pgtable-64.h
@@ -336,13 +336,7 @@ static inline struct page *p4d_page(p4d_t p4d)
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset pud_offset
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
-{
-if (pgtable_l4_enabled)
-return p4d_pgtable(*p4d) + pud_index(address);
-
-return (pud_t *)p4d;
-}
+pud_t *pud_offset(p4d_t *p4d, unsigned long address);

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
@@ -400,12 +394,6 @@ static inline struct page *pgd_page(pgd_t pgd)
#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

#define p4d_offset p4d_offset
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
-{
-if (pgtable_l5_enabled)
-return pgd_pgtable(*pgd) + p4d_index(address);
-
-return (p4d_t *)pgd;
-}
+p4d_t *p4d_offset(pgd_t *pgd, unsigned long address);

#endif /* _ASM_RISCV_PGTABLE_64_H */
29 changes: 6 additions & 23 deletions arch/riscv/include/asm/pgtable.h
@@ -549,19 +549,12 @@ static inline void pte_clear(struct mm_struct *mm,
__set_pte_at(ptep, __pte(0));
}

-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline int ptep_set_access_flags(struct vm_area_struct *vma,
-unsigned long address, pte_t *ptep,
-pte_t entry, int dirty)
-{
-if (!pte_same(*ptep, entry))
-__set_pte_at(ptep, entry);
-/*
-* update_mmu_cache will unconditionally execute, handling both
-* the case that the PTE changed and the spurious fault case.
-*/
-return true;
-}
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+pte_t *ptep, pte_t entry, int dirty);
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG /* defined in mm/pgtable.c */
+extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
+pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
@@ -574,16 +567,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
return pte;
}

-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
-unsigned long address,
-pte_t *ptep)
-{
-if (!pte_young(*ptep))
-return 0;
-return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
-}
-
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
2 changes: 1 addition & 1 deletion arch/riscv/kernel/efi.c
@@ -60,7 +60,7 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
efi_memory_desc_t *md = data;
-pte_t pte = READ_ONCE(*ptep);
+pte_t pte = ptep_get(ptep);
unsigned long val;

if (md->attribute & EFI_MEMORY_RO) {
22 changes: 11 additions & 11 deletions arch/riscv/kvm/mmu.c
@@ -103,7 +103,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
*ptep_level = current_level;
ptep = (pte_t *)kvm->arch.pgd;
ptep = &ptep[gstage_pte_index(addr, current_level)];
-while (ptep && pte_val(*ptep)) {
+while (ptep && pte_val(ptep_get(ptep))) {
if (gstage_pte_leaf(ptep)) {
*ptep_level = current_level;
*ptepp = ptep;
@@ -113,7 +113,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
if (current_level) {
current_level--;
*ptep_level = current_level;
-ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
ptep = &ptep[gstage_pte_index(addr, current_level)];
} else {
ptep = NULL;
@@ -149,25 +149,25 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
if (gstage_pte_leaf(ptep))
return -EEXIST;

-if (!pte_val(*ptep)) {
+if (!pte_val(ptep_get(ptep))) {
if (!pcache)
return -ENOMEM;
next_ptep = kvm_mmu_memory_cache_alloc(pcache);
if (!next_ptep)
return -ENOMEM;
-*ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
-__pgprot(_PAGE_TABLE));
+set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
+__pgprot(_PAGE_TABLE)));
} else {
if (gstage_pte_leaf(ptep))
return -EEXIST;
-next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
}

current_level--;
ptep = &next_ptep[gstage_pte_index(addr, current_level)];
}

-*ptep = *new_pte;
+set_pte(ptep, *new_pte);
if (gstage_pte_leaf(ptep))
gstage_remote_tlb_flush(kvm, current_level, addr);

@@ -239,11 +239,11 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,

BUG_ON(addr & (page_size - 1));

-if (!pte_val(*ptep))
+if (!pte_val(ptep_get(ptep)))
return;

if (ptep_level && !gstage_pte_leaf(ptep)) {
-next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
next_ptep_level = ptep_level - 1;
ret = gstage_level_to_page_size(next_ptep_level,
&next_page_size);
@@ -261,7 +261,7 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
if (op == GSTAGE_OP_CLEAR)
set_pte(ptep, __pte(0));
else if (op == GSTAGE_OP_WP)
-set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
+set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE));
gstage_remote_tlb_flush(kvm, ptep_level, addr);
}
}
@@ -603,7 +603,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
&ptep, &ptep_level))
return false;

-return pte_young(*ptep);
+return pte_young(ptep_get(ptep));
}

int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
16 changes: 8 additions & 8 deletions arch/riscv/mm/fault.c
@@ -137,36 +137,36 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
pgd = (pgd_t *)pfn_to_virt(pfn) + index;
pgd_k = init_mm.pgd + index;

-if (!pgd_present(*pgd_k)) {
+if (!pgd_present(pgdp_get(pgd_k))) {
no_context(regs, addr);
return;
}
-set_pgd(pgd, *pgd_k);
+set_pgd(pgd, pgdp_get(pgd_k));

p4d_k = p4d_offset(pgd_k, addr);
-if (!p4d_present(*p4d_k)) {
+if (!p4d_present(p4dp_get(p4d_k))) {
no_context(regs, addr);
return;
}

pud_k = pud_offset(p4d_k, addr);
-if (!pud_present(*pud_k)) {
+if (!pud_present(pudp_get(pud_k))) {
no_context(regs, addr);
return;
}
-if (pud_leaf(*pud_k))
+if (pud_leaf(pudp_get(pud_k)))
goto flush_tlb;

/*
* Since the vmalloc area is global, it is unnecessary
* to copy individual PTEs
*/
pmd_k = pmd_offset(pud_k, addr);
-if (!pmd_present(*pmd_k)) {
+if (!pmd_present(pmdp_get(pmd_k))) {
no_context(regs, addr);
return;
}
-if (pmd_leaf(*pmd_k))
+if (pmd_leaf(pmdp_get(pmd_k)))
goto flush_tlb;

/*
@@ -176,7 +176,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
* silently loop forever.
*/
pte_k = pte_offset_kernel(pmd_k, addr);
-if (!pte_present(*pte_k)) {
+if (!pte_present(ptep_get(pte_k))) {
no_context(regs, addr);
return;
}
12 changes: 6 additions & 6 deletions arch/riscv/mm/hugetlbpage.c
@@ -54,7 +54,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
}

if (sz == PMD_SIZE) {
-if (want_pmd_share(vma, addr) && pud_none(*pud))
+if (want_pmd_share(vma, addr) && pud_none(pudp_get(pud)))
pte = huge_pmd_share(mm, vma, addr, pud);
else
pte = (pte_t *)pmd_alloc(mm, pud, addr);
@@ -93,27 +93,27 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
pmd_t *pmd;

pgd = pgd_offset(mm, addr);
-if (!pgd_present(*pgd))
+if (!pgd_present(pgdp_get(pgd)))
return NULL;

p4d = p4d_offset(pgd, addr);
-if (!p4d_present(*p4d))
+if (!p4d_present(p4dp_get(p4d)))
return NULL;

pud = pud_offset(p4d, addr);
if (sz == PUD_SIZE)
/* must be pud huge, non-present or none */
return (pte_t *)pud;

-if (!pud_present(*pud))
+if (!pud_present(pudp_get(pud)))
return NULL;

pmd = pmd_offset(pud, addr);
if (sz == PMD_SIZE)
/* must be pmd huge, non-present or none */
return (pte_t *)pmd;

-if (!pmd_present(*pmd))
+if (!pmd_present(pmdp_get(pmd)))
return NULL;

for_each_napot_order(order) {
@@ -351,7 +351,7 @@ void huge_pte_clear(struct mm_struct *mm,
pte_t *ptep,
unsigned long sz)
{
-pte_t pte = READ_ONCE(*ptep);
+pte_t pte = ptep_get(ptep);
int i, pte_num;

if (!pte_napot(pte)) {