Commit 266c85f

vineetgarc authored and Sergey Matyukevich committed
ARCv3: mm: initial bits for MMUv6
TODO from squashed fixups: _PAGE_KERNEL is created from scratch (instead of being _PAGE_BASE based) to avoid clearing multiple bits.

With this change, the static kernel image (code/data), which on older MMUs used to be untranslated, is now translated using an "Identity Mapping". It still crashes later when handling kernel vmalloc translations.

- This is the commit message #2:

ARCv3: mm: retain AF bit to avoid Access Fault exceptions

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>

- This is the commit message #3:

ARCv3: mm: fix pte_modify() not clearing exec related bits: _PAGE_NOTEXEC_U

This showed up as LTP mprotect04 looping on the same ProtV fault when trying to execute self-modifying code after an mprotect(PROT_EXEC).

pte_modify() needs to clear the existing access/permission bits and set the ones given by mprotect(), while leaving the rest of the pte bits intact. The old code used a mask to "keep" existing bits and supposedly cleared the rest (since it used PAGE_MASK, which cleared everything). However, on ARC64 PAGE_MASK misses the high bits NXU and NXK. So invert the mask strategy: explicitly clear out everything not needed and rely on newprot to DTRT.

Implementation-wise we clear AP.RO and AP.UK, so it would seem we are making the page read-write and user-and-kernel accessible, but that is just an intermediate step, as the OR with newprot brings in any '1' bits - so __P001 will reinstate AP.RO, thus DTRT. This is just an implementation detail worth noting here.

- This is the commit message #4:

ARCv3: mm: Initialize MMUv6 registers

  - MMU_TTBC with T0SZ/T1SZ (ATM the kernel is linked under 4GB so it uses RTP0)
  - MMU_MEM_ATTR with 3 attributes: normal, uncached, volatile
  - MMU_CTRL set to enable the MMU

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>

- This is the commit message #5:

ARC: Force the correct MMUv6 version

Some tools don't work properly when the MMU version is set to 6. We can catch this early in Linux and fail to boot, since the MMU version has been 16 in hardware for a long time now.

Signed-off-by: Vladimir Isaev <isaev@synopsys.com>
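A minimal sketch of the message #3 mask strategy, using the names from the pgtable-bits-arcv3.h hunk below (illustrative only, not part of the commit):

static void pte_modify_sketch(void)
{
	/* old PTE: writable (AP.RO clear), both NOTEXEC bits set via _PAGE_BASE, dirty */
	pte_t pte = pte_mkdirty(__pte(_PAGE_RW));

	/* mprotect(PROT_EXEC) on a private mapping selects __P100 == PAGE_RX */
	pte = pte_modify(pte, PAGE_RX);

	/*
	 * The mask cleared AP.RO, AP.UK and both NOTEXEC bits; OR-ing in
	 * PAGE_RX reinstated _PAGE_AP_READONLY and _PAGE_NOTEXEC_K but left
	 * _PAGE_NOTEXEC_U clear: user-executable, read-only, still dirty.
	 */
	BUG_ON(pte_write(pte) || !pte_dirty(pte) || !pte_young(pte));
}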
1 parent 113a494 commit 266c85f

File tree

6 files changed: +287, -11 lines changed

arch/arc/include/asm/mmu-arcv2.h

Lines changed: 0 additions & 2 deletions
@@ -12,8 +12,6 @@
 /*
  * TLB Management regs
  */
-#define ARC_REG_MMU_BCR	0x06f
-
 #ifdef CONFIG_ARC_MMU_V3
 #define ARC_REG_TLBPD0	0x405
 #define ARC_REG_TLBPD1	0x406

arch/arc/include/asm/mmu-arcv3.h

Lines changed: 37 additions & 0 deletions
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * MMUv6: hardware page walker
+ * This file contains the TLB access registers and commands
+ */
+
+#ifndef _ASM_ARC_MMUV6_H
+#define _ASM_ARC_MMUV6_H
+
+#define ARC_REG_MMU_CTRL	0x468
+#define ARC_REG_MMU_RTP0	0x460
+#define ARC_REG_MMU_RTP1	0x462
+#define ARC_REG_MMU_TTBC	0x469
+#define ARC_REG_MMU_FAULT_STS	0x46b
+#define ARC_REG_MMU_MEM_ATTR	0x46a
+
+#define ARC_REG_MMU_TLB_CMD	0x465
+#define ARC_REG_MMU_TLB_DATA0	0x466
+#define ARC_REG_MMU_TLB_DATA1	0x467
+
+#ifndef __ASSEMBLY__
+
+extern void mmu_setup_asid(struct mm_struct *mm, unsigned long asid);
+
+static inline void mmu_setup_pgd(struct mm_struct *mm, void *pgd)
+{
+	/*
+	 * Only called by switch_mm(), which a priori calls get_new_mmu_context(),
+	 * which unconditionally calls mmu_setup_asid() to set the ASID.
+	 * Since on this MMU both ASID and pgd are in the same register, we
+	 * program both there and do nothing here.
+	 */
+}
+
+#endif
+
+#endif
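For context, a sketch (as a comment) of the call flow that the mmu_setup_pgd() comment describes; switch_mm()/get_new_mmu_context() are the generic context-switch hooks referenced by that comment, not code in this diff:

/*
 * switch_mm(prev, next)
 *   -> get_new_mmu_context(next)        allocates/refreshes the ASID
 *      -> mmu_setup_asid(next, asid)    one write programs ASID *and* pgd
 *                                       into MMU_RTP0
 *   -> mmu_setup_pgd(next, next->pgd)   deliberately a no-op here
 *
 * Since the ASID and root table pointer share one register on this MMU,
 * a separate pgd write would be redundant.
 */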

arch/arc/include/asm/mmu.h

Lines changed: 6 additions & 0 deletions
@@ -6,6 +6,8 @@
 #ifndef _ASM_ARC_MMU_H
 #define _ASM_ARC_MMU_H
 
+#define ARC_REG_MMU_BCR	0x06f
+
 #ifndef __ASSEMBLY__
 
 #include <linux/threads.h>	/* NR_CPUS */
@@ -16,6 +18,10 @@ typedef struct {
 
 #endif
 
+#ifdef CONFIG_ARC_MMU_V6
+#include <asm/mmu-arcv3.h>
+#else
 #include <asm/mmu-arcv2.h>
+#endif
 
 #endif
arch/arc/include/asm/pgtable-bits-arcv3.h

Lines changed: 180 additions & 0 deletions
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ */
+
+
+#ifndef _ASM_ARC_PGTABLE_BITS_ARCV3_H
+#define _ASM_ARC_PGTABLE_BITS_ARCV3_H
+
+#include <linux/sizes.h>
+
+#define _PAGE_VALID		(_UL(1) << 0)
+#define _PAGE_LINK		(_UL(1) << 1)
+#define _PAGE_MEMATTR_MASK	(_UL(7) << 2)
+#define _PAGE_MEMATTR(idx)	((idx) << 2)
+
+/* User access (AP.UK) is only relevant for data r/w accesses */
+#define _PAGE_AP_U_N_K		(_UL(1) << 6)	/* 1: User + Kernel, 0: Kernel only */
+#define _PAGE_AP_READONLY	(_UL(1) << 7)	/* 1: Read only, 0: Read + Write */
+
+#define __SHR_NONE		0
+#define __SHR_OUTER		2
+#define __SHR_INNER		3
+
+#define _PAGE_SHARED_NONE	(_UL(__SHR_NONE) << 8)
+#define _PAGE_SHARED_OUTER	(_UL(__SHR_OUTER) << 8)	/* Outer Shareable */
+#define _PAGE_SHARED_INNER	(_UL(__SHR_INNER) << 8)	/* Inner Shareable */
+
+#define _PAGE_ACCESSED		(_UL(1) << 10)	/* software managed, exception if clear */
+#define _PAGE_NOTGLOBAL		(_UL(1) << 11)	/* ASID */
+
+#define _PAGE_DIRTY		(_UL(1) << 51)	/* software managed */
+#define _PAGE_NOTEXEC_K		(_UL(1) << 53)	/* No-Execute Kernel */
+#define _PAGE_NOTEXEC_U		(_UL(1) << 54)	/* No-Execute User */
+
+#define _PAGE_SPECIAL		(_UL(1) << 55)
+
+/* TBD: revisit if this needs to be standalone for PROT_NONE */
+#define _PAGE_PRESENT		_PAGE_VALID
+
+/*
+ * _PAGE_LINK indicates to the hw walker to keep going down.
+ * - Set for all intermediate Table Descriptors (pgd, pud, pmd)
+ * - Set for the last level Table Descriptor (pte) pointing to a 4KB page frame
+ * - Not set for "Block Descriptors", where intermediate levels point to
+ *   bigger page frames.
+ */
+
+#define _PAGE_TABLE		(_PAGE_VALID | _PAGE_LINK)
+#define _PAGE_BLOCK		_PAGE_VALID
+
+
+#define MEMATTR_NORMAL		0x69
+#define MEMATTR_UNCACHED	0x01
+#define MEMATTR_VOLATILE	0x00	/* Uncached + No Early Write Ack + Strict Ordering */
+
+#define MEMATTR_IDX_NORMAL	0
+#define MEMATTR_IDX_UNCACHED	1
+#define MEMATTR_IDX_VOLATILE	2
+
+/* Read is always set since AP is specified as RO; !RO == R+W */
+#define _PAGE_BASE	(_PAGE_VALID | _PAGE_LINK |		\
+			 _PAGE_AP_READONLY |			\
+			 _PAGE_NOTEXEC_U | _PAGE_NOTEXEC_K |	\
+			 _PAGE_AP_U_N_K | _PAGE_NOTGLOBAL |	\
+			 _PAGE_ACCESSED |			\
+			 _PAGE_SHARED_INNER |			\
+			 _PAGE_MEMATTR(MEMATTR_IDX_NORMAL))
+
+/* Exec implies Read since Read is always set */
+#define _PAGE_RW	(_PAGE_BASE & ~_PAGE_AP_READONLY)
+#define _PAGE_RX	(_PAGE_BASE & ~_PAGE_NOTEXEC_U)
+#define _PAGE_RWX	(_PAGE_BASE & ~_PAGE_AP_READONLY & ~_PAGE_NOTEXEC_U)
+
+/* TBD: kernel is RWX by default, split it to code/data */
+#define _PAGE_KERNEL	(_PAGE_VALID |				\
+			 /* writable: AP.RO clear */		\
+			 _PAGE_NOTEXEC_U |	/* exec kernel only */	\
+			 /* AP kernel only | global: AP.UK and nG clear */ \
+			 _PAGE_ACCESSED |			\
+			 _PAGE_SHARED_INNER |			\
+			 _PAGE_MEMATTR(MEMATTR_IDX_NORMAL))
+
+#define PAGE_NONE	__pgprot(_PAGE_BASE)	/* TBD */
+#define PAGE_TABLE	__pgprot(_PAGE_TABLE)
+#define PAGE_BLOCK	__pgprot(_PAGE_BLOCK)
+#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
+
+#define PAGE_R		__pgprot(_PAGE_BASE)
+#define PAGE_RW		__pgprot(_PAGE_RW)
+#define PAGE_RX		__pgprot(_PAGE_RX)
+#define PAGE_RWX	__pgprot(_PAGE_RWX)
+
+/* xwr */
+#define __P000	PAGE_NONE
+#define __P001	PAGE_R
+#define __P010	PAGE_R
+#define __P011	PAGE_R
+#define __P100	PAGE_RX
+#define __P101	PAGE_RX
+#define __P110	PAGE_RX
+#define __P111	PAGE_RX
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_R
+#define __S010	PAGE_RW
+#define __S011	PAGE_RW
+#define __S100	PAGE_RX
+#define __S101	PAGE_RX
+#define __S110	PAGE_RWX
+#define __S111	PAGE_RWX
+
+#ifndef __ASSEMBLY__
+
+#define pgprot_noncached(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MEMATTR_MASK) | \
+					 _PAGE_MEMATTR(MEMATTR_IDX_UNCACHED))
+
+#define pte_write(pte)		(!(pte_val(pte) & _PAGE_AP_READONLY))
+#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
+#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
+#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)
+
+#define PTE_BIT_FUNC(fn, op) \
+static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(wrprotect,	|= (_PAGE_AP_READONLY));
+PTE_BIT_FUNC(mkwrite,	&= ~(_PAGE_AP_READONLY));
+PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_DIRTY));
+PTE_BIT_FUNC(mkdirty,	|= (_PAGE_DIRTY));
+/* Accessed bit is always kept set (so even mkold sets it) */
+PTE_BIT_FUNC(mkold,	|= (_PAGE_ACCESSED));
+PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
+PTE_BIT_FUNC(mkspecial,	|= (_PAGE_SPECIAL));
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	const unsigned long mask = _PAGE_VALID |
+				   _PAGE_NOTEXEC_K | _PAGE_NOTEXEC_U |
+				   _PAGE_AP_READONLY | _PAGE_AP_U_N_K;
+
+	return __pte((pte_val(pte) & ~mask) | pgprot_val(newprot));
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pteval)
+{
+	set_pte(ptep, pteval);
+}
+
+/*
+ * Encode and decode a swap entry
+ *
+ * Format of swap PTE:
+ *	bits 0-1:  _PAGE_VALID and _PAGE_LINK (must be zero)
+ *	bits 2-7:  swap type
+ *	bits 8-57: swap offset
+ *	bit 58:    PROT_NONE (must be zero)
+ *
+ * Note: swap bits are needed even if !CONFIG_SWAP
+ */
+#define __SWP_TYPE_SHIFT	2
+#define __SWP_TYPE_BITS		6
+#define __SWP_OFFSET_BITS	50
+#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
+#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)
+
+#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
+#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
+
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+		      pte_t *ptep);
+
+#endif	/* __ASSEMBLY__ */
+
+#endif
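A quick sanity sketch of the swap-entry layout above (values are illustrative; swp_entry_t and the __swp_*() helpers are the ones defined in this hunk):

static void swp_layout_sketch(void)
{
	/* type 5 lands in bits 7:2, offset 0x12345 in bits 57:8 */
	swp_entry_t e = __swp_entry(5, 0x12345);

	BUG_ON(__swp_type(e) != 5);
	BUG_ON(__swp_offset(e) != 0x12345);

	/* bits 1:0 stay zero, so a swap PTE never looks _PAGE_VALID */
	BUG_ON(pte_val(__swp_entry_to_pte(e)) & _PAGE_VALID);
}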

arch/arc/include/asm/pgtable.h

Lines changed: 6 additions & 0 deletions
@@ -9,7 +9,13 @@
 #include <linux/bits.h>
 
 #include <asm/pgtable-levels.h>
+
+#ifndef CONFIG_ARC_MMU_V6
 #include <asm/pgtable-bits-arcv2.h>
+#else
+#include <asm/pgtable-bits-arcv3.h>
+#endif
+
 #include <asm/page.h>
 #include <asm/mmu.h>
 
arch/arc/mm/tlb-arcv3.c

Lines changed: 58 additions & 9 deletions
@@ -16,26 +16,31 @@ static struct cpuinfo_arc_mmu {
 
 int arc_mmu_mumbojumbo(int c, char *buf, int len)
 {
+	const unsigned int mmu_version = 0x10;
 	unsigned int lookups, pg_sz_k, ntlb, u_dtlb, u_itlb;
 	char *variant_nm[] = { "MMU32", "MMU48", "MMU52" };
 	struct bcr_mmu_6 mmu6;
-	int n= 0;
+	int n = 0;
 
 	READ_BCR(ARC_REG_MMU_BCR, mmu6);
-	if (!mmu6.ver)
-		return n;
-
-	if (mmu6.variant == 0) {	/* MMUv32 */
-		lookups = 3;
-		pg_sz_k = 4;
+	if (mmu6.ver != mmu_version) {
+		panic("Bad version of MMUv6 %#x (expected %#x)\n",
+		      mmu6.ver, mmu_version);
+		return 0;
+	} else if (!mmu6.ver) {
+		panic("Only MMU48 supported currently\n");
+		return 0;
 	}
 
+	lookups = 4;	/* 4 levels */
+	pg_sz_k = 4;	/* 4KB */
+
 	u_dtlb = 2 << mmu6.u_dtlb;	/* 8, 16 */
 	u_itlb = 2 << mmu6.u_itlb;	/* 4, 8, 16 */
 	ntlb = 256 << mmu6.n_tlb;	/* Fixed 4w */
 
 	n += scnprintf(buf + n, len - n,
-		"MMU [v%x] \t\t: %s hwalk %d levels, %dk PAGE, JTLB %d uD/I %d/%d\n",
+		"MMU [v%x]\t: %s hwalk %d levels, %dk PAGE, JTLB %d uD/I %d/%d\n",
 		mmu6.ver, variant_nm[mmu6.variant], lookups,
 		pg_sz_k, ntlb, u_dtlb, u_itlb);
 
@@ -46,9 +51,40 @@ int arc_mmu_mumbojumbo(int c, char *buf, int len)
 
 void arc_mmu_init(void)
 {
-	return;
+	struct mmu_ttbc {
+		u32 t0sz:5, t0sh:2, t0c:1, res0:7, a1:1,
+		    t1sz:5, t1sh:2, t1c:1, res1:8;
+	} ttbc;
+
+	struct mmu_mem_attr {
+		u8 attr[8];
+	} memattr;
+
 	if (mmuinfo.pg_sz_k != TO_KB(PAGE_SIZE))
 		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
+
+	arc_paging_init();
+
+	ttbc.t0sz = 16;
+	ttbc.t1sz = 16;	/* Not relevant since kernel linked under 4GB hits T0SZ */
+	ttbc.t0sh = __SHR_INNER;
+	ttbc.t1sh = __SHR_INNER;
+	ttbc.t0c = 1;
+	ttbc.t1c = 1;
+	ttbc.a1 = 0;	/* ASID used is from MMU_RTP0 */
+
+	WRITE_AUX(ARC_REG_MMU_TTBC, ttbc);
+
+	memattr.attr[MEMATTR_IDX_NORMAL] = MEMATTR_NORMAL;
+	memattr.attr[MEMATTR_IDX_UNCACHED] = MEMATTR_UNCACHED;
+	memattr.attr[MEMATTR_IDX_VOLATILE] = MEMATTR_VOLATILE;
+
+	WRITE_AUX64(ARC_REG_MMU_MEM_ATTR, memattr);
+
+	write_aux_64(ARC_REG_MMU_RTP0, __pa(swapper_pg_dir));
+	write_aux_64(ARC_REG_MMU_RTP1, 0);	/* to catch bugs */
+
+	write_aux_reg(ARC_REG_MMU_CTRL, 0x7);
 }
 
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
@@ -57,6 +93,19 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 
 }
 
+noinline void mmu_setup_asid(struct mm_struct *mm, unsigned long asid)
+{
+#ifdef CONFIG_64BIT
+	unsigned long rtp0 = (asid << 48) | __pa(mm->pgd);
+
+	BUG_ON(__pa(mm->pgd) >> 48);
+	write_aux_64(ARC_REG_MMU_RTP0, rtp0);
+
+#else
+#error "Need to implement 2 SR ops"
+#endif
+}
+
 noinline void local_flush_tlb_all(void)
 {
 }
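Assuming the RTP0 layout implied by mmu_setup_asid() above (ASID in bits 63:48, root table physical address below it) and the usual 64 - T0SZ convention for translation-region size, a small sketch of the arithmetic (rtp0_pack() is a hypothetical helper, not in the commit):

/* T0SZ = 16 leaves a 64 - 16 = 48-bit region-0 VA space, matching the
 * MMU48 variant that the probe code insists on. */
static unsigned long rtp0_pack(unsigned long asid, unsigned long pgd_pa)
{
	BUG_ON(pgd_pa >> 48);		/* PA must sit below the ASID field */
	return (asid << 48) | pgd_pa;	/* same packing as mmu_setup_asid() */
}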
