/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_PGTABLE_BITS_ARCV3_H
#define _ASM_ARC_PGTABLE_BITS_ARCV3_H

#include <linux/sizes.h>

#define _PAGE_VALID		(_UL(1) << 0)
#define _PAGE_LINK		(_UL(1) << 1)
#define _PAGE_MEMATTR_MASK	(_UL(7) << 2)
#define _PAGE_MEMATTR(idx)	((idx) << 2)

/* Access permission bits only govern data r/w; execute is controlled by _PAGE_NOTEXEC_* */
#define _PAGE_AP_U_N_K		(_UL(1) << 6)	/* 1: User + Kernel, 0: Kernel only */
#define _PAGE_AP_READONLY	(_UL(1) << 7)	/* 1: Read only, 0: Read + Write */

#define __SHR_NONE		0
#define __SHR_OUTER		2
#define __SHR_INNER		3

#define _PAGE_SHARED_NONE	(_UL(__SHR_NONE) << 8)
#define _PAGE_SHARED_OUTER	(_UL(__SHR_OUTER) << 8)	/* Outer Shareable */
#define _PAGE_SHARED_INNER	(_UL(__SHR_INNER) << 8)	/* Inner Shareable */

#define _PAGE_ACCESSED		(_UL(1) << 10)	/* software managed, exception if clear */
#define _PAGE_NOTGLOBAL		(_UL(1) << 11)	/* ASID */

#define _PAGE_DIRTY		(_UL(1) << 51)	/* software managed */
#define _PAGE_NOTEXEC_K		(_UL(1) << 53)	/* Execute Never: Kernel */
#define _PAGE_NOTEXEC_U		(_UL(1) << 54)	/* Execute Never: User */

#define _PAGE_SPECIAL		(_UL(1) << 55)

/* TBD: revisit if this needs to be standalone for PROT_NONE */
#define _PAGE_PRESENT		_PAGE_VALID

/*
 * _PAGE_LINK tells the hardware walker to keep descending:
 * - Set in all intermediate table descriptors (pgd, pud, pmd)
 * - Set in a last level table descriptor (pte) pointing to a 4KB page frame
 * - Clear in "Block descriptors", where an intermediate level maps a
 *   bigger page frame directly
 */

#define _PAGE_TABLE		(_PAGE_VALID | _PAGE_LINK)
#define _PAGE_BLOCK		_PAGE_VALID
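
/*
 * Editor sketch (hypothetical values, not from this patch): a mid-level
 * entry pointing at a next-level table carries _PAGE_TABLE, while the
 * same level mapping a large page frame directly carries _PAGE_BLOCK:
 *
 *	pmd_val(pmd) = next_table_pa | _PAGE_TABLE;  // keep walking
 *	pmd_val(pmd) = big_frame_pa  | _PAGE_BLOCK;  // stop: block mapping
 *
 * next_table_pa/big_frame_pa stand in for suitably aligned physical
 * addresses.
 */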

#define MEMATTR_NORMAL		0x69
#define MEMATTR_UNCACHED	0x01
#define MEMATTR_VOLATILE	0x00	/* Uncached + No Early Write Ack + Strict Ordering */

#define MEMATTR_IDX_NORMAL	0
#define MEMATTR_IDX_UNCACHED	1
#define MEMATTR_IDX_VOLATILE	2

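/*
 * Editor note (hedged): a pte only carries the 3-bit index set by
 * _PAGE_MEMATTR(idx); the MEMATTR_* encodings themselves would be
 * programmed once into a memory-attribute register during early MMU
 * bring-up, roughly along these lines (the register name and the 8-bit
 * field layout are assumptions for illustration):
 *
 *	unsigned long attr;
 *
 *	attr  = MEMATTR_NORMAL   << (8 * MEMATTR_IDX_NORMAL);
 *	attr |= MEMATTR_UNCACHED << (8 * MEMATTR_IDX_UNCACHED);
 *	attr |= MEMATTR_VOLATILE << (8 * MEMATTR_IDX_VOLATILE);
 *	write_aux_reg(ARC_REG_MMU_MEMATTR, attr);
 */
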
/* Read is always granted: AP only encodes RO vs !RO (== R+W) */
#define _PAGE_BASE	(_PAGE_VALID | _PAGE_LINK |		\
			 _PAGE_AP_READONLY |			\
			 _PAGE_NOTEXEC_U | _PAGE_NOTEXEC_K |	\
			 _PAGE_AP_U_N_K | _PAGE_NOTGLOBAL |	\
			 _PAGE_ACCESSED |			\
			 _PAGE_SHARED_INNER |			\
			 _PAGE_MEMATTR(MEMATTR_IDX_NORMAL))

/* Exec implies Read since Read is always set */
#define _PAGE_RW	(_PAGE_BASE & ~_PAGE_AP_READONLY)
#define _PAGE_RX	(_PAGE_BASE & ~_PAGE_NOTEXEC_U)
#define _PAGE_RWX	(_PAGE_BASE & ~_PAGE_AP_READONLY & ~_PAGE_NOTEXEC_U)

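/*
 * Worked example (editor note): _PAGE_RX clears only _PAGE_NOTEXEC_U from
 * _PAGE_BASE, so a user text page stays read-only and remains
 * non-executable for the kernel (_PAGE_NOTEXEC_K is still set).
 */
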
/* TBD: kernel is RWX by default, split it to code/data */
#define _PAGE_KERNEL	(_PAGE_VALID |				\
			 /* !AP_READONLY: writable */		\
			 _PAGE_NOTEXEC_U |			\
			 /* !NOTEXEC_K: kernel exec */		\
			 /* !AP_U_N_K, !NOTGLOBAL: kernel only, global */ \
			 _PAGE_ACCESSED |			\
			 _PAGE_SHARED_INNER |			\
			 _PAGE_MEMATTR(MEMATTR_IDX_NORMAL))

#define PAGE_NONE	__pgprot(_PAGE_BASE)	/* TBD */
#define PAGE_TABLE	__pgprot(_PAGE_TABLE)
#define PAGE_BLOCK	__pgprot(_PAGE_BLOCK)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)

#define PAGE_R		__pgprot(_PAGE_BASE)
#define PAGE_RW		__pgprot(_PAGE_RW)
#define PAGE_RX		__pgprot(_PAGE_RX)
#define PAGE_RWX	__pgprot(_PAGE_RWX)

				/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_R
#define __P010	PAGE_R
#define __P011	PAGE_R
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_R
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
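
/*
 * Editor note (hedged): the __Pxxx/__Sxxx entries above are consumed by
 * generic mm code to build protection_map[16], indexed by the
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of a mapping; in kernels of
 * this vintage that table lives in mm/mmap.c, roughly:
 *
 *	pgprot_t protection_map[16] = {
 *		__P000, __P001, __P010, __P011,
 *		__P100, __P101, __P110, __P111,
 *		__S000, __S001, __S010, __S011,
 *		__S100, __S101, __S110, __S111
 *	};
 */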

#ifndef __ASSEMBLY__

#define pgprot_noncached(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MEMATTR_MASK) | \
					 _PAGE_MEMATTR(MEMATTR_IDX_UNCACHED))
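
/*
 * Usage sketch (editor note): pgprot_noncached() swaps the MEMATTR index
 * to MEMATTR_IDX_UNCACHED while leaving the permission bits alone, e.g.
 * in a driver's mmap method:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			       vma->vm_page_prot);
 */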

#define pte_write(pte)		(!(pte_val(pte) & _PAGE_AP_READONLY))
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect,	|= (_PAGE_AP_READONLY));
PTE_BIT_FUNC(mkwrite,	&= ~(_PAGE_AP_READONLY));
PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,	|= (_PAGE_DIRTY));
/* Accessed bit stays set even in mkold: a clear Accessed bit faults (see _PAGE_ACCESSED) */
PTE_BIT_FUNC(mkold,	|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(mkspecial,	|= (_PAGE_SPECIAL));
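
/*
 * For reference, PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY)) expands to:
 *
 *	static inline pte_t pte_mkdirty(pte_t pte)
 *	{
 *		pte_val(pte) |= (_PAGE_DIRTY);
 *		return pte;
 *	}
 */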

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = _PAGE_VALID |
				   _PAGE_NOTEXEC_K | _PAGE_NOTEXEC_U |
				   _PAGE_AP_READONLY | _PAGE_AP_U_N_K;

	return __pte((pte_val(pte) & ~mask) | pgprot_val(newprot));
}
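
/*
 * Usage sketch (editor note): pte_modify() is how generic code such as the
 * mprotect path retargets an existing pte at a new protection:
 *
 *	pte = pte_modify(pte, newprot);
 *
 * The bits in 'mask' are dropped from the old pte before the new pgprot is
 * OR'd in, so the pfn and software state (dirty etc.) survive the change.
 */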

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bits 0-1:  _PAGE_VALID and _PAGE_LINK (must be zero)
 *	bits 2-7:  swap type
 *	bits 8-57: swap offset
 *	bit  58:   PROT_NONE (must be zero)
 *
 * Note: swap bits are needed even if !CONFIG_SWAP
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
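
/*
 * Worked example (editor note): __swp_entry(3, 0x1234) yields
 * (3 << 2) | (0x1234 << 8) == 0x12340c: type in bits 2-7, offset in
 * bits 8-57, and bits 0-1 clear so the MMU treats the pte as invalid.
 * __swp_type()/__swp_offset() recover 3 and 0x1234 respectively.
 */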

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_ARC_PGTABLE_BITS_ARCV3_H */