@@ -69,7 +69,8 @@ static void free_work(struct work_struct *w)
 
 /*** Page table manipulation functions ***/
 
-static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+			     pgtbl_mod_mask *mask)
 {
 	pte_t *pte;
 
@@ -78,59 +79,81 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
 	} while (pte++, addr += PAGE_SIZE, addr != end);
+	*mask |= PGTBL_PTE_MODIFIED;
 }
 
-static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
+static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
+			     pgtbl_mod_mask *mask)
 {
 	pmd_t *pmd;
 	unsigned long next;
+	int cleared;
 
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		if (pmd_clear_huge(pmd))
+
+		cleared = pmd_clear_huge(pmd);
+		if (cleared || pmd_bad(*pmd))
+			*mask |= PGTBL_PMD_MODIFIED;
+
+		if (cleared)
 			continue;
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		vunmap_pte_range(pmd, addr, next);
+		vunmap_pte_range(pmd, addr, next, mask);
 	} while (pmd++, addr = next, addr != end);
 }
 
-static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
+static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
+			     pgtbl_mod_mask *mask)
 {
 	pud_t *pud;
 	unsigned long next;
+	int cleared;
 
 	pud = pud_offset(p4d, addr);
 	do {
 		next = pud_addr_end(addr, end);
-		if (pud_clear_huge(pud))
+
+		cleared = pud_clear_huge(pud);
+		if (cleared || pud_bad(*pud))
+			*mask |= PGTBL_PUD_MODIFIED;
+
+		if (cleared)
 			continue;
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		vunmap_pmd_range(pud, addr, next);
+		vunmap_pmd_range(pud, addr, next, mask);
 	} while (pud++, addr = next, addr != end);
 }
 
-static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
+			     pgtbl_mod_mask *mask)
 {
 	p4d_t *p4d;
 	unsigned long next;
+	int cleared;
 
 	p4d = p4d_offset(pgd, addr);
 	do {
 		next = p4d_addr_end(addr, end);
-		if (p4d_clear_huge(p4d))
+
+		cleared = p4d_clear_huge(p4d);
+		if (cleared || p4d_bad(*p4d))
+			*mask |= PGTBL_P4D_MODIFIED;
+
+		if (cleared)
 			continue;
 		if (p4d_none_or_clear_bad(p4d))
 			continue;
-		vunmap_pud_range(p4d, addr, next);
+		vunmap_pud_range(p4d, addr, next, mask);
 	} while (p4d++, addr = next, addr != end);
 }
 
 /**
  * unmap_kernel_range_noflush - unmap kernel VM area
- * @addr: start of the VM area to unmap
+ * @start: start of the VM area to unmap
  * @size: size of the VM area to unmap
  *
  * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size specify
@@ -141,24 +164,33 @@ static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
  * for calling flush_cache_vunmap() on to-be-mapped areas before calling this
  * function and flush_tlb_kernel_range() after.
  */
-void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+void unmap_kernel_range_noflush(unsigned long start, unsigned long size)
 {
-	unsigned long end = addr + size;
+	unsigned long end = start + size;
 	unsigned long next;
 	pgd_t *pgd;
+	unsigned long addr = start;
+	pgtbl_mod_mask mask = 0;
 
 	BUG_ON(addr >= end);
+	start = addr;
 	pgd = pgd_offset_k(addr);
 	do {
 		next = pgd_addr_end(addr, end);
+		if (pgd_bad(*pgd))
+			mask |= PGTBL_PGD_MODIFIED;
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		vunmap_p4d_range(pgd, addr, next);
+		vunmap_p4d_range(pgd, addr, next, &mask);
 	} while (pgd++, addr = next, addr != end);
+
+	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+		arch_sync_kernel_mappings(start, end);
 }
 
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
-		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
+		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
+		pgtbl_mod_mask *mask)
 {
 	pte_t *pte;
 
@@ -167,7 +199,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
 	 * callers keep track of where we're up to.
 	 */
 
-	pte = pte_alloc_kernel(pmd, addr);
+	pte = pte_alloc_kernel_track(pmd, addr, mask);
 	if (!pte)
 		return -ENOMEM;
 	do {
@@ -180,55 +212,59 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
 		(*nr)++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
+	*mask |= PGTBL_PTE_MODIFIED;
 	return 0;
 }
 
 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
-		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
+		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
+		pgtbl_mod_mask *mask)
 {
 	pmd_t *pmd;
 	unsigned long next;
 
-	pmd = pmd_alloc(&init_mm, pud, addr);
+	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
 	if (!pmd)
 		return -ENOMEM;
 	do {
 		next = pmd_addr_end(addr, end);
-		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
+		if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask))
 			return -ENOMEM;
 	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
 
 static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
-		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
+		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
+		pgtbl_mod_mask *mask)
 {
 	pud_t *pud;
 	unsigned long next;
 
-	pud = pud_alloc(&init_mm, p4d, addr);
+	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
 	if (!pud)
 		return -ENOMEM;
 	do {
 		next = pud_addr_end(addr, end);
-		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
+		if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask))
 			return -ENOMEM;
 	} while (pud++, addr = next, addr != end);
 	return 0;
 }
 
 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
-		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
+		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
+		pgtbl_mod_mask *mask)
 {
 	p4d_t *p4d;
 	unsigned long next;
 
-	p4d = p4d_alloc(&init_mm, pgd, addr);
+	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
 	if (!p4d)
 		return -ENOMEM;
 	do {
 		next = p4d_addr_end(addr, end);
-		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
+		if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask))
 			return -ENOMEM;
 	} while (p4d++, addr = next, addr != end);
 	return 0;
@@ -255,21 +291,28 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
 int map_kernel_range_noflush(unsigned long addr, unsigned long size,
 			     pgprot_t prot, struct page **pages)
 {
+	unsigned long start = addr;
 	unsigned long end = addr + size;
 	unsigned long next;
 	pgd_t *pgd;
 	int err = 0;
 	int nr = 0;
+	pgtbl_mod_mask mask = 0;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
+		if (pgd_bad(*pgd))
+			mask |= PGTBL_PGD_MODIFIED;
+		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
 		if (err)
 			return err;
 	} while (pgd++, addr = next, addr != end);
 
+	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+		arch_sync_kernel_mappings(start, end);
+
 	return 0;
 }
 
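
Note on usage: the kernel-doc comment in the hunks above leaves cache and TLB maintenance to the caller, while the new pgtbl_mod_mask plumbing lets unmap_kernel_range_noflush() and map_kernel_range_noflush() decide internally whether arch_sync_kernel_mappings() needs to run. As a minimal illustrative sketch only (not part of this patch; the wrapper name below is hypothetical, and only flush_cache_vunmap(), unmap_kernel_range_noflush() and flush_tlb_kernel_range() come from the comment in the diff), a caller honoring that contract would look roughly like:

/*
 * Illustrative sketch, not part of this diff: a caller that honors the
 * flushing contract described in the kernel-doc comment above.
 */
static void example_unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);		/* caller flushes caches first */
	unmap_kernel_range_noflush(addr, size);	/* clears entries, tracks modified levels */
	flush_tlb_kernel_range(addr, end);	/* caller flushes the TLB afterwards */
}

Accumulating the modified page-table levels in a single mask means arch_sync_kernel_mappings() is invoked at most once per map or unmap operation, and only on architectures that opt in by setting bits in ARCH_PAGE_TABLE_SYNC_MASK.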