(*
* Copyright 2014, General Dynamics C4 Systems
*
* This software may be distributed and modified according to the terms of
* the GNU General Public License version 2. Note that NO WARRANTY is provided.
* See "LICENSE_GPLv2.txt" for details.
*
* @TAG(GD_GPL)
*)
(*
Higher-level functions for manipulating virtual address spaces
*)
chapter "ARM VSpace Functions"
theory ArchVSpace_A
imports Retype_A
begin
text {* Compute the set of entries that would be inserted into a page table or
page directory to map frames of the various supported sizes at a given virtual
address. *}
fun create_mapping_entries ::
"paddr \<Rightarrow> vspace_ref \<Rightarrow> vmpage_size \<Rightarrow> vm_rights \<Rightarrow> vm_attributes \<Rightarrow> word32 \<Rightarrow>
((pte * word32 list) + (pde * word32 list),'z::state_ext) se_monad"
where
"create_mapping_entries base vptr ARMSmallPage vm_rights attrib pd =
doE
p \<leftarrow> lookup_error_on_failure False $ lookup_pt_slot pd vptr;
returnOk $ Inl (SmallPagePTE base (attrib - {Global, ParityEnabled})
vm_rights, [p])
odE"
| "create_mapping_entries base vptr ARMLargePage vm_rights attrib pd =
doE
p \<leftarrow> lookup_error_on_failure False $ lookup_pt_slot pd vptr;
returnOk $ Inl (LargePagePTE base (attrib - {Global, ParityEnabled})
vm_rights, [p, p + 4 .e. p + 60])
odE"
| "create_mapping_entries base vptr ARMSection vm_rights attrib pd =
doE
p \<leftarrow> returnOk (lookup_pd_slot pd vptr);
returnOk $ Inr (SectionPDE base (attrib - {Global}) 0 vm_rights, [p])
odE"
| "create_mapping_entries base vptr ARMSuperSection vm_rights attrib pd =
doE
p \<leftarrow> returnOk (lookup_pd_slot pd vptr);
returnOk $ Inr (SuperSectionPDE base (attrib - {Global}) vm_rights, [p, p + 4 .e. p + 60])
odE"
definition get_master_pde :: "word32 \<Rightarrow> (ARM_Structs_A.pde,'z::state_ext)s_monad"
where "get_master_pde ptr \<equiv> do
pde \<leftarrow> (get_pde (ptr && ~~ mask 6));
(case pde of ARM_Structs_A.pde.SuperSectionPDE _ _ _ \<Rightarrow> return pde
| _ \<Rightarrow> get_pde ptr)
od"
definition get_master_pte :: "word32 \<Rightarrow> (ARM_Structs_A.pte, 'z::state_ext)s_monad"
where "get_master_pte ptr \<equiv> do
pte \<leftarrow> (get_pte (ptr && ~~ mask 6));
(case pte of ARM_Structs_A.pte.LargePagePTE _ _ _ \<Rightarrow> return pte
| _ \<Rightarrow> get_pte ptr)
od"
text {* Placing an entry which maps a frame within the set of entries that map a
larger frame is unsafe. This function checks that the given entries replace
either invalid entries or entries of the same granularity. *}
fun ensure_safe_mapping ::
"(pte * word32 list) + (pde * word32 list) \<Rightarrow> (unit,'z::state_ext) se_monad"
where
"ensure_safe_mapping (Inl (InvalidPTE, _)) = returnOk ()"
|
"ensure_safe_mapping (Inl (SmallPagePTE _ _ _, pt_slots)) =
mapME_x (\<lambda>slot. (doE
pte \<leftarrow> liftE $ get_master_pte slot;
(case pte of
InvalidPTE \<Rightarrow> returnOk ()
| SmallPagePTE _ _ _ \<Rightarrow> returnOk ()
| _ \<Rightarrow> throwError DeleteFirst)
odE)) pt_slots"
|
"ensure_safe_mapping (Inl (LargePagePTE _ _ _, pt_slots)) =
mapME_x (\<lambda> slot. (doE
pte \<leftarrow> liftE $ get_master_pte slot;
(case pte of
InvalidPTE \<Rightarrow> returnOk ()
| LargePagePTE _ _ _ \<Rightarrow> returnOk ()
| _ \<Rightarrow> throwError DeleteFirst
)
odE)) pt_slots"
|
"ensure_safe_mapping (Inr (InvalidPDE, _)) = returnOk ()"
|
"ensure_safe_mapping (Inr (PageTablePDE _ _ _, _)) = fail"
|
"ensure_safe_mapping (Inr (SectionPDE _ _ _ _, pd_slots)) =
mapME_x (\<lambda> slot. (doE
pde \<leftarrow> liftE $ get_master_pde slot;
(case pde of
InvalidPDE \<Rightarrow> returnOk ()
| SectionPDE _ _ _ _ \<Rightarrow> returnOk ()
| _ \<Rightarrow> throwError DeleteFirst
)
odE)) pd_slots"
|
"ensure_safe_mapping (Inr (SuperSectionPDE _ _ _, pd_slots)) =
mapME_x (\<lambda> slot. (doE
pde \<leftarrow> liftE $ get_master_pde slot;
(case pde of
InvalidPDE \<Rightarrow> returnOk ()
| SuperSectionPDE _ _ _ \<Rightarrow> returnOk ()
| _ \<Rightarrow> throwError DeleteFirst
)
odE)) pd_slots"
text {* Look up a thread's IPC buffer and check that the thread has the right
authority to read or (in the receiver case) write to it. *}
definition
lookup_ipc_buffer :: "bool \<Rightarrow> word32 \<Rightarrow> (word32 option,'z::state_ext) s_monad" where
"lookup_ipc_buffer is_receiver thread \<equiv> do
buffer_ptr \<leftarrow> thread_get tcb_ipc_buffer thread;
buffer_frame_slot \<leftarrow> return (thread, tcb_cnode_index 4);
buffer_cap \<leftarrow> get_cap buffer_frame_slot;
(case buffer_cap of
ArchObjectCap (PageCap p R vms _) \<Rightarrow>
if vm_read_write \<subseteq> R \<or> vm_read_only \<subseteq> R \<and> \<not>is_receiver
then return $ Some $ p + (buffer_ptr && mask (pageBitsForSize vms))
else return None
| _ \<Rightarrow> return None)
od"
text {* Locate the page directory associated with a given virtual ASID. *}
definition
find_pd_for_asid :: "asid \<Rightarrow> (word32,'z::state_ext) lf_monad" where
"find_pd_for_asid asid \<equiv> doE
assertE (asid > 0);
asid_table \<leftarrow> liftE $ gets (arm_asid_table \<circ> arch_state);
pool_ptr \<leftarrow> returnOk (asid_table (asid_high_bits_of asid));
pool \<leftarrow> (case pool_ptr of
Some ptr \<Rightarrow> liftE $ get_asid_pool ptr
| None \<Rightarrow> throwError InvalidRoot);
pd \<leftarrow> returnOk (pool (ucast asid));
(case pd of
Some ptr \<Rightarrow> returnOk ptr
| None \<Rightarrow> throwError InvalidRoot)
odE"
text {* Locate the page directory for an ASID, asserting that the lookup
succeeds and yields a pointer to a real page directory. *}
definition
find_pd_for_asid_assert :: "asid \<Rightarrow> (word32,'z::state_ext) s_monad" where
"find_pd_for_asid_assert asid \<equiv> do
pd \<leftarrow> find_pd_for_asid asid <catch> K fail;
get_pde pd;
return pd
od"
text {* Format a VM fault message to be passed to a thread's supervisor after
it encounters a page fault. *}
fun
handle_vm_fault :: "word32 \<Rightarrow> vmfault_type \<Rightarrow> (unit,'z::state_ext) f_monad"
where
"handle_vm_fault thread ARMDataAbort = doE
addr \<leftarrow> liftE $ do_machine_op getFAR;
fault \<leftarrow> liftE $ do_machine_op getDFSR;
throwError $ VMFault addr [0, fault && mask 14]
odE"
|
"handle_vm_fault thread ARMPrefetchAbort = doE
pc \<leftarrow> liftE $ as_user thread $ getRestartPC;
fault \<leftarrow> liftE $ do_machine_op getIFSR;
throwError $ VMFault pc [1, fault && mask 14]
odE"
text {* Load the optional hardware ASID currently associated with this virtual
ASID. *}
definition
load_hw_asid :: "asid \<Rightarrow> (hardware_asid option,'z::state_ext) s_monad" where
"load_hw_asid asid \<equiv> do
asid_map \<leftarrow> gets (arm_asid_map \<circ> arch_state);
return $ option_map fst $ asid_map asid
od"
text {* Associate a hardware ASID with a virtual ASID. *}
definition
store_hw_asid :: "asid \<Rightarrow> hardware_asid \<Rightarrow> (unit,'z::state_ext) s_monad" where
"store_hw_asid asid hw_asid \<equiv> do
pd \<leftarrow> find_pd_for_asid_assert asid;
asid_map \<leftarrow> gets (arm_asid_map \<circ> arch_state);
asid_map' \<leftarrow> return (asid_map (asid \<mapsto> (hw_asid, pd)));
modify (\<lambda>s. s \<lparr> arch_state := (arch_state s) \<lparr> arm_asid_map := asid_map' \<rparr>\<rparr>);
hw_asid_map \<leftarrow> gets (arm_hwasid_table \<circ> arch_state);
hw_asid_map' \<leftarrow> return (hw_asid_map (hw_asid \<mapsto> asid));
modify (\<lambda>s. s \<lparr> arch_state := (arch_state s) \<lparr> arm_hwasid_table := hw_asid_map' \<rparr>\<rparr>)
od"
text {* Clear all TLB mappings associated with this virtual ASID. *}
definition
invalidate_tlb_by_asid :: "asid \<Rightarrow> (unit,'z::state_ext) s_monad" where
"invalidate_tlb_by_asid asid \<equiv> do
maybe_hw_asid \<leftarrow> load_hw_asid asid;
(case maybe_hw_asid of
None \<Rightarrow> return ()
| Some hw_asid \<Rightarrow> do_machine_op $ invalidateTLB_ASID hw_asid)
od"
text {* Flush all cache and TLB entries associated with this virtual ASID. *}
definition
flush_space :: "asid \<Rightarrow> (unit,'z::state_ext) s_monad" where
"flush_space asid \<equiv> do
maybe_hw_asid \<leftarrow> load_hw_asid asid;
do_machine_op cleanCaches_PoU;
(case maybe_hw_asid of
None \<Rightarrow> return ()
| Some hw_asid \<Rightarrow> do_machine_op $ invalidateTLB_ASID hw_asid)
od"
text {* Remove any mapping from this virtual ASID to a hardware ASID. *}
definition
invalidate_asid :: "asid \<Rightarrow> (unit,'z::state_ext) s_monad" where
"invalidate_asid asid \<equiv> do
asid_map \<leftarrow> gets (arm_asid_map \<circ> arch_state);
asid_map' \<leftarrow> return (asid_map (asid:= None));
modify (\<lambda>s. s \<lparr> arch_state := (arch_state s) \<lparr> arm_asid_map := asid_map' \<rparr>\<rparr>)
od"
text {* Remove any mapping from this hardware ASID to a virtual ASID. *}
definition
invalidate_hw_asid_entry :: "hardware_asid \<Rightarrow> (unit,'z::state_ext) s_monad" where
"invalidate_hw_asid_entry hw_asid \<equiv> do
hw_asid_map \<leftarrow> gets (arm_hwasid_table \<circ> arch_state);
hw_asid_map' \<leftarrow> return (hw_asid_map (hw_asid:= None));
modify (\<lambda>s. s \<lparr> arch_state := (arch_state s) \<lparr> arm_hwasid_table := hw_asid_map' \<rparr>\<rparr>)
od"
text {* Remove the mappings in both directions between this virtual ASID and
its hardware ASID. *}
definition
invalidate_asid_entry :: "asid \<Rightarrow> (unit,'z::state_ext) s_monad" where
"invalidate_asid_entry asid \<equiv> do
maybe_hw_asid \<leftarrow> load_hw_asid asid;
when (maybe_hw_asid \<noteq> None) $ invalidate_hw_asid_entry (the maybe_hw_asid);
invalidate_asid asid
od"
text {* Locate a hardware ASID that is not in use, if necessary by reclaiming
one from another virtual ASID in a round-robin manner. *}
definition
find_free_hw_asid :: "(hardware_asid,'z::state_ext) s_monad" where
"find_free_hw_asid \<equiv> do
hw_asid_table \<leftarrow> gets (arm_hwasid_table \<circ> arch_state);
next_asid \<leftarrow> gets (arm_next_asid \<circ> arch_state);
maybe_asid \<leftarrow> return (find (\<lambda>a. hw_asid_table a = None)
(take (length [minBound :: hardware_asid .e. maxBound])
([next_asid .e. maxBound] @ [minBound .e. next_asid])));
(case maybe_asid of
Some hw_asid \<Rightarrow> return hw_asid
| None \<Rightarrow> do
invalidate_asid $ the $ hw_asid_table next_asid;
do_machine_op $ invalidateTLB_ASID next_asid;
invalidate_hw_asid_entry next_asid;
new_next_asid \<leftarrow> return (next_asid + 1);
modify (\<lambda>s. s \<lparr> arch_state := (arch_state s) \<lparr> arm_next_asid := new_next_asid \<rparr>\<rparr>);
return next_asid
od)
od"
text {* Get the hardware ASID associated with a virtual ASID, assigning one if
none is already assigned. *}
definition
get_hw_asid :: "asid \<Rightarrow> (hardware_asid,'z::state_ext) s_monad" where
"get_hw_asid asid \<equiv> do
maybe_hw_asid \<leftarrow> load_hw_asid asid;
(case maybe_hw_asid of
Some hw_asid \<Rightarrow> return hw_asid
| None \<Rightarrow> do
new_hw_asid \<leftarrow> find_free_hw_asid;
store_hw_asid asid new_hw_asid;
return new_hw_asid
od)
od"
abbreviation
"arm_context_switch_hwasid pd hwasid \<equiv> do
setCurrentPD $ addrFromPPtr pd;
setHardwareASID hwasid
od"
definition
arm_context_switch :: "word32 \<Rightarrow> asid \<Rightarrow> (unit, 'z::state_ext) s_monad"
where
"arm_context_switch pd asid \<equiv> do
hwasid \<leftarrow> get_hw_asid asid;
do_machine_op $ arm_context_switch_hwasid pd hwasid
od"
text {* Switch into the address space of a given thread or the global address
space if none is correctly configured. *}
definition
set_vm_root :: "word32 \<Rightarrow> (unit,'z::state_ext) s_monad" where
"set_vm_root tcb \<equiv> do
thread_root_slot \<leftarrow> return (tcb, tcb_cnode_index 1);
thread_root \<leftarrow> get_cap thread_root_slot;
(case thread_root of
ArchObjectCap (PageDirectoryCap pd (Some asid)) \<Rightarrow> doE
pd' \<leftarrow> find_pd_for_asid asid;
whenE (pd \<noteq> pd') $ throwError InvalidRoot;
liftE $ arm_context_switch pd asid
odE
| _ \<Rightarrow> throwError InvalidRoot) <catch>
(\<lambda>_. do
global_pd \<leftarrow> gets (arm_global_pd \<circ> arch_state);
do_machine_op $ setCurrentPD $ addrFromPPtr global_pd
od)
od"
text {* Before deleting an ASID pool object we must deactivate all page
directories that are installed in it. *}
definition
delete_asid_pool :: "asid \<Rightarrow> word32 \<Rightarrow> (unit,'z::state_ext) s_monad" where
"delete_asid_pool base ptr \<equiv> do
assert (base && mask asid_low_bits = 0);
asid_table \<leftarrow> gets (arm_asid_table \<circ> arch_state);
when (asid_table (asid_high_bits_of base) = Some ptr) $ do
pool \<leftarrow> get_asid_pool ptr;
mapM (\<lambda>offset. (when (pool (ucast offset) \<noteq> None) $ do
flush_space $ base + offset;
invalidate_asid_entry $ base + offset
od)) [0 .e. (1 << asid_low_bits) - 1];
asid_table' \<leftarrow> return (asid_table (asid_high_bits_of base:= None));
modify (\<lambda>s. s \<lparr> arch_state := (arch_state s) \<lparr> arm_asid_table := asid_table' \<rparr>\<rparr>);
tcb \<leftarrow> gets cur_thread;
set_vm_root tcb
od
od"
text {* When deleting a page directory from an ASID pool we must deactivate
it. *}
definition
delete_asid :: "asid \<Rightarrow> word32 \<Rightarrow> (unit,'z::state_ext) s_monad" where
"delete_asid asid pd \<equiv> do
asid_table \<leftarrow> gets (arm_asid_table \<circ> arch_state);
(case asid_table (asid_high_bits_of asid) of
None \<Rightarrow> return ()
| Some pool_ptr \<Rightarrow> do
pool \<leftarrow> get_asid_pool pool_ptr;
when (pool (ucast asid) = Some pd) $ do
flush_space asid;
invalidate_asid_entry asid;
pool' \<leftarrow> return (pool (ucast asid := None));
set_asid_pool pool_ptr pool';
tcb \<leftarrow> gets cur_thread;
set_vm_root tcb
od
od)
od"
text {* Switch to a particular address space in order to perform a flush
operation. *}
definition
set_vm_root_for_flush :: "word32 \<Rightarrow> asid \<Rightarrow> (bool,'z::state_ext) s_monad" where
"set_vm_root_for_flush pd asid \<equiv> do
tcb \<leftarrow> gets cur_thread;
thread_root_slot \<leftarrow> return (tcb, tcb_cnode_index 1);
thread_root \<leftarrow> get_cap thread_root_slot;
not_is_pd \<leftarrow> (case thread_root of
ArchObjectCap (PageDirectoryCap cur_pd (Some _)) \<Rightarrow> return (cur_pd \<noteq> pd)
| _ \<Rightarrow> return True);
(if not_is_pd then do
arm_context_switch pd asid;
return True
od
else return False)
od"
definition
do_flush :: "flush_type \<Rightarrow> vspace_ref \<Rightarrow> vspace_ref \<Rightarrow> paddr \<Rightarrow> unit machine_monad" where
"do_flush flush_type vstart vend pstart \<equiv>
case flush_type of
Clean \<Rightarrow> cleanCacheRange_RAM vstart vend pstart
| Invalidate \<Rightarrow> invalidateCacheRange_RAM vstart vend pstart
| CleanInvalidate \<Rightarrow> cleanInvalidateCacheRange_RAM vstart vend pstart
| Unify \<Rightarrow> do
cleanCacheRange_PoU vstart vend pstart;
dsb;
invalidateCacheRange_I vstart vend pstart;
branchFlushRange vstart vend pstart;
isb
od"
text {* Flush mappings associated with a page table. *}
definition
flush_table :: "word32 \<Rightarrow> asid \<Rightarrow> vspace_ref \<Rightarrow> word32 \<Rightarrow> (unit,'z::state_ext) s_monad" where
"flush_table pd asid vptr pt \<equiv> do
assert (vptr && mask (pageBitsForSize ARMSection) = 0);
root_switched \<leftarrow> set_vm_root_for_flush pd asid;
maybe_hw_asid \<leftarrow> load_hw_asid asid;
when (maybe_hw_asid \<noteq> None) $ do
hw_asid \<leftarrow> return (the maybe_hw_asid);
do_machine_op $ invalidateTLB_ASID hw_asid;
when root_switched $ do
tcb \<leftarrow> gets cur_thread;
set_vm_root tcb
od
od
od"
text {* Flush mappings associated with a given page. *}
definition
flush_page :: "vmpage_size \<Rightarrow> word32 \<Rightarrow> asid \<Rightarrow> vspace_ref \<Rightarrow> (unit,'z::state_ext) s_monad" where
"flush_page page_size pd asid vptr\<equiv> do
assert (vptr && mask pageBits = 0);
root_switched \<leftarrow> set_vm_root_for_flush pd asid;
maybe_hw_asid \<leftarrow> load_hw_asid asid;
when (maybe_hw_asid \<noteq> None) $ do
hw_asid \<leftarrow> return (the maybe_hw_asid);
do_machine_op $ invalidateTLB_VAASID (vptr || ucast hw_asid);
when root_switched $ do
tcb \<leftarrow> gets cur_thread;
set_vm_root tcb
od
od
od"
text {* Return the page directory, if any, in which a given page table is mapped. *}
definition
page_table_mapped :: "asid \<Rightarrow> vspace_ref \<Rightarrow> obj_ref \<Rightarrow> (obj_ref option,'z::state_ext) s_monad" where
"page_table_mapped asid vaddr pt \<equiv> doE
pd \<leftarrow> find_pd_for_asid asid;
pd_slot \<leftarrow> returnOk $ lookup_pd_slot pd vaddr;
pde \<leftarrow> liftE $ get_pde pd_slot;
case pde of
PageTablePDE addr _ _ \<Rightarrow> returnOk $
if addrFromPPtr pt = addr then Some pd else None
| _ \<Rightarrow> returnOk None
odE <catch> (K $ return None)"
text {* Unmap a page table from its page directory. *}
definition
unmap_page_table :: "asid \<Rightarrow> vspace_ref \<Rightarrow> word32 \<Rightarrow> (unit,'z::state_ext) s_monad" where
"unmap_page_table asid vaddr pt \<equiv> do
pdOpt \<leftarrow> page_table_mapped asid vaddr pt;
case pdOpt of
None \<Rightarrow> return ()
| Some pd \<Rightarrow> do
pd_slot \<leftarrow> return $ lookup_pd_slot pd vaddr;
store_pde pd_slot InvalidPDE;
do_machine_op $ cleanByVA_PoU pd_slot (addrFromPPtr pd_slot);
flush_table pd asid vaddr pt
od
od"
text {* Check that a given frame is mapped by a given mapping entry. *}
definition
check_mapping_pptr :: "obj_ref \<Rightarrow> vmpage_size \<Rightarrow> (obj_ref + obj_ref) \<Rightarrow> (bool,'z::state_ext) s_monad" where
"check_mapping_pptr pptr pgsz tablePtr \<equiv> case tablePtr of
Inl ptePtr \<Rightarrow> do
pte \<leftarrow> get_pte ptePtr;
return $ case pte of
SmallPagePTE x _ _ \<Rightarrow> x = addrFromPPtr pptr \<and> pgsz = ARMSmallPage
| LargePagePTE x _ _ \<Rightarrow> x = addrFromPPtr pptr \<and> pgsz = ARMLargePage
| _ \<Rightarrow> False
od
| Inr pdePtr \<Rightarrow> do
pde \<leftarrow> get_pde pdePtr;
return $ case pde of
SectionPDE x _ _ _ \<Rightarrow> x = addrFromPPtr pptr \<and> pgsz = ARMSection
| SuperSectionPDE x _ _ \<Rightarrow> x = addrFromPPtr pptr \<and> pgsz = ARMSuperSection
| _ \<Rightarrow> False
od"
text {* Raise an exception if a property does not hold. *}
definition
throw_on_false :: "'e \<Rightarrow> (bool,'z::state_ext) s_monad \<Rightarrow> ('e + unit,'z::state_ext) s_monad" where
"throw_on_false ex f \<equiv> doE v \<leftarrow> liftE f; unlessE v $ throwError ex odE"
definition
"last_byte_pte x \<equiv> let pte_bits = 2 in x + ((1 << pte_bits) - 1)"
definition
"last_byte_pde x \<equiv> let pde_bits = 2 in x + ((1 << pde_bits) - 1)"
text {* Unmap a mapped page if the given mapping details are still current. *}
definition
unmap_page :: "vmpage_size \<Rightarrow> asid \<Rightarrow> vspace_ref \<Rightarrow> obj_ref \<Rightarrow> (unit,'z::state_ext) s_monad" where
"unmap_page pgsz asid vptr pptr \<equiv> doE
pd \<leftarrow> find_pd_for_asid asid;
(case pgsz of
ARMSmallPage \<Rightarrow> doE
p \<leftarrow> lookup_pt_slot pd vptr;
throw_on_false undefined $
check_mapping_pptr pptr pgsz (Inl p);
liftE $ do
store_pte p InvalidPTE;
do_machine_op $ cleanByVA_PoU p (addrFromPPtr p)
od
odE
| ARMLargePage \<Rightarrow> doE
p \<leftarrow> lookup_pt_slot pd vptr;
throw_on_false undefined $
check_mapping_pptr pptr pgsz (Inl p);
liftE $ do
assert $ p && mask 6 = 0;
slots \<leftarrow> return (map (\<lambda>x. x + p) [0, 4 .e. 60]);
mapM (swp store_pte InvalidPTE) slots;
do_machine_op $ cleanCacheRange_PoU (hd slots) (last_byte_pte (last slots))
(addrFromPPtr (hd slots))
od
odE
| ARMSection \<Rightarrow> doE
p \<leftarrow> returnOk (lookup_pd_slot pd vptr);
throw_on_false undefined $
check_mapping_pptr pptr pgsz (Inr p);
liftE $ do
store_pde p InvalidPDE;
do_machine_op $ cleanByVA_PoU p (addrFromPPtr p)
od
odE
| ARMSuperSection \<Rightarrow> doE
p \<leftarrow> returnOk (lookup_pd_slot pd vptr);
throw_on_false undefined $
check_mapping_pptr pptr pgsz (Inr p);
liftE $ do
assert $ p && mask 6 = 0;
slots \<leftarrow> return (map (\<lambda>x. x + p) [0, 4 .e. 60]);
mapM (swp store_pde InvalidPDE) slots;
do_machine_op $ cleanCacheRange_PoU (hd slots) (last_byte_pde (last slots))
(addrFromPPtr (hd slots))
od
odE);
liftE $ flush_page pgsz pd asid vptr
odE <catch> (K $ return ())"
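(* Unmapping is best-effort: if the ASID has already been deleted, or the
   slot no longer contains the expected mapping (checked by
   check_mapping_pptr), the exception is caught and unmap_page is a no-op. *)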
text {* PageDirectory and PageTable capabilities cannot be copied until they
have a virtual ASID and location assigned. This is because page directories
cannot have multiple current virtual ASIDs and page tables cannot be shared
between address spaces or virtual locations. *}
definition
arch_derive_cap :: "arch_cap \<Rightarrow> (arch_cap,'z::state_ext) se_monad"
where
"arch_derive_cap c \<equiv> case c of
PageTableCap _ (Some x) \<Rightarrow> returnOk c
| PageTableCap _ None \<Rightarrow> throwError IllegalOperation
| PageDirectoryCap _ (Some x) \<Rightarrow> returnOk c
| PageDirectoryCap _ None \<Rightarrow> throwError IllegalOperation
| PageCap r R pgs x \<Rightarrow> returnOk (PageCap r R pgs None)
| ASIDControlCap \<Rightarrow> returnOk c
| ASIDPoolCap _ _ \<Rightarrow> returnOk c"
text {* No user-modifiable data is stored in ARM-specific capabilities. *}
definition
arch_update_cap_data :: "data \<Rightarrow> arch_cap \<Rightarrow> arch_cap"
where
"arch_update_cap_data data c \<equiv> c"
text {* Actions that must be taken on finalisation of ARM-specific
capabilities. *}
definition
arch_finalise_cap :: "arch_cap \<Rightarrow> bool \<Rightarrow> (cap,'z::state_ext) s_monad"
where
"arch_finalise_cap c x \<equiv> case (c, x) of
(ASIDPoolCap ptr b, True) \<Rightarrow> do
delete_asid_pool b ptr;
return NullCap
od
| (PageDirectoryCap ptr (Some a), True) \<Rightarrow> do
delete_asid a ptr;
return NullCap
od
| (PageTableCap ptr (Some (a, v)), True) \<Rightarrow> do
unmap_page_table a v ptr;
return NullCap
od
| (PageCap ptr _ s (Some (a, v)), _) \<Rightarrow> do
unmap_page s a v ptr;
return NullCap
od
| _ \<Rightarrow> return NullCap"
text {* Remove the record of mappings from a page cap, page table cap, or page
directory cap. *}
fun
arch_reset_mem_mapping :: "arch_cap \<Rightarrow> arch_cap"
where
"arch_reset_mem_mapping (PageCap p rts sz mp) = PageCap p rts sz None"
| "arch_reset_mem_mapping (PageTableCap ptr mp) = PageTableCap ptr None"
| "arch_reset_mem_mapping (PageDirectoryCap ptr ma) = PageDirectoryCap ptr None"
| "arch_reset_mem_mapping cap = cap"
text {* Actions that must be taken to recycle ARM-specific capabilities. *}
definition
arch_recycle_cap :: "bool \<Rightarrow> arch_cap \<Rightarrow> (arch_cap,'z::state_ext) s_monad"
where
"arch_recycle_cap is_final cap \<equiv> case cap of
PageCap p _ sz _ \<Rightarrow> do
do_machine_op $ clearMemory p (2 ^ (pageBitsForSize sz));
arch_finalise_cap cap is_final;
return $ arch_reset_mem_mapping cap
od
| PageTableCap ptr mp \<Rightarrow> do
pte_bits \<leftarrow> return 2;
slots \<leftarrow> return [ptr, ptr + (1 << pte_bits) .e. ptr + (1 << pt_bits) - 1];
mapM_x (swp store_pte InvalidPTE) slots;
do_machine_op $ cleanCacheRange_PoU ptr (ptr + (1 << pt_bits) - 1)
(addrFromPPtr ptr);
case mp of None \<Rightarrow> return ()
| Some (a, v) \<Rightarrow> do
pdOpt \<leftarrow> page_table_mapped a v ptr;
when (pdOpt \<noteq> None) $ invalidate_tlb_by_asid a
od;
arch_finalise_cap cap is_final;
return (if is_final then arch_reset_mem_mapping cap else cap)
od
| PageDirectoryCap ptr ma \<Rightarrow> do
pde_bits \<leftarrow> return 2;
indices \<leftarrow> return [0 .e. (kernel_base >> pageBitsForSize ARMSection) - 1];
offsets \<leftarrow> return (map (swp (op <<) pde_bits) indices);
slots \<leftarrow> return (map (\<lambda>x. x + ptr) offsets);
mapM_x (swp store_pde InvalidPDE) slots;
do_machine_op $ cleanCacheRange_PoU ptr (ptr + (1 << pd_bits) - 1)
(addrFromPPtr ptr);
case ma of None \<Rightarrow> return ()
| Some a \<Rightarrow> doE
pd' \<leftarrow> find_pd_for_asid a;
liftE $ when (pd' = ptr) $ invalidate_tlb_by_asid a
odE <catch> K (return ());
arch_finalise_cap cap is_final;
return (if is_final then arch_reset_mem_mapping cap else cap)
od
| ASIDControlCap \<Rightarrow> return ASIDControlCap
| ASIDPoolCap ptr base \<Rightarrow> do
asid_table \<leftarrow> gets (arm_asid_table \<circ> arch_state);
when (asid_table (asid_high_bits_of base) = Some ptr) $ do
delete_asid_pool base ptr;
set_asid_pool ptr empty;
asid_table \<leftarrow> gets (arm_asid_table \<circ> arch_state);
asid_table' \<leftarrow> return (asid_table (asid_high_bits_of base \<mapsto> ptr));
modify (\<lambda>s. s \<lparr> arch_state := (arch_state s) \<lparr> arm_asid_table := asid_table' \<rparr>\<rparr>)
od;
return cap
od"
text {* A thread's virtual address space capability must be to a page directory
to be valid on the ARM architecture. *}
definition
is_valid_vtable_root :: "cap \<Rightarrow> bool" where
"is_valid_vtable_root c \<equiv> \<exists>r a. c = ArchObjectCap (PageDirectoryCap r (Some a))"
text {* A thread's IPC buffer capability must be to a page that is capable of
containing the IPC buffer without the end of the buffer spilling into another
page. *}
definition
cap_transfer_data_size :: nat where
"cap_transfer_data_size \<equiv> 3"
definition
msg_max_length :: nat where
"msg_max_length \<equiv> 120"
definition
msg_max_extra_caps :: nat where
"msg_max_extra_caps \<equiv> 3"
definition
msg_align_bits :: nat
where
"msg_align_bits \<equiv> 2 + (LEAST n. (cap_transfer_data_size + msg_max_length + msg_max_extra_caps + 2) \<le> 2 ^ n)"
lemma msg_align_bits:
"msg_align_bits = 9"
proof -
have "(LEAST n. (cap_transfer_data_size + msg_max_length + msg_max_extra_caps + 2) \<le> 2 ^ n) = 7"
proof (rule Least_equality)
show "(cap_transfer_data_size + msg_max_length + msg_max_extra_caps + 2) \<le> 2 ^ 7"
by (simp add: cap_transfer_data_size_def msg_max_length_def msg_max_extra_caps_def)
next
fix y
assume "(cap_transfer_data_size + msg_max_length + msg_max_extra_caps + 2) \<le> 2 ^ y"
hence "(2 :: nat) ^ 7 \<le> 2 ^ y"
by (simp add: cap_transfer_data_size_def msg_max_length_def msg_max_extra_caps_def)
thus "7 \<le> y"
by (rule power_le_imp_le_exp [rotated], simp)
qed
thus ?thesis unfolding msg_align_bits_def by simp
qed
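(* Concretely: 3 + 120 + 3 + 2 = 128 = 2 ^ 7 words, so the LEAST in the
   definition is 7 and msg_align_bits = 2 + 7 = 9; IPC buffers are 512-byte
   aligned. The corollary below is a small sanity-check sketch of this,
   assuming the simplifier evaluates the numeral power. *)
lemma msg_align_bits_val: "(2 :: nat) ^ msg_align_bits = 512"
  by (simp add: msg_align_bits)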
definition
check_valid_ipc_buffer :: "vspace_ref \<Rightarrow> cap \<Rightarrow> (unit,'z::state_ext) se_monad" where
"check_valid_ipc_buffer vptr c \<equiv> case c of
(ArchObjectCap (PageCap _ _ magnitude _)) \<Rightarrow> doE
whenE (\<not> is_aligned vptr msg_align_bits) $ throwError AlignmentError;
returnOk ()
odE
| _ \<Rightarrow> throwError IllegalOperation"
text {* On the abstract level, capability and VM rights share the same type.
Nevertheless, a simple set intersection might lead to an invalid value like
@{term "{AllowWrite}"}. Hence, @{const validate_vm_rights}. *}
definition
mask_vm_rights :: "vm_rights \<Rightarrow> cap_rights \<Rightarrow> vm_rights" where
"mask_vm_rights V R \<equiv> validate_vm_rights (V \<inter> R)"
text {* Decode a user argument word describing the kind of VM attributes a
mapping is to have. *}
definition
attribs_from_word :: "word32 \<Rightarrow> vm_attributes" where
"attribs_from_word w \<equiv>
let V = (if w !! 0 then {PageCacheable} else {});
V' = (if w !! 1 then insert ParityEnabled V else V)
in if w !! 2 then insert XNever V' else V'"
text {* Update the mapping data saved in a page or page table capability. *}
definition
update_map_data :: "arch_cap \<Rightarrow> (word32 \<times> word32) option \<Rightarrow> arch_cap" where
"update_map_data cap m \<equiv> case cap of PageCap p R sz _ \<Rightarrow> PageCap p R sz m
| PageTableCap p _ \<Rightarrow> PageTableCap p m"
text {* Get information about the frame of a given virtual address. *}
definition
resolve_vaddr :: "word32 \<Rightarrow> vspace_ref \<Rightarrow> ((vmpage_size \<times> obj_ref) option, 'z::state_ext) s_monad"
where
"resolve_vaddr pd vaddr \<equiv> do
pd_slot \<leftarrow> return $ lookup_pd_slot pd vaddr;
pde \<leftarrow> get_master_pde pd_slot;
case pde of
SectionPDE f _ _ _ \<Rightarrow> return $ Some (ARMSection, f)
| SuperSectionPDE f _ _ \<Rightarrow> return $ Some (ARMSuperSection, f)
| PageTablePDE t _ _ \<Rightarrow> (do
pt \<leftarrow> return $ ptrFromPAddr t;
pte_slot \<leftarrow> return $ lookup_pt_slot_no_fail pt vaddr;
pte \<leftarrow> get_master_pte pte_slot;
case pte of
LargePagePTE f _ _ \<Rightarrow> return $ Some (ARMLargePage, f)
| SmallPagePTE f _ _ \<Rightarrow> return $ Some (ARMSmallPage, f)
| _ \<Rightarrow> return None
od)
| _ \<Rightarrow> return None
od"
end