-
Notifications
You must be signed in to change notification settings - Fork 1.1k
/
Copy pathboot.c
1497 lines (1305 loc) · 39 KB
/
boot.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// SPDX-License-Identifier: BSD-2-Clause
/*
* Copyright (c) 2015-2023, Linaro Limited
* Copyright (c) 2023, Arm Limited
*/
#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <console.h>
#include <crypto/crypto.h>
#include <drivers/gic.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <ffa.h>
#include <initcall.h>
#include <inttypes.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/dt.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/tpm.h>
#include <kernel/transfer_list.h>
#include <libfdt.h>
#include <malloc.h>
#include <memtag.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <sm/psci.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>
#include <platform_config.h>
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
#include <sm/sm.h>
#endif
#if defined(CFG_WITH_VFP)
#include <kernel/vfp.h>
#endif
/*
 * In this file we're using unsigned long to represent physical pointers as
 * they are received in a single register when OP-TEE is initially entered.
 * This limits 32-bit systems to only make use of the lower 32 bits
 * of a physical address for initial parameters.
 *
 * 64-bit systems on the other hand can use full 64-bit physical pointers.
 */
#define PADDR_INVALID ULONG_MAX
#if defined(CFG_BOOT_SECONDARY_REQUEST)
struct ns_entry_context {
uintptr_t entry_point;
uintptr_t context_id;
};
struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
#endif
#ifdef CFG_BOOT_SYNC_CPU
/*
* Array used when booting, to synchronize cpu.
* When 0, the cpu has not started.
* When 1, it has started
*/
uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
DECLARE_KEEP_PAGER(sem_cpu_sync);
#endif
/*
* Must not be in .bss since it's initialized and used from assembly before
* .bss is cleared.
*/
vaddr_t boot_cached_mem_end __nex_data = 1;
static unsigned long boot_arg_fdt __nex_bss;
unsigned long boot_arg_nsec_entry __nex_bss;
static unsigned long boot_arg_pageable_part __nex_bss;
static unsigned long boot_arg_transfer_list __nex_bss;
static struct transfer_list_header *mapped_tl __nex_bss;
#ifdef CFG_SECONDARY_INIT_CNTFRQ
static uint32_t cntfrq;
#endif
/* May be overridden in plat-$(PLATFORM)/main.c */
/*
 * Default no-op early-init hook for the primary CPU; platforms override
 * this in plat-$(PLATFORM)/main.c when they need very early setup.
 * DECLARE_KEEP_PAGER keeps the symbol in the unpaged area so it can run
 * before the pager is operational.
 */
__weak void plat_primary_init_early(void)
{
}
DECLARE_KEEP_PAGER(plat_primary_init_early);
/* May be overridden in plat-$(PLATFORM)/main.c */
/* Default no-op: primary CPU interrupt controller initialization hook. */
__weak void boot_primary_init_intc(void)
{
}
/* May be overridden in plat-$(PLATFORM)/main.c */
/* Default no-op: secondary CPU interrupt controller initialization hook. */
__weak void boot_secondary_init_intc(void)
{
}
/* May be overridden in plat-$(PLATFORM)/main.c */
/*
 * Default ASLR seed provider: no platform entropy source, so return 0
 * (no randomization contributed by this hook) and warn about it.
 */
__weak unsigned long plat_get_aslr_seed(void)
{
	DMSG("Warning: no ASLR seed");
	return 0;
}
#if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
/* Generate random stack canary value on boot up */
__weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
{
TEE_Result ret = TEE_ERROR_GENERIC;
size_t i = 0;
assert(buf && ncan && size);
/*
* With virtualization the RNG is not initialized in Nexus core.
* Need to override with platform specific implementation.
*/
if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
IMSG("WARNING: Using fixed value for stack canary");
memset(buf, 0xab, ncan * size);
goto out;
}
ret = crypto_rng_read(buf, ncan * size);
if (ret != TEE_SUCCESS)
panic("Failed to generate random stack canary");
out:
/* Leave null byte in canary to prevent string base exploit */
for (i = 0; i < ncan; i++)
*((uint8_t *)buf + size * i) = 0;
}
#endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */
/*
* This function is called as a guard after each smc call which is not
* supposed to return.
*/
/*
 * Reached only if an SMC that is expected to never return does return;
 * die loudly rather than continue in an undefined state.
 */
void __panic_at_smc_return(void)
{
	panic();
}
#if defined(CFG_WITH_ARM_TRUSTED_FW)
/*
 * With TF-A acting as secure monitor OP-TEE has no monitor of its own to
 * initialize; callers must pass PADDR_INVALID to signal there is no
 * non-secure entry to record.
 */
void init_sec_mon(unsigned long nsec_entry __maybe_unused)
{
	assert(nsec_entry == PADDR_INVALID);
	/* Do nothing as we don't have a secure monitor */
}
#else
/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void init_sec_mon(unsigned long nsec_entry)
{
struct sm_nsec_ctx *nsec_ctx;
assert(nsec_entry != PADDR_INVALID);
/* Initialize secure monitor */
nsec_ctx = sm_get_nsec_ctx();
nsec_ctx->mon_lr = nsec_entry;
nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
if (nsec_entry & 1)
nsec_ctx->mon_spsr |= CPSR_T;
}
#endif
#if defined(CFG_WITH_ARM_TRUSTED_FW)
/* With TF-A the monitor owns NSACR; nothing for OP-TEE to configure. */
static void init_vfp_nsec(void)
{
}
#else
/* Grant the normal world access to the SIMD/VFP coprocessors. */
static void init_vfp_nsec(void)
{
	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
}
#endif
/*
 * Panic unless every Crypto Extension instruction group enabled in the
 * build configuration (CFG_CRYPTO_*_ARM_CE) is actually implemented by
 * this CPU. All missing groups are reported before panicking so a single
 * boot log shows the full mismatch.
 */
static void check_crypto_extensions(void)
{
	bool ce_supported = true;
	if (!feat_aes_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_AES_ARM_CE)) {
		EMSG("AES instructions are not supported");
		ce_supported = false;
	}
	if (!feat_sha1_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_SHA1_ARM_CE)) {
		EMSG("SHA1 instructions are not supported");
		ce_supported = false;
	}
	if (!feat_sha256_implemented() &&
	    IS_ENABLED(CFG_CRYPTO_SHA256_ARM_CE)) {
		EMSG("SHA256 instructions are not supported");
		ce_supported = false;
	}
	/* Check aarch64 specific instructions */
	if (IS_ENABLED(CFG_ARM64_core)) {
		if (!feat_sha512_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SHA512_ARM_CE)) {
			EMSG("SHA512 instructions are not supported");
			ce_supported = false;
		}
		if (!feat_sha3_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SHA3_ARM_CE)) {
			EMSG("SHA3 instructions are not supported");
			ce_supported = false;
		}
		if (!feat_sm3_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SM3_ARM_CE)) {
			EMSG("SM3 instructions are not supported");
			ce_supported = false;
		}
		if (!feat_sm4_implemented() &&
		    IS_ENABLED(CFG_CRYPTO_SM4_ARM_CE)) {
			EMSG("SM4 instructions are not supported");
			ce_supported = false;
		}
	}
	if (!ce_supported)
		panic("HW doesn't support CE instructions");
}
#if defined(CFG_WITH_VFP)
#ifdef ARM32
/*
 * Make SIMD/VFP usable from secure state on Armv7-A: clear the disable
 * bits and grant full CP10/CP11 access in CPACR.
 */
static void init_vfp_sec(void)
{
	uint32_t cpacr = read_cpacr();
	/*
	 * Enable Advanced SIMD functionality.
	 * Enable use of D16-D31 of the Floating-point Extension register
	 * file.
	 */
	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
	/*
	 * Enable usage of CP10 and CP11 (SIMD/VFP) (both kernel and user
	 * mode.
	 */
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
	write_cpacr(cpacr);
}
#endif /* ARM32 */
#ifdef ARM64
/* On AArch64 VFP stays off until explicitly enabled per thread. */
static void init_vfp_sec(void)
{
	/* Not using VFP until thread_kernel_enable_vfp() */
	vfp_disable();
}
#endif /* ARM64 */
#else /* CFG_WITH_VFP */
static void init_vfp_sec(void)
{
	/* Not using VFP */
}
#endif
#ifdef CFG_SECONDARY_INIT_CNTFRQ
/*
 * Capture the generic timer frequency on the primary CPU so that
 * secondary CPUs, whose CNTFRQ may not be set by earlier boot stages,
 * can be programmed with the same value in secondary_init_cntfrq().
 */
static void primary_save_cntfrq(void)
{
	assert(cntfrq == 0);
	/*
	 * CNTFRQ should be initialized on the primary CPU by a
	 * previous boot stage
	 */
	cntfrq = read_cntfrq();
}
/* Program this secondary CPU's CNTFRQ from the value saved on the primary. */
static void secondary_init_cntfrq(void)
{
	assert(cntfrq != 0);
	write_cntfrq(cntfrq);
}
#else /* CFG_SECONDARY_INIT_CNTFRQ */
static void primary_save_cntfrq(void)
{
}
static void secondary_init_cntfrq(void)
{
}
#endif
#ifdef CFG_CORE_SANITIZE_KADDRESS
/* Invoke each function pointer in the linker-provided constructor table. */
static void init_run_constructors(void)
{
	const vaddr_t *ctor;
	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
		((void (*)(void))(*ctor))();
}
/*
 * Bring up kernel address sanitizing: verify the shadow offset the
 * compiler was built with, declare the shadowed range, tag the sections
 * that no constructor opens, run constructors, then start ASAN.
 */
static void init_asan(void)
{
	/*
	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
	 * Since all the needed values to calculate the value of
	 * CFG_ASAN_SHADOW_OFFSET isn't available in to make we need to
	 * calculate it in advance and hard code it into the platform
	 * conf.mk. Here where we have all the needed values we double
	 * check that the compiler is supplied the correct value.
	 */
#define __ASAN_SHADOW_START \
	ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
#define __CFG_ASAN_SHADOW_OFFSET \
	(__ASAN_SHADOW_START - (TEE_RAM_START / 8))
	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
#undef __ASAN_SHADOW_START
#undef __CFG_ASAN_SHADOW_OFFSET
	/*
	 * Assign area covered by the shadow area, everything from start up
	 * to the beginning of the shadow area.
	 */
	asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);
	/*
	 * Add access to areas that aren't opened automatically by a
	 * constructor.
	 */
	asan_tag_access(&__ctor_list, &__ctor_end);
	asan_tag_access(__rodata_start, __rodata_end);
#ifdef CFG_WITH_PAGER
	asan_tag_access(__pageable_start, __pageable_end);
#endif /*CFG_WITH_PAGER*/
	asan_tag_access(__nozi_start, __nozi_end);
#ifdef ARM32
	asan_tag_access(__exidx_start, __exidx_end);
	asan_tag_access(__extab_start, __extab_end);
#endif
	init_run_constructors();
	/* Everything is tagged correctly, let's start address sanitizing. */
	asan_start();
}
#else /*CFG_CORE_SANITIZE_KADDRESS*/
static void init_asan(void)
{
}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/
#if defined(CFG_MEMTAG)
/* Called from entry_a64.S only when MEMTAG is configured */
/* Called from entry_a64.S only when MEMTAG is configured */
/* Select real or no-op memory-tagging ops based on MTE availability. */
void boot_init_memtag(void)
{
	memtag_init_ops(feat_mte_implemented());
}
/*
 * core_mmu_for_each_map() callback: zero the MTE tags of the secure RAM
 * regions (nexus RO and overall secure RAM); other region types are left
 * untouched. Always reports success so iteration continues.
 */
static TEE_Result mmap_clear_memtag(struct tee_mmap_region *map,
				    void *ptr __unused)
{
	if (map->type == MEM_AREA_NEX_RAM_RO ||
	    map->type == MEM_AREA_SEC_RAM_OVERALL) {
		DMSG("Clearing tags for VA %#"PRIxVA"..%#"PRIxVA,
		     map->va, map->va + map->size - 1);
		memtag_set_tags((void *)map->va, map->size, 0);
	}

	return TEE_SUCCESS;
}
/* Called from entry_a64.S only when MEMTAG is configured */
/* Called from entry_a64.S only when MEMTAG is configured */
/* Walk the whole memory map and reset tags via mmap_clear_memtag(). */
void boot_clear_memtag(void)
{
	core_mmu_for_each_map(NULL, mmap_clear_memtag);
}
#endif
#ifdef CFG_WITH_PAGER
#ifdef CFG_CORE_SANITIZE_KADDRESS
static void carve_out_asan_mem(void)
{
nex_phys_mem_partial_carve_out(ASAN_MAP_PA, ASAN_MAP_SZ);
}
#else
static void carve_out_asan_mem(void)
{
}
#endif
/* Log the total size of the pager's physical page pool. */
static void print_pager_pool_size(void)
{
	struct tee_pager_stats __maybe_unused stats;
	tee_pager_get_stats(&stats);
	IMSG("Pager pool size: %zukB",
	     stats.npages_all * SMALL_PAGE_SIZE / 1024);
}
/*
 * Initialize the core virtual memory pool covering the TEE RAM virtual
 * range, excluding the ASAN shadow area when address sanitizing is on.
 */
static void init_virt_pool(tee_mm_pool_t *virt_pool)
{
	const vaddr_t begin = VCORE_START_VA;
	size_t size = TEE_RAM_VA_SIZE;
#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* Carve out asan memory, flat mapped after core memory */
	if (begin + size > ASAN_SHADOW_PA)
		size = ASAN_MAP_PA - begin;
#endif
	if (!tee_mm_init(virt_pool, begin, size, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("core_virt_mem_pool init failed");
}
/*
* With CFG_CORE_ASLR=y the init part is relocated very early during boot.
* The init part is also paged just as the rest of the normal paged code, with
* the difference that it's preloaded during boot. When the backing store
* is configured the entire paged binary is copied in place and then also
* the init part. Since the init part has been relocated (references to
* addresses updated to compensate for the new load address) this has to be
* undone for the hashes of those pages to match with the original binary.
*
* If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
* unchanged.
*/
static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
{
#ifdef CFG_CORE_ASLR
	unsigned long *ptr = NULL;
	const uint32_t *reloc = NULL;
	const uint32_t *reloc_end = NULL;
	unsigned long offs = boot_mmu_config.map_offset;
	const struct boot_embdata *embdata = (const void *)__init_end;
	/* Relocation entries are sorted, expressed relative to TEE_LOAD_ADDR */
	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;
	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
	for (; reloc < reloc_end; reloc++) {
		if (*reloc < addr_start)
			continue;
		/* Sorted table: first entry past the init range ends the walk */
		if (*reloc >= addr_end)
			break;
		/* Subtract the ASLR offset previously added at this location */
		ptr = (void *)(paged_store + *reloc - addr_start);
		*ptr -= offs;
	}
#endif
}
/*
 * Allocate the read-only paged fobj backing @mm. With ASLR the fobj must
 * also know the relocation table so pages can be re-relocated after each
 * hash-verified page-in; without ASLR a plain hash-checked fobj suffices.
 */
static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
				   void *store)
{
	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
#ifdef CFG_CORE_ASLR
	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *reloc = __init_end + embdata->reloc_offset;
	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
					 reloc, embdata->reloc_len, store);
#else
	return fobj_ro_paged_alloc(num_pages, hashes, store);
#endif
}
/*
 * Bring up the pager: copy the pageable binary into its backing store,
 * verify every page against the embedded SHA-256 hashes, then hand the
 * virtual ranges and spare physical pages over to the pager.
 * @pageable_part is the physical address where the bootloader placed the
 * non-init pageable part of the binary.
 */
static void init_pager_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)(__init_end - __init_start);
	size_t pageable_start = (size_t)__pageable_start;
	size_t pageable_end = (size_t)__pageable_end;
	size_t pageable_size = pageable_end - pageable_start;
	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
			     VCORE_START_VA;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *tmp_hashes = NULL;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	uint8_t *paged_store = NULL;
	uint8_t *hashes = NULL;
	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(embdata->total_len >= embdata->hashes_offset +
				     embdata->hashes_len);
	assert(hash_size == embdata->hashes_len);
	tmp_hashes = __init_end + embdata->hashes_offset;
	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();
	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	/* Copy hashes out of the init area before it may be reused */
	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
	/*
	 * The pager is about to be enabled below, eventual temporary boot
	 * memory allocation must be removed now.
	 */
	boot_mem_release_tmp_alloc();
	carve_out_asan_mem();
	mm = nex_phys_mem_ta_alloc(pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm),
				   MEM_AREA_SEC_RAM_OVERALL, pageable_size);
	/*
	 * Load pageable part in the dedicated allocated area:
	 * - Move pageable non-init part into pageable area. Note bootloader
	 *   may have loaded it anywhere in TA RAM hence use memmove().
	 * - Copy pageable init part from current location into pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part),
			     __pageable_part_end - __pageable_part_start),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);
	/*
	 * Undo eventual relocation for the init part so the hash checks
	 * can pass.
	 */
	undo_init_relocation(paged_store);
	/* Check that hashes of what's in pageable area is OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;
		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, (void *)page, res);
			panic();
		}
	}
	/*
	 * Assert prepaged init sections are page aligned so that nothing
	 * trails uninited at the end of the premapped init area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));
	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_virt_pool(&core_virt_mem_pool);
	/*
	 * Assign alias area for pager end of the small page block the rest
	 * of the binary is loaded into. We're taking more than needed, but
	 * we're guaranteed to not need more than the physical amount of
	 * TZSRAM.
	 */
	mm = tee_mm_alloc2(&core_virt_mem_pool,
			   (vaddr_t)core_virt_mem_pool.lo +
			   core_virt_mem_pool.size - TZSRAM_SIZE,
			   TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);
	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat map core memory) ends there.
	 */
	mm = tee_mm_alloc2(&core_virt_mem_pool, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);
	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&core_virt_mem_pool, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	fobj = ro_paged_alloc(mm, hashes, paged_store);
	assert(fobj);
	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
				  fobj);
	fobj_put(fobj);
	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages(pageable_start + init_size,
			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
			    true);
	if (pageable_end < tzsram_end)
		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
					SMALL_PAGE_SIZE, true);
	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when the secure bootloader runs in TZRAM
	 * and its memory can be reused by OP-TEE once boot stages complete.
	 */
	tee_pager_add_pages(core_virt_mem_pool.lo,
			    (VCORE_UNPG_RX_PA - core_virt_mem_pool.lo) /
				SMALL_PAGE_SIZE,
			    true);
	print_pager_pool_size();
}
#else /*!CFG_WITH_PAGER*/
static void init_pager_runtime(unsigned long pageable_part __unused)
{
}
#endif
#if defined(CFG_DT)
/*
 * Add a /firmware/optee node (compatible "linaro,optee-tz", method "smc")
 * to the external device tree, including the async-notification interrupt
 * when configured. Returns 0 on success (or if the node already exists),
 * -1 on any libfdt failure.
 */
static int add_optee_dt_node(struct dt_descriptor *dt)
{
	int offs;
	int ret;
	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
		DMSG("OP-TEE Device Tree node already exists!");
		return 0;
	}
	offs = fdt_path_offset(dt->blob, "/firmware");
	if (offs < 0) {
		offs = add_dt_path_subnode(dt, "/", "firmware");
		if (offs < 0)
			return -1;
	}
	offs = fdt_add_subnode(dt->blob, offs, "optee");
	if (offs < 0)
		return -1;
	ret = fdt_setprop_string(dt->blob, offs, "compatible",
				 "linaro,optee-tz");
	if (ret < 0)
		return -1;
	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
	if (ret < 0)
		return -1;
	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
		/*
		 * The format of the interrupt property is defined by the
		 * binding of the interrupt domain root. In this case it's
		 * one Arm GIC v1, v2 or v3 so we must be compatible with
		 * these.
		 *
		 * An SPI type of interrupt is indicated with a 0 in the
		 * first cell. A PPI type is indicated with value 1.
		 *
		 * The interrupt number goes in the second cell where
		 * SPIs ranges from 0 to 987 and PPI ranges from 0 to 15.
		 *
		 * Flags are passed in the third cells.
		 */
		uint32_t itr_trigger = 0;
		uint32_t itr_type = 0;
		uint32_t itr_id = 0;
		uint32_t val[3] = { };
		/* PPI are visible only in current CPU cluster */
		static_assert(IS_ENABLED(CFG_CORE_FFA) ||
			      !CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
			      (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
			       GIC_SPI_BASE) ||
			      ((CFG_TEE_CORE_NB_CORE <= 8) &&
			       (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
				GIC_PPI_BASE)));
		if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
			itr_type = GIC_SPI;
			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
			itr_trigger = IRQ_TYPE_EDGE_RISING;
		} else {
			itr_type = GIC_PPI;
			itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
			itr_trigger = IRQ_TYPE_EDGE_RISING |
				      GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
		}
		/* DT cells are big-endian regardless of CPU endianness */
		val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
		val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
		val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
				  sizeof(val));
		if (ret < 0)
			return -1;
	}
	return 0;
}
#ifdef CFG_PSCI_ARM32
/*
 * Append @str (including its NUL terminator, as required by the device
 * tree string-list format) to the node's "compatible" property.
 */
static int append_psci_compatible(void *fdt, int offs, const char *str)
{
	size_t len = strlen(str) + 1;

	return fdt_appendprop(fdt, offs, "compatible", str, len);
}
/*
 * Create the /psci node advertising PSCI 1.0/0.2 over SMC together with
 * the function IDs for suspend/off/on/poweroff/reset. Returns 0 on
 * success (or if the node already exists), -1 on any libfdt failure.
 */
static int dt_add_psci_node(struct dt_descriptor *dt)
{
	int offs;
	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
		DMSG("PSCI Device Tree node already exists!");
		return 0;
	}
	offs = add_dt_path_subnode(dt, "/", "psci");
	if (offs < 0)
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
		return -1;
	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
		return -1;
	return 0;
}
/*
 * Return 0 if any string in the node's "compatible" property starts with
 * @prefix, -1 otherwise (including a missing property). The property is a
 * list of NUL-terminated strings packed back to back in @plen bytes.
 */
static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
				    const char *prefix)
{
	const size_t prefix_len = strlen(prefix);
	size_t l;
	int plen;
	const char *prop;
	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
	if (!prop)
		return -1;
	while (plen > 0) {
		/*
		 * Only compare when at least prefix_len bytes of property
		 * data remain, otherwise memcmp() could read past the end
		 * of the property.
		 */
		if ((size_t)plen >= prefix_len &&
		    memcmp(prop, prefix, prefix_len) == 0)
			return 0; /* match */
		l = strlen(prop) + 1;
		prop += l;
		plen -= l;
	}
	return -1;
}
/*
 * For every arm,cortex-a* node lacking an "enable-method" property, set
 * it to "psci". Each successful property write may shift node offsets in
 * the blob, so scanning restarts from the beginning; already-updated
 * nodes are skipped on the next pass, guaranteeing termination.
 */
static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
{
	int offs = 0;
	while (1) {
		offs = fdt_next_node(dt->blob, offs, NULL);
		if (offs < 0)
			break;
		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
			continue; /* already set */
		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
			continue; /* no compatible */
		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
			return -1;
		/* Need to restart scanning as offsets may have changed */
		offs = 0;
	}
	return 0;
}
/* Add the PSCI node and CPU enable-methods to the external DT. */
static int config_psci(struct dt_descriptor *dt)
{
	if (dt_add_psci_node(dt))
		return -1;
	return dt_add_psci_cpu_enable_methods(dt);
}
#else
/* PSCI not provided by OP-TEE: nothing to add to the DT. */
static int config_psci(struct dt_descriptor *dt __unused)
{
	return 0;
}
#endif /*CFG_PSCI_ARM32*/
/* Reserve the TZDRAM range in the DT so the normal world won't touch it. */
static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
{
	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
				   CFG_TZDRAM_SIZE);
}
/*
 * Patch the external (normal world) device tree: add the OP-TEE node
 * (non-FF-A configs), PSCI configuration and reserved-memory entries.
 * Silently returns when no external DT is present; panics on failure
 * since a half-patched DT would misconfigure the normal world.
 */
static void update_external_dt(void)
{
	struct dt_descriptor *dt = get_external_dt_desc();
	if (!dt || !dt->blob)
		return;
	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
		panic("Failed to add OP-TEE Device Tree node");
	if (config_psci(dt))
		panic("Failed to config PSCI");
#ifdef CFG_CORE_RESERVED_SHM
	if (mark_static_shm_as_reserved(dt))
		panic("Failed to config non-secure memory");
#endif
	if (mark_tzdram_as_reserved(dt))
		panic("Failed to config secure memory");
}
#else /*CFG_DT*/
static void update_external_dt(void)
{
}
#endif /*!CFG_DT*/
/*
 * Run the registered initcall levels and finish security setup that
 * requires a ready RNG (pauth keys, refreshed stack canaries).
 */
void init_tee_runtime(void)
{
	/*
	 * With virtualization we call this function when creating the
	 * OP-TEE partition instead.
	 */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		call_preinitcalls();
	call_early_initcalls();
	call_service_initcalls();
	/*
	 * These two functions use crypto_rng_read() to initialize the
	 * pauth keys. Once call_initcalls() returns we're guaranteed that
	 * crypto_rng_read() is ready to be used.
	 */
	thread_init_core_local_pauth_keys();
	thread_init_thread_pauth_keys();
	/*
	 * Reinitialize canaries around the stacks with crypto_rng_read().
	 *
	 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
	 * require synchronization between thread_check_canaries() and
	 * thread_update_canaries().
	 */
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
		thread_update_canaries();
}
/*
 * boot_mem_foreach_padding() callback: donate each padding range to the
 * heap — the nexus heap when virtualization is enabled, else the regular
 * heap. Returns true to continue iteration.
 */
static bool add_padding_to_pool(vaddr_t va, size_t len, void *ptr __unused)
{
#ifdef CFG_NS_VIRTUALIZATION
	nex_malloc_add_pool((void *)va, len);
#else
	malloc_add_pool((void *)va, len);
#endif
	return true;
}
/*
 * Early primary-CPU initialization: mask exceptions, set up VFP/crypto
 * checks, ASAN, heaps, the physical memory map and (optionally) the
 * pager, then the thread subsystem. Order is significant throughout.
 */
static void init_primary(unsigned long pageable_part)
{
	vaddr_t va = 0;
	/*
	 * Mask asynchronous exceptions before switch to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	primary_save_cntfrq();
	init_vfp_sec();
	if (IS_ENABLED(CFG_CRYPTO_WITH_CE))
		check_crypto_extensions();
	init_asan();
	/*
	 * By default whole OP-TEE uses malloc, so we need to initialize
	 * it early. But, when virtualization is enabled, malloc is used
	 * only by TEE runtime, so malloc should be initialized later, for
	 * every virtual partition separately. Core code uses nex_malloc
	 * instead.
	 */
#ifdef CFG_WITH_PAGER
	/* Add heap2 first as heap1 may be too small as initial bget pool */
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
#endif
#ifdef CFG_NS_VIRTUALIZATION
	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
					      __nex_heap_start);
#else
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif
	IMSG_RAW("\n");
	core_mmu_save_mem_map();
	core_mmu_init_phys_mem();
	boot_mem_foreach_padding(add_padding_to_pool, NULL);
	va = boot_mem_release_unused();
	if (!IS_ENABLED(CFG_WITH_PAGER)) {
		/*
		 * We must update boot_cached_mem_end to reflect the memory
		 * just unmapped by boot_mem_release_unused().
		 */
		assert(va && va <= boot_cached_mem_end);
		boot_cached_mem_end = va;
	}
	if (IS_ENABLED(CFG_WITH_PAGER)) {
		/*
		 * Pager: init_runtime() calls thread_kernel_enable_vfp()
		 * so we must set a current thread right now to avoid a
		 * chicken-and-egg problem (thread_init_boot_thread() sets
		 * the current thread but needs things set by
		 * init_runtime()).
		 */
		thread_get_core_local()->curr_thread = 0;
		init_pager_runtime(pageable_part);
	}
	thread_init_primary();
	thread_init_per_cpu();
}
/*
 * Report whether non-maskable FIQ (SCTLR.NMFI) is active on this CPU.
 * Only meaningful on ARM32; AArch64 removed the feature entirely.
 */
static bool cpu_nmfi_enabled(void)
{
#if defined(ARM32)
	return read_sctlr() & SCTLR_NMFI;
#else
	/* Note: ARM64 does not feature non-maskable FIQ support. */
	return false;
#endif
}
/*
* Note: this function is weak just to make it possible to exclude it from
* the unpaged area.
*/
void __weak boot_init_primary_late(unsigned long fdt __unused,
unsigned long manifest __unused)
{
size_t fdt_size = CFG_DTB_MAX_SIZE;
if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) {
struct transfer_list_entry *tl_e = NULL;
tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
if (tl_e) {
/*