Large tee ram va size #1669


Merged: 10 commits on Aug 29, 2017
4 changes: 4 additions & 0 deletions core/arch/arm/include/mm/core_mmu.h
@@ -47,8 +47,10 @@
*/
#ifdef CFG_WITH_LPAE
#define CORE_MMU_PGDIR_SHIFT 21
#define CORE_MMU_PGDIR_LEVEL 3
#else
#define CORE_MMU_PGDIR_SHIFT 20
#define CORE_MMU_PGDIR_LEVEL 2
#endif
#define CORE_MMU_PGDIR_SIZE (1 << CORE_MMU_PGDIR_SHIFT)
#define CORE_MMU_PGDIR_MASK (CORE_MMU_PGDIR_SIZE - 1)
@@ -111,6 +113,7 @@ enum teecore_memtypes {
MEM_AREA_RES_VASPACE,
MEM_AREA_SHM_VASPACE,
MEM_AREA_TA_VASPACE,
MEM_AREA_PAGER_VASPACE,
MEM_AREA_SDP_MEM,
MEM_AREA_MAXTYPE
};
@@ -133,6 +136,7 @@ static inline const char *teecore_memtype_name(enum teecore_memtypes type)
[MEM_AREA_RES_VASPACE] = "RES_VASPACE",
[MEM_AREA_SHM_VASPACE] = "SHM_VASPACE",
[MEM_AREA_TA_VASPACE] = "TA_VASPACE",
[MEM_AREA_PAGER_VASPACE] = "PAGER_VASPACE",
[MEM_AREA_SDP_MEM] = "SDP_MEM",
};

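As context for the new CORE_MMU_PGDIR_LEVEL defines, a minimal standalone sketch (not part of the patch; CFG_WITH_LPAE is assumed to be set by the build) showing the derived pgdir constants under each configuration:

#include <stdio.h>

/* Mirrors the definitions above; CFG_WITH_LPAE selects the LPAE layout */
#ifdef CFG_WITH_LPAE
#define CORE_MMU_PGDIR_SHIFT	21	/* one pgdir spans 2 MiB */
#define CORE_MMU_PGDIR_LEVEL	3	/* translation-table level for a pgdir */
#else
#define CORE_MMU_PGDIR_SHIFT	20	/* one pgdir spans 1 MiB */
#define CORE_MMU_PGDIR_LEVEL	2	/* translation-table level for a pgdir */
#endif
#define CORE_MMU_PGDIR_SIZE	(1 << CORE_MMU_PGDIR_SHIFT)
#define CORE_MMU_PGDIR_MASK	(CORE_MMU_PGDIR_SIZE - 1)

int main(void)
{
	/* LPAE: size 0x200000, level 3; non-LPAE: size 0x100000, level 2 */
	printf("pgdir: size 0x%x, mask 0x%x, level %d\n",
	       CORE_MMU_PGDIR_SIZE, CORE_MMU_PGDIR_MASK, CORE_MMU_PGDIR_LEVEL);
	return 0;
}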
35 changes: 27 additions & 8 deletions core/arch/arm/include/mm/tee_pager.h
@@ -33,16 +33,37 @@
#include <kernel/panic.h>
#include <kernel/user_ta.h>
#include <mm/tee_mm.h>
#include <mm/core_mmu.h>
#include <string.h>
#include <trace.h>

struct tee_pager_area_head;

/*
* Reference to translation table used to map the virtual memory range
* covered by the pager.
* tee_pager_early_init() - Perform early initialization of pager
*
* Panics if some error occurs
*/
extern struct core_mmu_table_info tee_pager_tbl_info;
void tee_pager_early_init(void);

struct tee_pager_area_head;
/*
* tee_pager_get_table_info() - Fills in table info for address mapped in
* translation table managed by the pager.
* @va: address to look up
* @ti: filled in table info
*
* Returns true if address is in the pager translation tables else false
*/
bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti);

/*
* tee_pager_phys_to_virt() - Translate physical address to virtual address
* looking in the pager page tables
* @pa: address to translate
*
* Returns found virtual address or NULL on error
*/
void *tee_pager_phys_to_virt(paddr_t pa);

/*
* tee_pager_init() - Initialized the pager
@@ -72,11 +93,9 @@ void tee_pager_init(tee_mm_entry_t *mm_alias);
*
* Invalid use of flags or non-page aligned base or size or size == 0 will
* cause a panic.
*
* Return true on success or false if area can't be added.
*/
bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
const void *store, const void *hashes);
void tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
const void *store, const void *hashes);

/*
* tee_pager_add_uta_area() - Adds a pageable user ta area
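A hedged usage sketch of the two lookups declared above; lookup_paged_pa() and lookup_paged_va() are hypothetical helper names, but the calls mirror how check_pa_matches_va() and phys_to_virt_tee_ram() use the new API later in this PR:

#include <mm/core_mmu.h>
#include <mm/tee_pager.h>

/*
 * Hypothetical helper (illustration only, not part of the patch): resolve
 * the physical address currently backing a pager-managed virtual address.
 */
static paddr_t lookup_paged_pa(vaddr_t va)
{
	struct core_mmu_table_info ti;
	paddr_t pa = 0;
	uint32_t attr = 0;

	/* Fails if va is not covered by the pager's translation tables */
	if (!tee_pager_get_table_info(va, &ti))
		return 0;

	core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, va), &pa, &attr);
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return 0;	/* page not currently resident */

	/* Add the offset within the page */
	return pa | (va & ((1 << ti.shift) - 1));
}

/* Reverse lookup: PA to pager-mapped VA, NULL if nothing maps it */
static void *lookup_paged_va(paddr_t pa)
{
	return tee_pager_phys_to_virt(pa);
}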
38 changes: 6 additions & 32 deletions core/arch/arm/kernel/generic_boot.c
@@ -179,22 +179,6 @@ static void init_vfp_sec(void)
#endif

#ifdef CFG_WITH_PAGER

static size_t get_block_size(void)
{
struct core_mmu_table_info tbl_info;
unsigned l;

if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX, &tbl_info))
panic("can't find mmu tables");

l = tbl_info.level - 1;
if (!core_mmu_find_table(CFG_TEE_RAM_START, l, &tbl_info))
panic("can't find mmu table upper level");

return 1 << tbl_info.shift;
}

static void init_runtime(unsigned long pageable_part)
{
size_t n;
@@ -205,7 +189,6 @@ static void init_runtime(unsigned long pageable_part)
tee_mm_entry_t *mm;
uint8_t *paged_store;
uint8_t *hashes;
size_t block_size;

assert(pageable_size % SMALL_PAGE_SIZE == 0);
assert(hash_size == (size_t)__tmp_hashes_size);
@@ -214,12 +197,7 @@
* This needs to be initialized early to support address lookup
* in MEM_AREA_TEE_RAM
*/
if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX,
&tee_pager_tbl_info))
panic("can't find mmu tables");

if (tee_pager_tbl_info.shift != SMALL_PAGE_SHIFT)
panic("Unsupported page size in translation table");
tee_pager_early_init();

thread_init_boot_thread();

@@ -279,12 +257,9 @@
* Initialize the virtual memory pool used for main_mmu_l2_ttb which
* is supplied to tee_pager_init() below.
*/
block_size = get_block_size();
if (!tee_mm_init(&tee_mm_vcore,
ROUNDDOWN(CFG_TEE_RAM_START, block_size),
ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
block_size),
SMALL_PAGE_SHIFT, 0))
if (!tee_mm_init(&tee_mm_vcore, CFG_TEE_RAM_START,
CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
SMALL_PAGE_SHIFT, 0))
panic("tee_mm_vcore init failed");

/*
@@ -315,9 +290,8 @@
mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
pageable_size);
assert(mm);
if (!tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
TEE_MATTR_PRX, paged_store, hashes))
panic("failed to add pageable to vcore");
tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
TEE_MATTR_PRX, paged_store, hashes);

tee_pager_add_pages((vaddr_t)__pageable_start,
init_size / SMALL_PAGE_SIZE, false);
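For reference, a small sketch of the vcore pool bounds after this change; the CFG_TEE_RAM_START and CFG_TEE_RAM_VA_SIZE values below are illustrative, not taken from any particular platform:

#include <stdint.h>
#include <stdio.h>

/* Illustrative platform values only; real ones come from the platform config */
#define CFG_TEE_RAM_START	0x0e100000UL
#define CFG_TEE_RAM_VA_SIZE	(4 * 1024 * 1024)
#define SMALL_PAGE_SHIFT	12

int main(void)
{
	/*
	 * With this change the vcore pool covers exactly the TEE RAM VA
	 * window in 4 KiB granules; the old rounding to the block size
	 * (via get_block_size()) is gone.
	 */
	unsigned long lo = CFG_TEE_RAM_START;
	unsigned long hi = CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE;

	printf("tee_mm_vcore: [0x%lx, 0x%lx), %lu small pages\n",
	       lo, hi, (hi - lo) >> SMALL_PAGE_SHIFT);
	return 0;
}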
90 changes: 53 additions & 37 deletions core/arch/arm/mm/core_mmu.c
@@ -615,6 +615,8 @@ uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
case MEM_AREA_RES_VASPACE:
case MEM_AREA_SHM_VASPACE:
return 0;
case MEM_AREA_PAGER_VASPACE:
return TEE_MATTR_SECURE;
default:
panic("invalid type");
}
@@ -694,14 +696,47 @@ static void dump_mmap_table(struct tee_mmap_region *memory_map)
}
}

static void add_pager_vaspace(struct tee_mmap_region *mmap, size_t num_elems,
vaddr_t begin, vaddr_t *end, size_t *last)
{
size_t size = CFG_TEE_RAM_VA_SIZE - (*end - begin);
size_t n;
size_t pos = 0;

if (!size)
return;

if (*last >= (num_elems - 1)) {
EMSG("Out of entries (%zu) in memory map", num_elems);
panic();
}

for (n = 0; !core_mmap_is_end_of_table(mmap + n); n++)
if (map_is_flat_mapped(mmap + n))
pos = n + 1;

assert(pos <= *last);
memmove(mmap + pos + 1, mmap + pos,
sizeof(struct tee_mmap_region) * (*last - pos));
(*last)++;
memset(mmap + pos, 0, sizeof(mmap[0]));
mmap[pos].type = MEM_AREA_PAGER_VASPACE;
mmap[pos].va = *end;
mmap[pos].size = size;
mmap[pos].region_size = SMALL_PAGE_SIZE;
mmap[pos].attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE);

*end += size;
}

Review comment on "*end += size;" from @jenswi-linaro (Contributor Author), Aug 24, 2017:
This needs to be updated regardless of whether the pager is enabled or not.
Edit: Maybe not strictly necessary, as no code depends on that part of the pgdir being unused.

Reply from a Contributor:
I agree with the edited comment: when the pager is disabled, CFG_TEE_RAM_VA_SIZE is meaningless.
Hmm... maybe also useful for KASan?
However, some platforms do define CFG_TEE_RAM_VA_SIZE and some pieces of code use it. OK not to make this sequence conditional here; cleanup of CFG_TEE_RAM_VA_SIZE can be postponed until later if needed.

static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems)
{
const struct core_mmu_phys_mem *mem;
struct tee_mmap_region *map;
size_t last = 0;
size_t __maybe_unused count = 0;
vaddr_t va;
vaddr_t __maybe_unused end;
vaddr_t end;
bool __maybe_unused va_is_secure = true; /* any init value fits */

for (mem = &__start_phys_mem_map_section;
@@ -785,8 +820,9 @@ static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems)

/*
* Map flat mapped addresses first.
* 'va' will store the lower address of the flat-mapped areas to later
* setup the virtual mapping of the non flat-mapped areas.
* 'va' (resp. 'end') will store the lower (resp. higher) address of
* the flat-mapped areas to later setup the virtual mapping of the non
* flat-mapped areas.
*/
va = (vaddr_t)~0UL;
end = 0;
@@ -802,10 +838,14 @@ static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems)
assert(va >= CFG_TEE_RAM_START);
assert(end <= CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE);

add_pager_vaspace(memory_map, num_elems, va, &end, &last);

assert(!((va | end) & SMALL_PAGE_MASK));

if (core_mmu_place_tee_ram_at_top(va)) {
/* Map non-flat mapped addresses below flat mapped addresses */
for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
if (map_is_flat_mapped(map))
if (map->va)
continue;

#if !defined(CFG_WITH_LPAE)
@@ -825,9 +865,9 @@ static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems)
}
} else {
/* Map non-flat mapped addresses above flat mapped addresses */
va = ROUNDUP(va + CFG_TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE);
va = end;
for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
if (map_is_flat_mapped(map))
if (map->va)
continue;

#if !defined(CFG_WITH_LPAE)
@@ -898,6 +938,7 @@ void core_init_mmu_map(void)
case MEM_AREA_RAM_NSEC:
case MEM_AREA_RES_VASPACE:
case MEM_AREA_SHM_VASPACE:
case MEM_AREA_PAGER_VASPACE:
break;
default:
EMSG("Uhandled memtype %d", map->type);
@@ -1488,6 +1529,7 @@ static void check_pa_matches_va(void *va, paddr_t pa)
TEE_Result res;
vaddr_t v = (vaddr_t)va;
paddr_t p = 0;
struct core_mmu_table_info ti __maybe_unused;

if (core_mmu_user_va_range_is_defined()) {
vaddr_t user_va_base;
@@ -1517,9 +1559,8 @@ static void check_pa_matches_va(void *va, paddr_t pa)
panic("issue in linear address space");
return;
}
if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
struct core_mmu_table_info *ti = &tee_pager_tbl_info;

if (tee_pager_get_table_info(v, &ti)) {
uint32_t a;

/*
@@ -1528,9 +1569,9 @@ static void check_pa_matches_va(void *va, paddr_t pa)
* changes all the time. But some ranges are safe,
* rw-locked areas when the page is populated for instance.
*/
core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a);
if (a & TEE_MATTR_VALID_BLOCK) {
paddr_t mask = ((1 << ti->shift) - 1);
paddr_t mask = ((1 << ti.shift) - 1);

p |= v & mask;
if (pa != p)
@@ -1596,34 +1637,9 @@ static void *phys_to_virt_ta_vaspace(paddr_t pa)
#ifdef CFG_WITH_PAGER
static void *phys_to_virt_tee_ram(paddr_t pa)
{
struct core_mmu_table_info *ti = &tee_pager_tbl_info;
unsigned idx;
unsigned end_idx;
uint32_t a;
paddr_t p;

if (pa >= CFG_TEE_LOAD_ADDR && pa < get_linear_map_end())
return (void *)(vaddr_t)pa;

end_idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START +
CFG_TEE_RAM_VA_SIZE);
/* Most addresses are mapped lineary, try that first if possible. */
idx = core_mmu_va2idx(ti, pa);
if (idx >= core_mmu_va2idx(ti, CFG_TEE_RAM_START) &&
idx < end_idx) {
core_mmu_get_entry(ti, idx, &p, &a);
if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
return (void *)core_mmu_idx2va(ti, idx);
}

for (idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START);
idx < end_idx; idx++) {
core_mmu_get_entry(ti, idx, &p, &a);
if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
return (void *)core_mmu_idx2va(ti, idx);
}

return NULL;
return tee_pager_phys_to_virt(pa);
}
#else
static void *phys_to_virt_tee_ram(paddr_t pa)
20 changes: 8 additions & 12 deletions core/arch/arm/mm/core_mmu_lpae.c
@@ -315,12 +315,6 @@ static uint64_t mattr_to_desc(unsigned level, uint32_t attr)
return desc;
}

static uint64_t mmap_desc(uint32_t attr, uint64_t addr_pa,
unsigned level)
{
return mattr_to_desc(level, attr) | addr_pa;
}

static int mmap_region_attr(struct tee_mmap_region *mm, uint64_t base_va,
uint64_t size)
{
@@ -383,14 +377,17 @@ static struct tee_mmap_region *init_xlation_table(struct tee_mmap_region *mm,
level * 2, "", base_va, level_size);
} else if (mm->va <= base_va &&
mm->va + mm->size >= base_va + level_size &&
!(mm->pa & (level_size - 1))) {
!((mm->pa | mm->region_size) & (level_size - 1))) {
/* Next region covers all of area */
int attr = mmap_region_attr(mm, base_va, level_size);

if (attr >= 0) {
desc = mmap_desc(attr,
base_va - mm->va + mm->pa,
level);
desc = mattr_to_desc(level, attr);
if (desc)
desc |= base_va - mm->va + mm->pa;
}

if (desc != UNSET_DESC && desc)
debug_print("%*s%010" PRIx64 " %8x %s-%s-%s-%s",
level * 2, "", base_va, level_size,
attr & (TEE_MATTR_CACHE_CACHED <<
@@ -399,10 +396,9 @@
attr & TEE_MATTR_PW ? "RW" : "RO",
attr & TEE_MATTR_PX ? "X" : "XN",
attr & TEE_MATTR_SECURE ? "S" : "NS");
} else {
else
debug_print("%*s%010" PRIx64 " %8x",
level * 2, "", base_va, level_size);
}
}
/* else Next region only partially covers area, so need */

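To make the strengthened block-mapping condition above concrete, a small standalone sketch of the eligibility test with a worked case; can_use_block_desc() is an illustrative name, not an in-tree helper:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the block-mapping test used in init_xlation_table() above */
static bool can_use_block_desc(uint64_t va, uint64_t size, uint64_t pa,
			       uint64_t region_size, uint64_t base_va,
			       uint64_t level_size)
{
	return va <= base_va &&
	       va + size >= base_va + level_size &&
	       /* both the PA and the region's mapping granularity must be
		* aligned to the block size at this level */
	       !((pa | region_size) & (level_size - 1));
}

int main(void)
{
	uint64_t level_size = 0x200000;	/* 2 MiB block (LPAE level 2) */

	/* 2 MiB-aligned region mapped with 2 MiB granularity: block OK (1) */
	printf("%d\n", can_use_block_desc(0x0e000000, 0x400000, 0x0e000000,
					  0x200000, 0x0e000000, level_size));
	/* Same region but region_size = 4 KiB (e.g. MEM_AREA_PAGER_VASPACE):
	 * fails the test (0), so a table of small pages is used instead. */
	printf("%d\n", can_use_block_desc(0x0e000000, 0x400000, 0x0e000000,
					  0x1000, 0x0e000000, level_size));
	return 0;
}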