diff --git a/core/arch/arm/include/arm32.h b/core/arch/arm/include/arm32.h index 45890135049..109e64c3dd9 100644 --- a/core/arch/arm/include/arm32.h +++ b/core/arch/arm/include/arm32.h @@ -33,6 +33,12 @@ #include #include +#define CORTEX_A7_PART_NUM 0xC07 +#define CORTEX_A9_PART_NUM 0xC09 + +#define MIDR_PRIMARY_PART_NUM_SHIFT 4 +#define MIDR_PRIMARY_PART_NUM_WIDTH 12 + #define CPSR_MODE_MASK ARM32_CPSR_MODE_MASK #define CPSR_MODE_USR ARM32_CPSR_MODE_USR #define CPSR_MODE_FIQ ARM32_CPSR_MODE_FIQ diff --git a/core/arch/arm/include/arm32_macros.S b/core/arch/arm/include/arm32_macros.S index f3d821d4fd9..f9093834f9a 100644 --- a/core/arch/arm/include/arm32_macros.S +++ b/core/arch/arm/include/arm32_macros.S @@ -27,6 +27,10 @@ /* Please keep them sorted based on the CRn register */ + .macro read_midr reg + mrc p15, 0, \reg, c0, c0, 0 + .endm + .macro read_ctr reg mrc p15, 0, \reg, c0, c0, 1 .endm diff --git a/core/arch/arm/include/kernel/thread.h b/core/arch/arm/include/kernel/thread.h index 7a0bb6a1957..1df7d0ef131 100644 --- a/core/arch/arm/include/kernel/thread.h +++ b/core/arch/arm/include/kernel/thread.h @@ -62,6 +62,7 @@ struct thread_core_local { uint32_t flags; vaddr_t abt_stack_va_end; #ifdef ARM32 + paddr_t sm_pm_ctx_phys; uint32_t r[2]; #endif #ifdef ARM64 diff --git a/core/arch/arm/include/sm/pm.h b/core/arch/arm/include/sm/pm.h new file mode 100644 index 00000000000..7048e2b5edc --- /dev/null +++ b/core/arch/arm/include/sm/pm.h @@ -0,0 +1,50 @@ +/* + * Copyright 2017 NXP + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef SM_PM_H +#define SM_PM_H +#include +#include + +struct sm_pm_ctx { + uint32_t sp; + paddr_t cpu_resume_addr; + uint32_t suspend_regs[16]; +}; + +/* suspend/resume core functions */ +void sm_pm_cpu_suspend_save(struct sm_pm_ctx *ptr, uint32_t sp); +void sm_pm_cpu_do_suspend(uint32_t *ptr); +void sm_pm_cpu_do_resume(void); + +/* + * Exported to platform suspend, arg will be passed to fn as r0 + * Return value: 0 - cpu resumed from suspended state. + * -1 - cpu not suspended. 
+ */ +int sm_pm_cpu_suspend(uint32_t arg, int (*fn)(uint32_t)); +#endif diff --git a/core/arch/arm/include/sm/sm.h b/core/arch/arm/include/sm/sm.h index 3446506fba4..320e26b2d3f 100644 --- a/core/arch/arm/include/sm/sm.h +++ b/core/arch/arm/include/sm/sm.h @@ -134,4 +134,6 @@ static inline bool sm_platform_handler(__unused struct sm_ctx *ctx) bool sm_platform_handler(struct sm_ctx *ctx); #endif +void sm_save_modes_regs(struct sm_mode_regs *regs); +void sm_restore_modes_regs(struct sm_mode_regs *regs); #endif /*SM_SM_H*/ diff --git a/core/arch/arm/kernel/asm-defines.c b/core/arch/arm/kernel/asm-defines.c index 1e3ed4037e4..a08ced3d4ca 100644 --- a/core/arch/arm/kernel/asm-defines.c +++ b/core/arch/arm/kernel/asm-defines.c @@ -26,6 +26,7 @@ */ #include +#include #include #include #include "thread_private.h" @@ -55,6 +56,11 @@ DEFINES /* struct thread_core_local */ DEFINE(THREAD_CORE_LOCAL_R0, offsetof(struct thread_core_local, r[0])); + DEFINE(THREAD_CORE_LOCAL_SM_PM_CTX_PHYS, + offsetof(struct thread_core_local, sm_pm_ctx_phys)); + DEFINE(THREAD_CORE_LOCAL_SIZE, sizeof(struct thread_core_local)); + + DEFINE(SM_PM_CTX_SIZE, sizeof(struct sm_pm_ctx)); #endif /*ARM32*/ #ifdef ARM64 diff --git a/core/arch/arm/sm/pm.c b/core/arch/arm/sm/pm.c new file mode 100644 index 00000000000..cc97dd5debe --- /dev/null +++ b/core/arch/arm/sm/pm.c @@ -0,0 +1,72 @@ +/* + * Copyright 2017 NXP + * + * Peng Fan + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if CFG_TEE_CORE_NB_CORE > 4 +#error "Max support 4 cores in one cluster now" +#endif + +void sm_pm_cpu_suspend_save(struct sm_pm_ctx *ctx, uint32_t sp) +{ + struct thread_core_local *p = thread_get_core_local(); + + p->sm_pm_ctx_phys = virt_to_phys((void *)ctx); + + /* The content will be passed to sm_pm_cpu_do_resume as register sp */ + ctx->sp = sp; + ctx->cpu_resume_addr = + virt_to_phys((void *)(vaddr_t)sm_pm_cpu_do_resume); + + sm_pm_cpu_do_suspend(ctx->suspend_regs); + + dcache_op_level1(DCACHE_OP_CLEAN_INV); + +#ifdef CFG_PL310 + arm_cl2_cleanbyway(core_mmu_get_va(PL310_BASE, MEM_AREA_IO_SEC)); +#endif +} diff --git a/core/arch/arm/sm/pm_a32.S b/core/arch/arm/sm/pm_a32.S new file mode 100644 index 00000000000..1540cd7da07 --- /dev/null +++ b/core/arch/arm/sm/pm_a32.S @@ -0,0 +1,225 @@ +/* + * Copyright 2017 NXP + * + * Peng Fan + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following 
conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +.section .text + +/* + * int sm_pm_cpu_suspend(uint32_t arg, int (*fn)(uint32_t)) + * @arg will be passed to fn as argument + * return value: 0 - cpu resumed from suspended state. + * -1 - cpu not suspended. 
+ */ +FUNC sm_pm_cpu_suspend, : +UNWIND( .fnstart) +UNWIND( .cantunwind) + push {r4 - r12, lr} + mov r5, sp + sub sp, sp, #SM_PM_CTX_SIZE + push {r0, r1} + + mov r1, r5 + add r0, sp, #8 + blx sm_pm_cpu_suspend_save + adr lr, aborted + /* Jump to arch specific suspend */ + pop {r0, pc} +aborted: + /* cpu not suspended */ + add sp, sp, #SM_PM_CTX_SIZE + /* Return -1 to the caller */ + mov r0, #(-1) +suspend_return: + pop {r4 - r12, pc} +UNWIND( .fnend) +END_FUNC sm_pm_cpu_suspend + +FUNC sm_pm_cpu_do_suspend, : +UNWIND( .fnstart) +UNWIND( .cantunwind) + push {r4 - r11} + read_midr r4 + ubfx r5, r4, #4, #12 + ldr r4, =CORTEX_A7_PART_NUM + cmp r5, r4 + beq a7_suspend + ldr r4, =CORTEX_A9_PART_NUM + cmp r5, r4 + beq a9_suspend + /* cpu not supported */ + b . + /* A9 needs PCR/DIAG */ +a9_suspend: + read_pcr r4 + read_diag r5 + stmia r0!, {r4 - r5} +a7_suspend: + read_fcseidr r4 + read_tpidruro r5 + stmia r0!, {r4 - r5} + read_dacr r4 +#ifdef CFG_WITH_LPAE +#error "Not supported" +#else + read_ttbr0 r5 + read_ttbr1 r6 + read_ttbcr r7 +#endif + read_sctlr r8 + read_actlr r9 + read_cpacr r10 + read_mvbar r11 + stmia r0!, {r4 - r11} + read_prrr r4 + read_nmrr r5 + read_vbar r6 + read_nsacr r7 + stmia r0, {r4 - r7} + pop {r4 - r11} + bx lr +UNWIND( .fnend) +END_FUNC sm_pm_cpu_do_suspend + +FUNC sm_pm_cpu_resume, : +UNWIND( .fnstart) +UNWIND( .cantunwind) + cpsid aif + + /* Call into the runtime address of get_core_pos */ + adr r0, _core_pos + ldr r1, [r0] + add r0, r0, r1 + blx r0 + + /* + * At this point, MMU is not enabled now. + * 1. Get the runtime physical address of _suspend_sp + * 2. Get the offset from _suspend_sp to &thread_core_local + * 3. Get the runtime physical address of thread_core_local + * Since moving towards non-linear mapping, + * `ldr r0, =thread_core_local` is not used here. 
+ */ + adr r4, _suspend_sp + ldr r5, [r4] + add r4, r4, r5 + + mov_imm r1, THREAD_CORE_LOCAL_SIZE + mla r0, r0, r1, r4 + + ldr r0, [r0, #THREAD_CORE_LOCAL_SM_PM_CTX_PHYS] + /* Need to use r0!, because sm_pm_cpu_do_resume needs it */ + ldmia r0!, {sp, pc} +UNWIND( .fnend) +END_FUNC sm_pm_cpu_resume + +/* + * void sm_pm_cpu_do_resume(paddr suspend_regs) __noreturn; + * Restore the registers stored by sm_pm_cpu_do_suspend + * r0 points to the physical base address of the suspend_regs + * field of struct sm_pm_ctx. + */ +FUNC sm_pm_cpu_do_resume, : +UNWIND( .fnstart) +UNWIND( .cantunwind) + read_midr r4 + ubfx r5, r4, #4, #12 + ldr r4, =CORTEX_A7_PART_NUM + cmp r5, r4 + beq a7_resume + + /* + * A9 needs PCR/DIAG + */ + ldmia r0!, {r4 - r5} + write_pcr r4 + write_diag r5 + +a7_resume: + /* v7 resume */ + mov ip, #0 + /* Invalidate icache to PoU */ + write_iciallu + /* set reserved context */ + write_contextidr ip + ldmia r0!, {r4 - r5} + write_fcseidr r4 + write_tpidruro r5 + ldmia r0!, {r4 - r11} + /* Invalidate entire TLB */ + write_tlbiall + write_dacr r4 +#ifdef CFG_WITH_LPAE +#error "Not supported" +#else + write_ttbr0 r5 + write_ttbr1 r6 + write_ttbcr r7 +#endif + + ldmia r0, {r4 - r7} + write_prrr r4 + write_nmrr r5 + write_vbar r6 + write_nsacr r7 + + write_actlr r9 + write_cpacr r10 + write_mvbar r11 + write_bpiall + isb + dsb + /* MMU will be enabled here */ + write_sctlr r8 + isb + mov r0, #0 + b suspend_return +UNWIND( .fnend) +END_FUNC sm_pm_cpu_do_resume + +/* + * The following will be located in text section whose attribute is + * marked as readonly, but we only need to read here + * _suspend_sp stores the offset from _suspend_sp to thread_core_local. + * _core_pos stores the offset from _core_pos to get_core_pos. + */ +.align 2 +.extern thread_core_local +_suspend_sp: + .long thread_core_local - . +.extern get_core_pos +_core_pos: + .long get_core_pos - . 
diff --git a/core/arch/arm/sm/sm_private.h b/core/arch/arm/sm/sm_private.h index 0b41becea55..6578d473aa9 100644 --- a/core/arch/arm/sm/sm_private.h +++ b/core/arch/arm/sm/sm_private.h @@ -30,9 +30,5 @@ /* Returns true if returning to sec, false if returning to nsec */ bool sm_from_nsec(struct sm_ctx *ctx); - -void sm_save_modes_regs(struct sm_mode_regs *regs); -void sm_restore_modes_regs(struct sm_mode_regs *regs); - #endif /*SM_PRIVATE_H*/ diff --git a/core/arch/arm/sm/sub.mk b/core/arch/arm/sm/sub.mk index fef4b43ba0c..09c3840eb03 100644 --- a/core/arch/arm/sm/sub.mk +++ b/core/arch/arm/sm/sub.mk @@ -1,3 +1,3 @@ srcs-y += sm_a32.S srcs-y += sm.c -srcs-$(CFG_PSCI_ARM32) += std_smc.c psci.c psci-helper.S +srcs-$(CFG_PSCI_ARM32) += std_smc.c psci.c pm.c psci-helper.S pm_a32.S