diff --git a/arch/arm/core/swap_helper.S b/arch/arm/core/swap_helper.S
index be8b79534a98..2762f26fc726 100644
--- a/arch/arm/core/swap_helper.S
+++ b/arch/arm/core/swap_helper.S
@@ -41,6 +41,8 @@ GDATA(_kernel)
  * already been taken. In other words, when z_arm_pendsv() runs, we *know* we
  * have to swap *something*.
  *
+ * For Cortex-M, z_arm_pendsv() is invoked with no arguments.
+ *
  * For Cortex-R, PendSV exception is not supported by the architecture and this
  * function is directly called either by _IntExit in case of preemption, or
  * z_arm_svc in case of cooperative switching.
@@ -163,7 +165,9 @@ out_fp_endif:
 	str v3, [v4, #0]
 #endif
 
-	/* Restore previous interrupt disable state (irq_lock key) */
+	/* Restore previous interrupt disable state (irq_lock key)
+	 * (We clear the arch.basepri field after restoring state)
+	 */
 #if (defined(CONFIG_CPU_CORTEX_M0PLUS) || defined(CONFIG_CPU_CORTEX_M0)) && \
 	_thread_offset_to_basepri > 124
 	/* Doing it this way since the offset to thread->arch.basepri can in
@@ -249,8 +253,7 @@ in_fp_endif:
 #if defined (CONFIG_ARM_MPU)
 	/* Re-program dynamic memory map */
 	push {r2,lr}
-	ldr r0, =_kernel
-	ldr r0, [r0, #_kernel_offset_to_current]
+	mov r0, r2 /* _current thread */
 	bl z_arm_configure_dynamic_mpu_regions
 	pop {r2,lr}
 #endif
@@ -411,14 +414,22 @@ _oops:
  * @return N/A
  */
 SECTION_FUNC(TEXT, z_arm_svc)
+	/* Use EXC_RETURN state to find out if stack frame is on the
+	 * MSP or PSP
+	 */
 	tst lr, #0x4	/* did we come from thread mode ? */
 	ite eq	/* if zero (equal), came from handler mode */
 	mrseq r0, MSP	/* handler mode, stack frame is on MSP */
 	mrsne r0, PSP	/* thread mode, stack frame is on PSP */
+
+	/* Figure out what SVC call number was invoked */
 	ldr r1, [r0, #24]	/* grab address of PC from stack frame */
-	/* SVC is a two-byte instruction, point to it and read encoding */
-	ldrh r1, [r1, #-2]
+	/* SVC is a two-byte instruction, point to it and read the
+	 * SVC number (lower byte of SVC instruction)
+	 */
+	ldrb r1, [r1, #-2]
 
 	/*
 	 * grab service call number:
@@ -426,10 +437,7 @@ SECTION_FUNC(TEXT, z_arm_svc)
 	 * 1: irq_offload (if configured)
 	 * 2: kernel panic or oops (software generated fatal exception)
 	 * 3: System call (if user mode supported)
-	 * Planned implementation of system calls for memory protection will
-	 * expand this case.
 	 */
-	ands r1, #0xff
 
 #if defined(CONFIG_USERSPACE)
 	mrs r2, CONTROL
@@ -443,7 +451,7 @@ SECTION_FUNC(TEXT, z_arm_svc)
 	tst r2, #0x1
 	bne _oops
 
-#endif
+#endif /* CONFIG_USERSPACE */
 
 	cmp r1, #2
 	beq _oops
@@ -495,13 +503,12 @@ _do_syscall:
 	blt valid_syscall_id
 
 	/* bad syscall id. Set arg1 to bad id and set call_id to SYSCALL_BAD */
-	str r6, [r0, #0]
+	str r6, [r0]
 	ldr r6, =K_SYSCALL_BAD
 
 	/* Bad syscalls treated as valid syscalls with ID K_SYSCALL_BAD. */
 
 valid_syscall_id:
-	push {r0, r1}
 	ldr r0, =_kernel
 	ldr r0, [r0, #_kernel_offset_to_current]
 	ldr r1, [r0, #_thread_offset_to_mode]
@@ -518,7 +525,6 @@ valid_syscall_id:
 	 * instructions with the previous privilege.
 	 */
 	isb
-	pop {r0, r1}
 
 	/* return from SVC to the modified LR - z_arm_do_syscall */
 	bx lr
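A note on the ldrh-to-ldrb change above: Thumb "SVC #imm8" is a 16-bit
instruction encoded as 0xDF00 | imm8, and the PC saved in the exception stack
frame points to the instruction after the SVC. Assuming the usual
little-endian Cortex-M configuration, the immediate is therefore the single
byte at pc - 2, which is why a byte load fetches the SVC number directly and
the old "ands r1, #0xff" mask can be dropped. Below is a minimal C sketch of
the same computation; the struct and helper names are illustrative, not
Zephyr's:

    #include <stdint.h>

    /* Hardware-stacked Cortex-M exception frame: r0-r3, r12, lr, pc, xPSR.
     * The pc member is at byte offset 24, matching "ldr r1, [r0, #24]".
     */
    struct exc_frame {
            uint32_t r0, r1, r2, r3, r12, lr, pc, xpsr;
    };

    static inline uint8_t svc_number(const struct exc_frame *frame)
    {
            /* frame->pc holds the return address, i.e. the address of the
             * instruction right after the 2-byte SVC. On a little-endian
             * core the imm8 field is the byte at pc - 2, so one ldrb
             * replaces the old ldrh plus the "ands" mask.
             */
            return *(const uint8_t *)(frame->pc - 2);
    }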
diff --git a/arch/arm/core/userspace.S b/arch/arm/core/userspace.S
index 4a6c9770730d..518404ea8328 100644
--- a/arch/arm/core/userspace.S
+++ b/arch/arm/core/userspace.S
@@ -33,6 +33,9 @@ GDATA(_k_syscall_table)
 * The conversion is one way, and threads which transition to user mode do
 * not transition back later, unless they are doing system calls.
 *
+ * The function is invoked as:
+ * z_arm_userspace_enter(user_entry, p1, p2, p3,
+ *                       stack_info.start, stack_info.size);
 */
 SECTION_FUNC(TEXT,z_arm_userspace_enter)
 	/* move user_entry to lr */
@@ -44,14 +47,20 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
 	msr PSPLIM, r0
 #endif
 
-	/* set stack to privileged stack */
+	/* prepare to set stack to privileged stack */
 	ldr r0, =_kernel
 	ldr r0, [r0, #_kernel_offset_to_current]
 	ldr r0, [r0, #_thread_offset_to_priv_stack_start]	/* priv stack ptr */
 	ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
 	add r0, r0, ip
 
+	/* store current stack pointer to ip
+	 * the current stack pointer is needed to retrieve
+	 * stack_info.start and stack_info.size
+	 */
 	mov ip, sp
+
+	/* set stack to privileged stack */
 	msr PSP, r0
 
 #if defined(CONFIG_BUILTIN_STACK_GUARD)
@@ -62,7 +71,10 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
 	msr PSPLIM, r0
 #endif
 
-#if defined (CONFIG_ARM_MPU)
+	/* push args to stack */
+	push {r1,r2,r3,lr}
+	push {r0,ip}
+
 	/* Re-program dynamic memory map.
 	 *
 	 * Important note:
@@ -77,20 +89,17 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
 	 * stack, since we do not control how much stack is actually left, when
 	 * user invokes z_arm_userspace_enter().
 	 */
-	push {r0,r1,r2,r3,ip,lr}
 	ldr r0, =_kernel
 	ldr r0, [r0, #_kernel_offset_to_current]
 	bl z_arm_configure_dynamic_mpu_regions
-	pop {r0,r1,r2,r3,ip,lr}
-#endif
+
+	pop {r0,ip}
 
 	/* load up stack info from user stack */
 	ldr r0, [ip]
 	ldr ip, [ip, #4]
-
-	/* push args to stack */
-	push {r0,r1,r2,r3,ip,lr}
+	push {r0,ip}
 
 	/* clear the user stack area to clean out privileged data */
 	/* from right past the guard right up to the end */
@@ -102,18 +111,18 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
 #endif
 	bl memset
 
-	pop {r0,r1,r2,r3,ip,lr}
+	pop {r0,ip}
 
 	/* r0 contains user stack start, ip contains user stack size */
 	add r0, r0, ip	/* calculate top of stack */
 
 #if defined(CONFIG_BUILTIN_STACK_GUARD)
 	/* clear stack limit (stack protection not required in user mode) */
-	push {r3}
 	mov r3, #0
 	msr PSPLIM, r3
-	pop {r3}
 #endif
 
+	/* pop remaining arguments from stack before switching stacks */
+	pop {r1,r2,r3,lr}
 	/* set stack to user stack */
 	msr PSP, r0
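A closing note on the argument handling in z_arm_userspace_enter: per the
AAPCS, the first four arguments arrive in r0-r3 and the remaining two on the
caller's stack, which is why the routine saves sp in ip before switching to
the privileged stack and later reloads stack_info.start and stack_info.size
from [ip] and [ip, #4]. A sketch of the call shape with simplified types; the
argument list follows the comment added above, while the typedef and
parameter names are illustrative:

    #include <stdint.h>

    typedef void (*user_entry_t)(void *p1, void *p2, void *p3);

    /* AAPCS mapping on entry to z_arm_userspace_enter:
     *   user_entry   -> r0 (immediately moved to lr)
     *   p1, p2, p3   -> r1, r2, r3 (preserved via push {r1,r2,r3,lr})
     *   stack_start  -> caller's stack, read by ldr r0, [ip]
     *   stack_size   -> caller's stack, read by ldr ip, [ip, #4]
     * The function drops to user mode and never returns.
     */
    extern void z_arm_userspace_enter(user_entry_t user_entry,
                                      void *p1, void *p2, void *p3,
                                      uint32_t stack_start,
                                      uint32_t stack_size);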