Arch arm assembly simplifications #19794

Merged
30 changes: 18 additions & 12 deletions arch/arm/core/swap_helper.S
@@ -41,6 +41,8 @@ GDATA(_kernel)
* already been taken. In other words, when z_arm_pendsv() runs, we *know* we
* have to swap *something*.
*
* For Cortex-M, z_arm_pendsv() is invoked with no arguments.
*
* For Cortex-R, PendSV exception is not supported by the architecture and this
* function is directly called either by _IntExit in case of preemption, or
* z_arm_svc in case of cooperative switching.
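
On Cortex-M, the kernel pends this exception by setting the PENDSVSET bit in the SCB Interrupt Control and State Register (ICSR); PendSV is given the lowest exception priority, so z_arm_pendsv() runs only once nothing more urgent is active. A minimal C sketch of that trigger, with the register address and bit position as architecturally defined and an illustrative function name:

    #include <stdint.h>

    #define SCB_ICSR        (*(volatile uint32_t *)0xE000ED04UL)
    #define ICSR_PENDSVSET  (1UL << 28)

    /* Request a context switch: the (lowest-priority) PendSV
     * exception is taken once no other exception is active,
     * and z_arm_pendsv() performs the swap.
     */
    static inline void pend_context_switch(void)
    {
        SCB_ICSR = ICSR_PENDSVSET;
    }
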
@@ -163,7 +165,9 @@ out_fp_endif:
str v3, [v4, #0]
#endif

/* Restore previous interrupt disable state (irq_lock key) */
/* Restore previous interrupt disable state (irq_lock key)
* (We clear the arch.basepri field after restoring state)
*/
#if (defined(CONFIG_CPU_CORTEX_M0PLUS) || defined(CONFIG_CPU_CORTEX_M0)) && \
_thread_offset_to_basepri > 124
/* Doing it this way since the offset to thread->arch.basepri can in
@@ -249,8 +253,7 @@ in_fp_endif:
#if defined (CONFIG_ARM_MPU)
/* Re-program dynamic memory map */
push {r2,lr}
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
mov r0, r2 /* _current thread */
bl z_arm_configure_dynamic_mpu_regions
pop {r2,lr}
#endif
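
This hunk is the simplification itself: at this point r2 already holds the current thread pointer, so it is handed to z_arm_configure_dynamic_mpu_regions() directly rather than re-derived from _kernel with two dependent loads. Roughly, in C (a sketch; the one-argument signature matches how the function is called here):

    struct k_thread;
    extern void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread);

    static inline void reprogram_mpu(struct k_thread *current)
    {
        /* before: z_arm_configure_dynamic_mpu_regions(_kernel.cpus[0].current);
         * (two loads to re-derive the thread the caller already knows)
         */
        z_arm_configure_dynamic_mpu_regions(current);
    }
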
@@ -411,25 +414,30 @@ _oops:
* @return N/A
*/
SECTION_FUNC(TEXT, z_arm_svc)
/* Use EXC_RETURN state to find out if stack frame is on the
* MSP or PSP
*/
tst lr, #0x4 /* did we come from thread mode ? */
ite eq /* if zero (equal), came from handler mode */
mrseq r0, MSP /* handler mode, stack frame is on MSP */
mrsne r0, PSP /* thread mode, stack frame is on PSP */


/* Figure out what SVC call number was invoked */

ldr r1, [r0, #24] /* grab address of PC from stack frame */
/* SVC is a two-byte instruction, point to it and read encoding */
ldrh r1, [r1, #-2]
/* SVC is a two-byte instruction, point to it and read the
* SVC number (lower byte of the SVC instruction)
*/
ldrb r1, [r1, #-2]

/*
* grab service call number:
* 0: Unused
* 1: irq_offload (if configured)
* 2: kernel panic or oops (software generated fatal exception)
* 3: System call (if user mode supported)
* Planned implementation of system calls for memory protection will
* expand this case.
*/
ands r1, #0xff
#if defined(CONFIG_USERSPACE)
mrs r2, CONTROL

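The ldrh-to-ldrb change works because of the Thumb SVC encoding: the 16-bit instruction holds 0xDF in its upper byte and the 8-bit call number in its lower byte, and on a little-endian system that lower byte sits at the lower address. A single byte load at PC-2 therefore yields the call number directly, making the old "ands r1, #0xff" mask redundant. A hedged C equivalent, assuming the standard eight-word exception stack frame (r0-r3, r12, lr, pc, xpsr):

    #include <stdint.h>

    static inline uint8_t svc_number(const uint32_t *esf)
    {
        /* esf[6] is the stacked PC: the return address, i.e. the
         * address of the instruction right after the 2-byte SVC.
         */
        const uint8_t *ret = (const uint8_t *)esf[6];

        /* The byte at PC-2 is the SVC immediate itself, so no
         * masking is needed.
         */
        return ret[-2];
    }
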
@@ -443,7 +451,7 @@ SECTION_FUNC(TEXT, z_arm_svc)
tst r2, #0x1
bne _oops

#endif
#endif /* CONFIG_USERSPACE */

cmp r1, #2
beq _oops
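
SVC #2 is the software-generated fatal exception named in the call-number table above; a kernel oops boils down to loading the reason code into r0 and issuing the SVC, along these lines (a sketch of the idea behind Zephyr's ARCH_EXCEPT() path; treat the details as illustrative):

    #include <stdint.h>

    static inline void trigger_oops(uint32_t reason)
    {
        register uint32_t r0 __asm__("r0") = reason;

        /* SVC #2: routed to _oops by the cmp/beq above */
        __asm__ volatile("svc %[id]"
                         :
                         : [id] "i" (2), "r" (r0)
                         : "memory");
    }
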
@@ -495,13 +503,12 @@ _do_syscall:
blt valid_syscall_id

/* bad syscall id. Set arg1 to bad id and set call_id to SYSCALL_BAD */
str r6, [r0, #0]
str r6, [r0]
ldr r6, =K_SYSCALL_BAD

/* Bad syscalls treated as valid syscalls with ID K_SYSCALL_BAD. */

valid_syscall_id:
push {r0, r1}
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r1, [r0, #_thread_offset_to_mode]
@@ -518,7 +525,6 @@ valid_syscall_id:
* instructions with the previous privilege.
*/
isb
pop {r0, r1}

/* return from SVC to the modified LR - z_arm_do_syscall */
bx lr
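
The r6 juggling above is the out-of-range check: a syscall ID at or beyond the table limit is rewritten to K_SYSCALL_BAD, with the offending ID preserved as the first argument so the error handler can report it. In C terms (a sketch; K_SYSCALL_LIMIT and K_SYSCALL_BAD are the generated Zephyr constants):

    static inline uint32_t sanitize_syscall_id(uint32_t call_id, uint32_t *arg1)
    {
        if (call_id >= K_SYSCALL_LIMIT) {
            *arg1 = call_id;          /* report the bad ID as argument 1 */
            call_id = K_SYSCALL_BAD;  /* dispatch through the normal table */
        }
        return call_id;
    }
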
31 changes: 20 additions & 11 deletions arch/arm/core/userspace.S
@@ -33,6 +33,9 @@ GDATA(_k_syscall_table)
* The conversion is one way, and threads which transition to user mode do
* not transition back later, unless they are doing system calls.
*
* The function is invoked as:
* z_arm_userspace_enter(user_entry, p1, p2, p3,
* stack_info.start, stack_info.size);
*/
SECTION_FUNC(TEXT,z_arm_userspace_enter)
/* move user_entry to lr */
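
Per the invocation comment added above, the first four arguments arrive in r0-r3 and the last two on the stack, as the AAPCS prescribes for a six-argument call. A sketch of the matching call site (field names follow Zephyr's struct _thread_stack_info; treat the declaration as illustrative):

    extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
                                                    void *p1, void *p2, void *p3,
                                                    uint32_t stack_start,
                                                    uint32_t stack_size);

    z_arm_userspace_enter(user_entry, p1, p2, p3,
                          (uint32_t)_current->stack_info.start,
                          _current->stack_info.size);
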
@@ -44,14 +47,20 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
msr PSPLIM, r0
#endif

/* set stack to privileged stack */
/* prepare to set stack to privileged stack */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r0, [r0, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
add r0, r0, ip

/* store current stack pointer to ip
* the current stack pointer is needed to retrieve
* stack_info.start and stack_info.size
*/
mov ip, sp

/* set stack to privileged stack */
msr PSP, r0
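
Capturing sp in ip before this msr is what keeps arguments five and six reachable: under the AAPCS only the first four arguments travel in r0-r3, so stack_info.start and stack_info.size still sit on the user stack that sp pointed to. In rough C, using CMSIS-style intrinsic names and the thread fields referenced above (illustrative):

    uint32_t priv_top = _current->arch.priv_stack_start
                        + CONFIG_PRIVILEGED_STACK_SIZE;  /* stacks grow down */
    uint32_t *caller_sp = (uint32_t *)__get_PSP();       /* the value kept in ip */

    __set_PSP(priv_top);  /* continue on the privileged stack */
    /* later: stack_start = caller_sp[0]; stack_size = caller_sp[1]; */
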

#if defined(CONFIG_BUILTIN_STACK_GUARD)
@@ -62,7 +71,10 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
msr PSPLIM, r0
#endif

#if defined (CONFIG_ARM_MPU)
/* push args to stack */
push {r1,r2,r3,lr}
push {r0,ip}

/* Re-program dynamic memory map.
*
* Important note:
@@ -77,20 +89,17 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
* stack, since we do not control how much stack is actually left, when
* user invokes z_arm_userspace_enter().
*/
push {r0,r1,r2,r3,ip,lr}
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
bl z_arm_configure_dynamic_mpu_regions
pop {r0,r1,r2,r3,ip,lr}
#endif

pop {r0,ip}

/* load up stack info from user stack */
ldr r0, [ip]
ldr ip, [ip, #4]


/* push args to stack */
push {r0,r1,r2,r3,ip,lr}
push {r0,ip}

/* clear the user stack area to clean out privileged data */
/* from right past the guard right up to the end */
@@ -102,18 +111,18 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
#endif
bl memset

pop {r0,r1,r2,r3,ip,lr}
pop {r0,ip}

/* r0 contains user stack start, ip contains user stack size */
add r0, r0, ip /* calculate top of stack */

#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* clear stack limit (stack protection not required in user mode) */
push {r3}
mov r3, #0
msr PSPLIM, r3
pop {r3}
#endif
/* pop remaining arguments from stack before switching stacks */
pop {r1,r2,r3,lr}

/* set stack to user stack */
msr PSP, r0
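
The function's tail, in the same CMSIS-style shorthand: the ARMv8-M stack limit is dropped to zero because PSPLIM checking is not wanted once the thread runs unprivileged, and PSP moves to the computed top of the user stack (start + size). Illustrative only:

    #if defined(CONFIG_BUILTIN_STACK_GUARD)
        __set_PSPLIM(0U);                     /* no stack limit in user mode */
    #endif
        __set_PSP(stack_start + stack_size);  /* top of the user stack */
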