Skip to content

Commit 4708fb0

Browse files
committed
ARM: vfp: Reimplement VFP exception entry in C code
En/disabling softirqs from asm code turned out to be trickier than expected, so vfp_support_entry now returns by tail calling __local_bh_enable_ip() and passing the same arguments that a C call to local_bh_enable() would pass. However, this is slightly hacky, as we don't want to carry our own implementation of local_bh_enable(). So let's bite the bullet, and get rid of the asm logic in vfp_support_entry that reasons about whether or not to save and/or reload the VFP state, and about whether or not an FP exception is pending, and only keep the VFP loading logic as a function that is callable from C. Replicate the removed logic in vfp_entry(), and use the exact same reasoning as in the asm code. To emphasize the correspondence, retain some of the asm comments in the C version as well. Signed-off-by: Ard Biesheuvel <ardb@kernel.org> Acked-by: Linus Walleij <linus.walleij@linaro.org>
1 parent 4a0548c commit 4708fb0

File tree

4 files changed

+124
-216
lines changed

4 files changed

+124
-216
lines changed

Diff for: arch/arm/vfp/entry.S

+6-6
Original file line numberDiff line numberDiff line change
@@ -22,10 +22,10 @@
2222
@ IRQs enabled.
2323
@
2424
ENTRY(do_vfp)
25-
mov r1, r10
26-
str lr, [sp, #-8]!
27-
add r3, sp, #4
28-
str r9, [r3]
29-
bl vfp_entry
30-
ldr pc, [sp], #8
25+
mov r1, r0 @ pass trigger opcode via R1
26+
mov r0, sp @ pass struct pt_regs via R0
27+
bl vfp_support_entry @ dispatch the VFP exception
28+
cmp r0, #0 @ handled successfully?
29+
reteq r9 @ then use R9 as return address
30+
ret lr @ pass to undef handler
3131
ENDPROC(do_vfp)

Diff for: arch/arm/vfp/vfp.h

+1
Original file line numberDiff line numberDiff line change
@@ -375,3 +375,4 @@ struct op {
375375
};
376376

377377
asmlinkage void vfp_save_state(void *location, u32 fpexc);
378+
asmlinkage u32 vfp_load_state(const void *location);

Diff for: arch/arm/vfp/vfphw.S

+14-190
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,6 @@
44
*
55
* Copyright (C) 2004 ARM Limited.
66
* Written by Deep Blue Solutions Limited.
7-
*
8-
* This code is called from the kernel's undefined instruction trap.
9-
* r1 holds the thread_info pointer
10-
* r3 holds the return address for successful handling.
11-
* lr holds the return address for unrecognised instructions.
12-
* sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
137
*/
148
#include <linux/init.h>
159
#include <linux/linkage.h>
@@ -19,20 +13,6 @@
1913
#include <asm/assembler.h>
2014
#include <asm/asm-offsets.h>
2115

22-
.macro DBGSTR, str
23-
#ifdef DEBUG
24-
stmfd sp!, {r0-r3, ip, lr}
25-
ldr r0, =1f
26-
bl _printk
27-
ldmfd sp!, {r0-r3, ip, lr}
28-
29-
.pushsection .rodata, "a"
30-
1: .ascii KERN_DEBUG "VFP: \str\n"
31-
.byte 0
32-
.previous
33-
#endif
34-
.endm
35-
3616
.macro DBGSTR1, str, arg
3717
#ifdef DEBUG
3818
stmfd sp!, {r0-r3, ip, lr}
@@ -48,177 +28,25 @@
4828
#endif
4929
.endm
5030

51-
.macro DBGSTR3, str, arg1, arg2, arg3
52-
#ifdef DEBUG
53-
stmfd sp!, {r0-r3, ip, lr}
54-
mov r3, \arg3
55-
mov r2, \arg2
56-
mov r1, \arg1
57-
ldr r0, =1f
58-
bl _printk
59-
ldmfd sp!, {r0-r3, ip, lr}
60-
61-
.pushsection .rodata, "a"
62-
1: .ascii KERN_DEBUG "VFP: \str\n"
63-
.byte 0
64-
.previous
65-
#endif
66-
.endm
67-
68-
69-
@ VFP hardware support entry point.
70-
@
71-
@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
72-
@ r1 = thread_info pointer
73-
@ r2 = PC value to resume execution after successful emulation
74-
@ r3 = normal "successful" return address
75-
@ lr = unrecognised instruction return address
76-
@ IRQs enabled.
77-
ENTRY(vfp_support_entry)
78-
ldr r11, [r1, #TI_CPU] @ CPU number
79-
add r10, r1, #TI_VFPSTATE @ r10 = workspace
80-
81-
DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
82-
83-
.fpu vfpv2
84-
VFPFMRX r1, FPEXC @ Is the VFP enabled?
85-
DBGSTR1 "fpexc %08x", r1
86-
tst r1, #FPEXC_EN
87-
bne look_for_VFP_exceptions @ VFP is already enabled
88-
89-
DBGSTR1 "enable %x", r10
90-
ldr r9, vfp_current_hw_state_address
91-
orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set
92-
ldr r4, [r9, r11, lsl #2] @ vfp_current_hw_state pointer
93-
bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled
94-
cmp r4, r10 @ this thread owns the hw context?
95-
#ifndef CONFIG_SMP
96-
@ For UP, checking that this thread owns the hw context is
97-
@ sufficient to determine that the hardware state is valid.
98-
beq vfp_hw_state_valid
99-
100-
@ On UP, we lazily save the VFP context. As a different
101-
@ thread wants ownership of the VFP hardware, save the old
102-
@ state if there was a previous (valid) owner.
103-
104-
VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
105-
@ exceptions, so we can get at the
106-
@ rest of it
107-
108-
DBGSTR1 "save old state %p", r4
109-
cmp r4, #0 @ if the vfp_current_hw_state is NULL
110-
beq vfp_reload_hw @ then the hw state needs reloading
111-
VFPFSTMIA r4, r5 @ save the working registers
112-
VFPFMRX r5, FPSCR @ current status
113-
tst r1, #FPEXC_EX @ is there additional state to save?
114-
beq 1f
115-
VFPFMRX r6, FPINST @ FPINST (only if FPEXC.EX is set)
116-
tst r1, #FPEXC_FP2V @ is there an FPINST2 to read?
117-
beq 1f
118-
VFPFMRX r8, FPINST2 @ FPINST2 if needed (and present)
119-
1:
120-
stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2
121-
vfp_reload_hw:
122-
123-
#else
124-
@ For SMP, if this thread does not own the hw context, then we
125-
@ need to reload it. No need to save the old state as on SMP,
126-
@ we always save the state when we switch away from a thread.
127-
bne vfp_reload_hw
128-
129-
@ This thread has ownership of the current hardware context.
130-
@ However, it may have been migrated to another CPU, in which
131-
@ case the saved state is newer than the hardware context.
132-
@ Check this by looking at the CPU number which the state was
133-
@ last loaded onto.
134-
ldr ip, [r10, #VFP_CPU]
135-
teq ip, r11
136-
beq vfp_hw_state_valid
137-
138-
vfp_reload_hw:
139-
@ We're loading this threads state into the VFP hardware. Update
140-
@ the CPU number which contains the most up to date VFP context.
141-
str r11, [r10, #VFP_CPU]
142-
143-
VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
144-
@ exceptions, so we can get at the
145-
@ rest of it
146-
#endif
147-
148-
DBGSTR1 "load state %p", r10
149-
str r10, [r9, r11, lsl #2] @ update the vfp_current_hw_state pointer
31+
ENTRY(vfp_load_state)
32+
@ Load the current VFP state
33+
@ r0 - load location
34+
@ returns FPEXC
35+
DBGSTR1 "load VFP state %p", r0
15036
@ Load the saved state back into the VFP
151-
VFPFLDMIA r10, r5 @ reload the working registers while
37+
VFPFLDMIA r0, r1 @ reload the working registers while
15238
@ FPEXC is in a safe state
153-
ldmia r10, {r1, r5, r6, r8} @ load FPEXC, FPSCR, FPINST, FPINST2
154-
tst r1, #FPEXC_EX @ is there additional state to restore?
39+
ldmia r0, {r0-r3} @ load FPEXC, FPSCR, FPINST, FPINST2
40+
tst r0, #FPEXC_EX @ is there additional state to restore?
15541
beq 1f
156-
VFPFMXR FPINST, r6 @ restore FPINST (only if FPEXC.EX is set)
157-
tst r1, #FPEXC_FP2V @ is there an FPINST2 to write?
42+
VFPFMXR FPINST, r2 @ restore FPINST (only if FPEXC.EX is set)
43+
tst r0, #FPEXC_FP2V @ is there an FPINST2 to write?
15844
beq 1f
159-
VFPFMXR FPINST2, r8 @ FPINST2 if needed (and present)
45+
VFPFMXR FPINST2, r3 @ FPINST2 if needed (and present)
16046
1:
161-
VFPFMXR FPSCR, r5 @ restore status
162-
163-
@ The context stored in the VFP hardware is up to date with this thread
164-
vfp_hw_state_valid:
165-
tst r1, #FPEXC_EX
166-
bne process_exception @ might as well handle the pending
167-
@ exception before retrying branch
168-
@ out before setting an FPEXC that
169-
@ stops us reading stuff
170-
VFPFMXR FPEXC, r1 @ Restore FPEXC last
171-
mov sp, r3 @ we think we have handled things
172-
pop {lr}
173-
sub r2, r2, #4 @ Retry current instruction - if Thumb
174-
str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
175-
@ else it's one 32-bit instruction, so
176-
@ always subtract 4 from the following
177-
@ instruction address.
178-
179-
local_bh_enable_and_ret:
180-
adr r0, .
181-
mov r1, #SOFTIRQ_DISABLE_OFFSET
182-
b __local_bh_enable_ip @ tail call
183-
184-
look_for_VFP_exceptions:
185-
@ Check for synchronous or asynchronous exception
186-
tst r1, #FPEXC_EX | FPEXC_DEX
187-
bne process_exception
188-
@ On some implementations of the VFP subarch 1, setting FPSCR.IXE
189-
@ causes all the CDP instructions to be bounced synchronously without
190-
@ setting the FPEXC.EX bit
191-
VFPFMRX r5, FPSCR
192-
tst r5, #FPSCR_IXE
193-
bne process_exception
194-
195-
tst r5, #FPSCR_LENGTH_MASK
196-
beq skip
197-
orr r1, r1, #FPEXC_DEX
198-
b process_exception
199-
skip:
200-
201-
@ Fall into hand on to next handler - appropriate coproc instr
202-
@ not recognised by VFP
203-
204-
DBGSTR "not VFP"
205-
b local_bh_enable_and_ret
206-
207-
process_exception:
208-
DBGSTR "bounce"
209-
mov sp, r3 @ setup for a return to the user code.
210-
pop {lr}
211-
mov r2, sp @ nothing stacked - regdump is at TOS
212-
213-
@ Now call the C code to package up the bounce to the support code
214-
@ r0 holds the trigger instruction
215-
@ r1 holds the FPEXC value
216-
@ r2 pointer to register dump
217-
b VFP_bounce @ we have handled this - the support
218-
@ code will raise an exception if
219-
@ required. If not, the user code will
220-
@ retry the faulted instruction
221-
ENDPROC(vfp_support_entry)
47+
VFPFMXR FPSCR, r1 @ restore status
48+
ret lr
49+
ENDPROC(vfp_load_state)
22250

22351
ENTRY(vfp_save_state)
22452
@ Save the current VFP state
@@ -238,10 +66,6 @@ ENTRY(vfp_save_state)
23866
ret lr
23967
ENDPROC(vfp_save_state)
24068

241-
.align
242-
vfp_current_hw_state_address:
243-
.word vfp_current_hw_state
244-
24569
.macro tbl_branch, base, tmp, shift
24670
#ifdef CONFIG_THUMB2_KERNEL
24771
adr \tmp, 1f

0 commit comments

Comments
 (0)