@@ -54,50 +54,18 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
5454 if (!system_supports_fpsimd ())
5555 return ;
5656
57- fpsimd_kvm_prepare ();
58-
5957 /*
60- * We will check TIF_FOREIGN_FPSTATE just before entering the
61- * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
62- * FP_STATE_FREE if the flag set.
58+ * Ensure that any host FPSIMD/SVE/SME state is saved and unbound such
59+ * that the host kernel is responsible for restoring this state upon
60+ * return to userspace, and the hyp code doesn't need to save anything.
61+ *
62+ * When the host may use SME, fpsimd_save_and_flush_cpu_state() ensures
63+ * that PSTATE.{SM,ZA} == {0,0}.
6364 */
64- * host_data_ptr (fp_owner ) = FP_STATE_HOST_OWNED ;
65- * host_data_ptr (fpsimd_state ) = kern_hyp_va (& current -> thread .uw .fpsimd_state );
66- * host_data_ptr (fpmr_ptr ) = kern_hyp_va (& current -> thread .uw .fpmr );
67-
68- host_data_clear_flag (HOST_SVE_ENABLED );
69- if (read_sysreg (cpacr_el1 ) & CPACR_EL1_ZEN_EL0EN )
70- host_data_set_flag (HOST_SVE_ENABLED );
71-
72- if (system_supports_sme ()) {
73- host_data_clear_flag (HOST_SME_ENABLED );
74- if (read_sysreg (cpacr_el1 ) & CPACR_EL1_SMEN_EL0EN )
75- host_data_set_flag (HOST_SME_ENABLED );
76-
77- /*
78- * If PSTATE.SM is enabled then save any pending FP
79- * state and disable PSTATE.SM. If we leave PSTATE.SM
80- * enabled and the guest does not enable SME via
81- * CPACR_EL1.SMEN then operations that should be valid
82- * may generate SME traps from EL1 to EL1 which we
83- * can't intercept and which would confuse the guest.
84- *
85- * Do the same for PSTATE.ZA in the case where there
86- * is state in the registers which has not already
87- * been saved, this is very unlikely to happen.
88- */
89- if (read_sysreg_s (SYS_SVCR ) & (SVCR_SM_MASK | SVCR_ZA_MASK )) {
90- * host_data_ptr (fp_owner ) = FP_STATE_FREE ;
91- fpsimd_save_and_flush_cpu_state ();
92- }
93- }
65+ fpsimd_save_and_flush_cpu_state ();
66+ * host_data_ptr (fp_owner ) = FP_STATE_FREE ;
9467
95- /*
96- * If normal guests gain SME support, maintain this behavior for pKVM
97- * guests, which don't support SME.
98- */
99- WARN_ON (is_protected_kvm_enabled () && system_supports_sme () &&
100- read_sysreg_s (SYS_SVCR ));
68+ WARN_ON_ONCE (system_supports_sme () && read_sysreg_s (SYS_SVCR ));
10169}
10270
10371/*
@@ -162,52 +130,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
162130
163131 local_irq_save (flags );
164132
165- /*
166- * If we have VHE then the Hyp code will reset CPACR_EL1 to
167- * the default value and we need to reenable SME.
168- */
169- if (has_vhe () && system_supports_sme ()) {
170- /* Also restore EL0 state seen on entry */
171- if (host_data_test_flag (HOST_SME_ENABLED ))
172- sysreg_clear_set (CPACR_EL1 , 0 , CPACR_EL1_SMEN );
173- else
174- sysreg_clear_set (CPACR_EL1 ,
175- CPACR_EL1_SMEN_EL0EN ,
176- CPACR_EL1_SMEN_EL1EN );
177- isb ();
178- }
179-
180133 if (guest_owns_fp_regs ()) {
181- if (vcpu_has_sve (vcpu )) {
182- u64 zcr = read_sysreg_el1 (SYS_ZCR );
183-
184- /*
185- * If the vCPU is in the hyp context then ZCR_EL1 is
186- * loaded with its vEL2 counterpart.
187- */
188- __vcpu_sys_reg (vcpu , vcpu_sve_zcr_elx (vcpu )) = zcr ;
189-
190- /*
191- * Restore the VL that was saved when bound to the CPU,
192- * which is the maximum VL for the guest. Because the
193- * layout of the data when saving the sve state depends
194- * on the VL, we need to use a consistent (i.e., the
195- * maximum) VL.
196- * Note that this means that at guest exit ZCR_EL1 is
197- * not necessarily the same as on guest entry.
198- *
199- * ZCR_EL2 holds the guest hypervisor's VL when running
200- * a nested guest, which could be smaller than the
201- * max for the vCPU. Similar to above, we first need to
202- * switch to a VL consistent with the layout of the
203- * vCPU's SVE state. KVM support for NV implies VHE, so
204- * using the ZCR_EL1 alias is safe.
205- */
206- if (!has_vhe () || (vcpu_has_nv (vcpu ) && !is_hyp_ctxt (vcpu )))
207- sve_cond_update_zcr_vq (vcpu_sve_max_vq (vcpu ) - 1 ,
208- SYS_ZCR_EL1 );
209- }
210-
211134 /*
212135 * Flush (save and invalidate) the fpsimd/sve state so that if
213136 * the host tries to use fpsimd/sve, it's not using stale data
@@ -219,18 +142,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
219142 * when needed.
220143 */
221144 fpsimd_save_and_flush_cpu_state ();
222- } else if (has_vhe () && system_supports_sve ()) {
223- /*
224- * The FPSIMD/SVE state in the CPU has not been touched, and we
225- * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
226- * reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
227- * for EL0. To avoid spurious traps, restore the trap state
228- * seen by kvm_arch_vcpu_load_fp():
229- */
230- if (host_data_test_flag (HOST_SVE_ENABLED ))
231- sysreg_clear_set (CPACR_EL1 , 0 , CPACR_EL1_ZEN_EL0EN );
232- else
233- sysreg_clear_set (CPACR_EL1 , CPACR_EL1_ZEN_EL0EN , 0 );
234145 }
235146
236147 local_irq_restore (flags );