@@ -317,6 +317,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	 */
 	mtspr(SPRN_HDEC, hdec);
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+tm_return_to_guest:
+#endif
 	mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
 	mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
 	mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
@@ -415,11 +418,23 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	 * is in real suspend mode and is trying to transition to
 	 * transactional mode.
 	 */
-	if (local_paca->kvm_hstate.fake_suspend &&
+	if (!local_paca->kvm_hstate.fake_suspend &&
 			(vcpu->arch.shregs.msr & MSR_TS_S)) {
 		if (kvmhv_p9_tm_emulation_early(vcpu)) {
-			/* Prevent it being handled again. */
-			trap = 0;
+			/*
+			 * Go straight back into the guest with the
+			 * new NIP/MSR as set by TM emulation.
+			 */
+			mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
+			mtspr(SPRN_HSRR1, vcpu->arch.shregs.msr);
+
+			/*
+			 * tm_return_to_guest re-loads SRR0/1, DAR,
+			 * DSISR after RI is cleared, in case they had
+			 * been clobbered by a MCE.
+			 */
+			__mtmsrd(0, 1); /* clear RI */
+			goto tm_return_to_guest;
 		}
 	}
 #endif
@@ -499,6 +514,10 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	 * If we are in real mode, only switch MMU on after the MMU is
 	 * switched to host, to avoid the P9_RADIX_PREFETCH_BUG.
 	 */
+	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+	    vcpu->arch.shregs.msr & MSR_TS_MASK)
+		msr |= MSR_TS_S;
+
 	__mtmsrd(msr, 0);
 
 	end_timing(vcpu);
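
The re-entry path introduced above is easiest to read as a plain backward goto. Below is a minimal, stand-alone C sketch (not part of the patch) of that control flow: on a successful early TM emulation, HSRR0/1 are loaded with the emulated NIP/MSR, RI is cleared, and execution jumps back to tm_return_to_guest so SRR0/1 (and DAR/DSISR in the real code) are reloaded after the point where an MCE could have clobbered them. mtspr(), mtmsrd() and tm_emulation_early() are stubs and MSR_TS_S is a placeholder value; only the label/goto structure and the ordering mirror the hunks.

/* Stand-alone sketch of the tm_return_to_guest flow; stubs only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_TS_S	(1ULL << 33)	/* placeholder for the "suspended" TS bit */

static void mtspr(const char *spr, uint64_t val)
{
	printf("mtspr  %-5s <- 0x%llx\n", spr, (unsigned long long)val);
}

static void mtmsrd(uint64_t val)
{
	printf("mtmsrd 0x%llx (RI cleared)\n", (unsigned long long)val);
}

/* Pretend the softpatch emulation succeeds once and advances NIP. */
static bool tm_emulation_early(uint64_t *nip)
{
	static bool done;

	if (done)
		return false;
	done = true;
	*nip += 4;
	return true;
}

int main(void)
{
	bool fake_suspend = false;
	uint64_t nip = 0x100, msr = MSR_TS_S, srr0 = 0x200, srr1 = 0x9000;

tm_return_to_guest:
	/* Reloaded on every pass, including after the RI-cleared re-entry. */
	mtspr("SRR0", srr0);
	mtspr("SRR1", srr1);

	/* ... guest entry and exit would happen here ... */

	if (!fake_suspend && (msr & MSR_TS_S)) {
		if (tm_emulation_early(&nip)) {
			/* Return straight to the guest at the emulated NIP/MSR. */
			mtspr("HSRR0", nip);
			mtspr("HSRR1", msr);
			mtmsrd(0);	/* clear RI before reloading the SRRs */
			goto tm_return_to_guest;
		}
	}
	return 0;
}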