KVM: SVM: Add NMI support for an SEV-ES guest
The GHCB specification defines how NMIs are to be handled for an SEV-ES
guest. To detect the completion of an NMI, the hypervisor must not
intercept the IRET instruction (because a #VC exception taken while the
NMI is being handled will itself issue an IRET) and must instead receive
an NMI Complete exit event from the guest.

Update the KVM support for detecting the completion of NMIs in the guest
to follow the GHCB specification. When an SEV-ES guest is active, the
IRET instruction will no longer be intercepted. Instead, when the NMI
Complete exit event is received via VMGEXIT, the iret_interception()
function will be called to simulate the completion of the NMI.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Message-Id: <5ea3dd69b8d4396cefdc9048ebc1ab7caa70a847.1607620209.git.thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
tlendacky authored and bonzini committed Dec 15, 2020
1 parent ed02b21 commit 4444dfe
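
For orientation before the hunks, here is a rough guest-side view of what the GHCB specification asks for. It is not part of this host-side patch; the toy_* names, the struct layout, and the TOY_VMGEXIT_NMI_COMPLETE value are illustrative stand-ins, not real kernel definitions. The idea is simply that the guest writes an NMI Complete exit code into its GHCB and issues VMGEXIT, and the hypervisor treats that event, rather than an intercepted IRET, as the end of NMI handling.

/* Toy guest-side sketch; illustrative only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

struct toy_ghcb {
	uint64_t sw_exit_code;   /* which VMGEXIT event is being reported */
	uint64_t sw_exit_info_1; /* unused for NMI Complete */
	uint64_t sw_exit_info_2; /* unused for NMI Complete */
};

/* Assumed exit code for "NMI Complete"; the real value lives in asm/svm.h. */
#define TOY_VMGEXIT_NMI_COMPLETE 0x80000003ULL

/* Stand-in for the VMGEXIT instruction a real guest would execute. */
static void toy_vmgexit(const struct toy_ghcb *ghcb)
{
	printf("VMGEXIT: sw_exit_code=%#llx\n",
	       (unsigned long long)ghcb->sw_exit_code);
}

/* Conceptually called at the end of the guest's NMI handling path. */
static void toy_report_nmi_complete(struct toy_ghcb *ghcb)
{
	ghcb->sw_exit_code   = TOY_VMGEXIT_NMI_COMPLETE;
	ghcb->sw_exit_info_1 = 0;
	ghcb->sw_exit_info_2 = 0;
	toy_vmgexit(ghcb);	/* KVM picks this up in sev_handle_vmgexit() below */
}

int main(void)
{
	struct toy_ghcb ghcb = { 0 };

	toy_report_nmi_complete(&ghcb);
	return 0;
}

The diff below wires exactly that event to the existing iret_interception() path on the host.
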
Showing 2 changed files with 17 additions and 7 deletions.
4 changes: 4 additions & 0 deletions arch/x86/kvm/svm/sev.c
@@ -1449,6 +1449,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 		if (!ghcb_sw_scratch_is_valid(ghcb))
 			goto vmgexit_err;
 		break;
+	case SVM_VMGEXIT_NMI_COMPLETE:
 	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
 		break;
 	default:
@@ -1770,6 +1771,9 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
 					    control->exit_info_2,
 					    svm->ghcb_sa);
 		break;
+	case SVM_VMGEXIT_NMI_COMPLETE:
+		ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
+		break;
 	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
 		vcpu_unimpl(&svm->vcpu,
 			    "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
20 changes: 13 additions & 7 deletions arch/x86/kvm/svm/svm.c
@@ -2319,9 +2319,11 @@ static int cpuid_interception(struct vcpu_svm *svm)
 static int iret_interception(struct vcpu_svm *svm)
 {
 	++svm->vcpu.stat.nmi_window_exits;
-	svm_clr_intercept(svm, INTERCEPT_IRET);
 	svm->vcpu.arch.hflags |= HF_IRET_MASK;
-	svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
+	if (!sev_es_guest(svm->vcpu.kvm)) {
+		svm_clr_intercept(svm, INTERCEPT_IRET);
+		svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
+	}
 	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 	return 1;
 }
@@ -3302,7 +3304,8 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
 
 	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
 	vcpu->arch.hflags |= HF_NMI_MASK;
-	svm_set_intercept(svm, INTERCEPT_IRET);
+	if (!sev_es_guest(svm->vcpu.kvm))
+		svm_set_intercept(svm, INTERCEPT_IRET);
 	++vcpu->stat.nmi_injections;
 }

@@ -3386,10 +3389,12 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 
 	if (masked) {
 		svm->vcpu.arch.hflags |= HF_NMI_MASK;
-		svm_set_intercept(svm, INTERCEPT_IRET);
+		if (!sev_es_guest(svm->vcpu.kvm))
+			svm_set_intercept(svm, INTERCEPT_IRET);
 	} else {
 		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
-		svm_clr_intercept(svm, INTERCEPT_IRET);
+		if (!sev_es_guest(svm->vcpu.kvm))
+			svm_clr_intercept(svm, INTERCEPT_IRET);
 	}
 }

@@ -3567,8 +3572,9 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 	 * If we've made progress since setting HF_IRET_MASK, we've
 	 * executed an IRET and can allow NMI injection.
 	 */
-	if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
-	    && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
+	if ((svm->vcpu.arch.hflags & HF_IRET_MASK) &&
+	    (sev_es_guest(svm->vcpu.kvm) ||
+	     kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip)) {
 		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
 		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 	}
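
Putting the hunks together, the following compilable toy model (invented toy_* names, no real KVM structures) shows the two completion paths side by side: the legacy path arms the IRET intercept at injection time, while the SEV-ES path leaves IRET alone and waits for the NMI Complete VMGEXIT; both end up reopening the NMI window. It simplifies the real flow, where the unmasking is deferred to svm_complete_interrupts() via HF_IRET_MASK, but the branches mirror the sev_es_guest() checks added above.

/* Toy model of the two NMI-completion paths; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
	bool sev_es;           /* SEV-ES guest? (sev_es_guest() in the diff) */
	bool nmi_masked;       /* models HF_NMI_MASK */
	bool iret_intercepted; /* models INTERCEPT_IRET */
};

/* Loosely mirrors svm_inject_nmi(): mask NMIs until completion is seen. */
static void toy_inject_nmi(struct toy_vcpu *v)
{
	v->nmi_masked = true;
	if (!v->sev_es)
		v->iret_intercepted = true;	/* legacy path: watch for IRET */
}

/* Loosely mirrors iret_interception(): the guest finished the NMI. */
static void toy_nmi_complete(struct toy_vcpu *v)
{
	if (!v->sev_es)
		v->iret_intercepted = false;	/* stop intercepting IRET */
	v->nmi_masked = false;			/* reopen the NMI window */
}

int main(void)
{
	struct toy_vcpu legacy = { .sev_es = false };
	struct toy_vcpu es     = { .sev_es = true };

	toy_inject_nmi(&legacy);
	toy_nmi_complete(&legacy);	/* reached via the IRET intercept */

	toy_inject_nmi(&es);
	toy_nmi_complete(&es);		/* reached via SVM_VMGEXIT_NMI_COMPLETE */

	printf("legacy masked=%d intercepted=%d, sev-es masked=%d intercepted=%d\n",
	       legacy.nmi_masked, legacy.iret_intercepted,
	       es.nmi_masked, es.iret_intercepted);
	return 0;
}

Skipping the RIP comparison for SEV-ES guests in svm_complete_interrupts() fits the same picture: the register state of an SEV-ES guest is encrypted, so there is no meaningful nmi_iret_rip for the hypervisor to record or compare against.
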
