Skip to content

Commit

Permalink
KVM: x86: Move HF_NMI_MASK and HF_IRET_MASK into "struct vcpu_svm"
Browse files Browse the repository at this point in the history
Move HF_NMI_MASK and HF_IRET_MASK (a.k.a. "waiting for IRET") out of the
common "hflags" and into dedicated flags in "struct vcpu_svm".  The flags
are used only by SVM and thus should not live in the common hflags.

Tracking NMI masking in software isn't SVM specific, e.g. VMX has a
similar flag (soft_vnmi_blocked), but that flag is much more of a hack as
VMX can't intercept IRET, is useful only for ancient CPUs, i.e. will
hopefully be removed at some point, and again its exact behavior is
vendor specific and shouldn't ever be referenced in common code, so
converting VMX isn't worthwhile.

No functional change is intended.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Tested-by: Santosh Shukla <Santosh.Shukla@amd.com>
Link: https://lore.kernel.org/r/20221129193717.513824-5-mlevitsk@redhat.com
[sean: split from HF_GIF_MASK patch]
Signed-off-by: Sean Christopherson <seanjc@google.com>
  • Loading branch information
Maxim Levitsky authored and sean-jc committed Jan 31, 2023
1 parent c760e86 commit 916b54a
Show file tree
Hide file tree
Showing 3 changed files with 31 additions and 11 deletions.
2 changes: 0 additions & 2 deletions arch/x86/include/asm/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -2074,8 +2074,6 @@ enum {
TASK_SWITCH_GATE = 3,
};

#define HF_NMI_MASK (1 << 3)
#define HF_IRET_MASK (1 << 4)
#define HF_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */

#ifdef CONFIG_KVM_SMM
Expand Down
22 changes: 13 additions & 9 deletions arch/x86/kvm/svm/svm.c
Original file line number Diff line number Diff line change
Expand Up @@ -1338,6 +1338,9 @@ static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
vcpu->arch.microcode_version = 0x01000065;
svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio;

svm->nmi_masked = false;
svm->awaiting_iret_completion = false;

if (sev_es_guest(vcpu->kvm))
sev_es_vcpu_reset(svm);
}
Expand Down Expand Up @@ -2482,7 +2485,7 @@ static int iret_interception(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);

++vcpu->stat.nmi_window_exits;
vcpu->arch.hflags |= HF_IRET_MASK;
svm->awaiting_iret_completion = true;
if (!sev_es_guest(vcpu->kvm)) {
svm_clr_intercept(svm, INTERCEPT_IRET);
svm->nmi_iret_rip = kvm_rip_read(vcpu);
Expand Down Expand Up @@ -3478,7 +3481,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
if (svm->nmi_l1_to_l2)
return;

vcpu->arch.hflags |= HF_NMI_MASK;
svm->nmi_masked = true;
if (!sev_es_guest(vcpu->kvm))
svm_set_intercept(svm, INTERCEPT_IRET);
++vcpu->stat.nmi_injections;
Expand Down Expand Up @@ -3591,7 +3594,7 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
return false;

return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
(vcpu->arch.hflags & HF_NMI_MASK);
svm->nmi_masked;
}

static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
Expand All @@ -3611,19 +3614,19 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)

static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
return !!(vcpu->arch.hflags & HF_NMI_MASK);
return to_svm(vcpu)->nmi_masked;
}

static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
struct vcpu_svm *svm = to_svm(vcpu);

if (masked) {
vcpu->arch.hflags |= HF_NMI_MASK;
svm->nmi_masked = true;
if (!sev_es_guest(vcpu->kvm))
svm_set_intercept(svm, INTERCEPT_IRET);
} else {
vcpu->arch.hflags &= ~HF_NMI_MASK;
svm->nmi_masked = false;
if (!sev_es_guest(vcpu->kvm))
svm_clr_intercept(svm, INTERCEPT_IRET);
}
Expand Down Expand Up @@ -3709,7 +3712,7 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);

if ((vcpu->arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK)
if (svm->nmi_masked && !svm->awaiting_iret_completion)
return; /* IRET will cause a vm exit */

if (!gif_set(svm)) {
Expand Down Expand Up @@ -3833,10 +3836,11 @@ static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
* If we've made progress since setting HF_IRET_MASK, we've
* executed an IRET and can allow NMI injection.
*/
if ((vcpu->arch.hflags & HF_IRET_MASK) &&
if (svm->awaiting_iret_completion &&
(sev_es_guest(vcpu->kvm) ||
kvm_rip_read(vcpu) != svm->nmi_iret_rip)) {
vcpu->arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
svm->awaiting_iret_completion = false;
svm->nmi_masked = false;
kvm_make_request(KVM_REQ_EVENT, vcpu);
}

Expand Down
18 changes: 18 additions & 0 deletions arch/x86/kvm/svm/svm.h
Original file line number Diff line number Diff line change
Expand Up @@ -230,8 +230,26 @@ struct vcpu_svm {

struct svm_nested_state nested;

/* NMI mask value, used when vNMI is not enabled */
bool nmi_masked;

/*
* True when NMIs are still masked but guest IRET was just intercepted
* and KVM is waiting for RIP to change, which will signal that the
* intercepted IRET was retired and thus NMI can be unmasked.
*/
bool awaiting_iret_completion;

/*
* Set when KVM is awaiting IRET completion and needs to inject NMIs as
* soon as the IRET completes (e.g. NMI is pending injection). KVM
* temporarily steals RFLAGS.TF to single-step the guest in this case
* in order to regain control as soon as the NMI-blocking condition
* goes away.
*/
bool nmi_singlestep;
u64 nmi_singlestep_guest_rflags;

bool nmi_l1_to_l2;

unsigned long soft_int_csbase;
Expand Down

0 comments on commit 916b54a

Please sign in to comment.