
Commit 9aba641

KVM: arm64: nv: Respect exception routing rules for SEAs
Synchronous external aborts are taken to EL2 if ELIsInHost() or HCR_EL2.TEA=1. Rework the SEA injection plumbing to respect the imposed routing of the guest hypervisor and opportunistically rephrase things to make their function a bit more obvious.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250708172532.1699409-6-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
1 parent aae35f4 commit 9aba641
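Background, not part of the commit: for a nested vCPU the routing decision is driven entirely by the guest hypervisor's HCR_EL2. Below is a commented restatement of the check added in inject_fault.c further down; sea_targets_vel2() is an illustrative rename of the real kvm_sea_target_is_el2() helper, and this is a sketch only, not additional code in the patch.

	/*
	 * Sketch only: mirrors kvm_sea_target_is_el2() from the diff below.
	 * HCR_EL2.TGE roughly corresponds to the ELIsInHost() condition in
	 * the commit message (exceptions from the guest are routed to EL2
	 * while TGE is set), and HCR_EL2.TEA explicitly routes synchronous
	 * external aborts to EL2. Either bit means the SEA must be
	 * delivered to the guest hypervisor at vEL2.
	 */
	static bool sea_targets_vel2(struct kvm_vcpu *vcpu)
	{
		return __vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA);
	}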

6 files changed, +48 -47 lines

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 12 additions & 2 deletions
@@ -46,15 +46,25 @@ void kvm_skip_instr32(struct kvm_vcpu *vcpu);
 
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
 void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
 
+static inline int kvm_inject_sea_dabt(struct kvm_vcpu *vcpu, u64 addr)
+{
+	return kvm_inject_sea(vcpu, false, addr);
+}
+
+static inline int kvm_inject_sea_iabt(struct kvm_vcpu *vcpu, u64 addr)
+{
+	return kvm_inject_sea(vcpu, true, addr);
+}
+
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
 
 void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
 int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
 int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
+int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
 
 static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
 {

arch/arm64/kvm/emulate-nested.c

Lines changed: 10 additions & 0 deletions
@@ -2811,3 +2811,13 @@ int kvm_inject_nested_irq(struct kvm_vcpu *vcpu)
 	/* esr_el2 value doesn't matter for exits due to irqs. */
 	return kvm_inject_nested(vcpu, 0, except_type_irq);
 }
+
+int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
+{
+	u64 esr = FIELD_PREP(ESR_ELx_EC_MASK,
+			     iabt ? ESR_ELx_EC_IABT_LOW : ESR_ELx_EC_DABT_LOW);
+	esr |= ESR_ELx_FSC_EXTABT | ESR_ELx_IL;
+
+	vcpu_write_sys_reg(vcpu, FAR_EL2, addr);
+	return kvm_inject_nested_sync(vcpu, esr);
+}

arch/arm64/kvm/guest.c

Lines changed: 3 additions & 2 deletions
@@ -839,6 +839,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 	bool serror_pending = events->exception.serror_pending;
 	bool has_esr = events->exception.serror_has_esr;
 	bool ext_dabt_pending = events->exception.ext_dabt_pending;
+	int ret = 0;
 
 	if (serror_pending && has_esr) {
 		if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
@@ -853,9 +854,9 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 	}
 
 	if (ext_dabt_pending)
-		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+		ret = kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 
-	return 0;
+	return (ret < 0) ? ret : 0;
 }
 
 u32 __attribute_const__ kvm_target_cpu(void)

arch/arm64/kvm/inject_fault.c

Lines changed: 17 additions & 28 deletions
@@ -155,36 +155,28 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
 	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
 }
 
-/**
- * kvm_inject_dabt - inject a data abort into the guest
- * @vcpu: The VCPU to receive the data abort
- * @addr: The address to report in the DFAR
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- */
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+static void __kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
 {
 	if (vcpu_el1_is_32bit(vcpu))
-		inject_abt32(vcpu, false, addr);
+		inject_abt32(vcpu, iabt, addr);
 	else
-		inject_abt64(vcpu, false, addr);
+		inject_abt64(vcpu, iabt, addr);
 }
 
-/**
- * kvm_inject_pabt - inject a prefetch abort into the guest
- * @vcpu: The VCPU to receive the prefetch abort
- * @addr: The address to report in the DFAR
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- */
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+static bool kvm_sea_target_is_el2(struct kvm_vcpu *vcpu)
 {
-	if (vcpu_el1_is_32bit(vcpu))
-		inject_abt32(vcpu, true, addr);
-	else
-		inject_abt64(vcpu, true, addr);
+	return __vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA);
+}
+
+int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
+{
+	lockdep_assert_held(&vcpu->mutex);
+
+	if (is_nested_ctxt(vcpu) && kvm_sea_target_is_el2(vcpu))
+		return kvm_inject_nested_sea(vcpu, iabt, addr);
+
+	__kvm_inject_sea(vcpu, iabt, addr);
+	return 1;
 }
 
 void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
@@ -194,10 +186,7 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
 	addr = kvm_vcpu_get_fault_ipa(vcpu);
 	addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
 
-	if (kvm_vcpu_trap_is_iabt(vcpu))
-		kvm_inject_pabt(vcpu, addr);
-	else
-		kvm_inject_dabt(vcpu, addr);
+	__kvm_inject_sea(vcpu, kvm_vcpu_trap_is_iabt(vcpu), addr);
 
 	/*
 	 * If AArch64 or LPAE, set FSC to 0 to indicate an Address
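A hedged usage sketch, not part of the commit: since the injection helpers now return the value the exit handler should propagate, callers collapse to a single statement, as the mmio.c and mmu.c hunks below do. The handler name and its bail-out condition here are made up for illustration.

	/* Sketch only: how an exit handler consumes the reworked API. */
	static int handle_hypothetical_abort(struct kvm_vcpu *vcpu)
	{
		/*
		 * kvm_inject_sea_dabt() picks vEL1 or vEL2 according to the
		 * guest hypervisor's routing and returns the handler return
		 * value: 1 to resume the guest, or a negative error code.
		 */
		if (!kvm_vcpu_dabt_isvalid(vcpu))	/* placeholder policy */
			return kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

		/* ...normal emulation path... */
		return 1;
	}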

arch/arm64/kvm/mmio.c

Lines changed: 2 additions & 4 deletions
@@ -169,10 +169,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 	trace_kvm_mmio_nisv(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
 			    kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
-	if (vcpu_is_protected(vcpu)) {
-		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-		return 1;
-	}
+	if (vcpu_is_protected(vcpu))
+		return kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 
 	if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
 		     &vcpu->kvm->arch.flags)) {

arch/arm64/kvm/mmu.c

Lines changed: 4 additions & 11 deletions
@@ -1836,11 +1836,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 		if (fault_ipa >= BIT_ULL(VTCR_EL2_IPA(vcpu->arch.hw_mmu->vtcr))) {
 			fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
 
-			if (is_iabt)
-				kvm_inject_pabt(vcpu, fault_ipa);
-			else
-				kvm_inject_dabt(vcpu, fault_ipa);
-			return 1;
+			return kvm_inject_sea(vcpu, is_iabt, fault_ipa);
 		}
 	}
 
@@ -1912,8 +1908,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	}
 
 	if (kvm_vcpu_abt_iss1tw(vcpu)) {
-		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-		ret = 1;
+		ret = kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		goto out_unlock;
 	}
 
@@ -1958,10 +1953,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	if (ret == 0)
 		ret = 1;
 out:
-	if (ret == -ENOEXEC) {
-		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-		ret = 1;
-	}
+	if (ret == -ENOEXEC)
+		ret = kvm_inject_sea_iabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 out_unlock:
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	return ret;
