Commit 291bd20

tlendacky authored and bonzini committed
KVM: SVM: Add initial support for a VMGEXIT VMEXIT
SEV-ES adds a new VMEXIT reason code, VMGEXIT. Initial support for a VMGEXIT includes mapping the GHCB based on the guest GPA, which is obtained from a new VMCB field, and then validating the required inputs for the VMGEXIT exit reason.

Since many of the VMGEXIT exit reasons correspond to existing VMEXIT reasons, the information from the GHCB is copied into the VMCB control exit code areas and KVM register areas. The standard exit handlers are invoked, similar to standard VMEXIT processing. Before restarting the vCPU, the GHCB is updated with any registers that have been updated by the hypervisor.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Message-Id: <c6a4ed4294a369bd75c44d03bd7ce0f0c3840e50.1607620209.git.thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent e9093fd commit 291bd20
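
For context, the GHCB handshake this exit handler services looks roughly as follows from the guest side. This is an illustrative sketch only, not code from this patch (Linux guest support lives in the separate SEV-ES guest series); ghcb_hv_call() is a hypothetical helper, and it assumes a shared, unencrypted GHCB page has already been registered with the hypervisor via the GHCB MSR.

/*
 * Hypothetical guest-side helper: fill the GHCB, request a VMEXIT-style
 * service, and read back the hypervisor's response. The ghcb_set_*()
 * accessors also mark each field valid in ghcb->save.valid_bitmap,
 * which is what sev_es_validate_vmgexit() checks on the KVM side.
 */
static u64 ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
			u64 exit_info_1, u64 exit_info_2)
{
	ghcb->protocol_version = 1;	/* version 1 of the GHCB protocol */
	ghcb->ghcb_usage = 0;		/* KVM rejects any other usage code */

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	/* VMGEXIT is encoded as "rep; vmmcall" and traps to the hypervisor */
	asm volatile("rep; vmmcall" ::: "memory");

	/* KVM writes status/results into sw_exit_info_1/2 before resuming */
	return ghcb->save.sw_exit_info_1;
}

On return, any registers the hypervisor updated (RAX/RBX/RCX/RDX, per sev_es_sync_to_ghcb() in the diff below) can be read back out of the GHCB.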

File tree

5 files changed (+292, -3 lines)


arch/x86/include/asm/svm.h

Lines changed: 1 addition & 1 deletion

@@ -130,7 +130,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u32 exit_int_info_err;
 	u64 nested_ctl;
 	u64 avic_vapic_bar;
-	u8 reserved_4[8];
+	u64 ghcb_gpa;
 	u32 event_inj;
 	u32 event_inj_err;
 	u64 nested_cr3;

arch/x86/include/uapi/asm/svm.h

Lines changed: 7 additions & 0 deletions

@@ -81,6 +81,7 @@
 #define SVM_EXIT_NPF				0x400
 #define SVM_EXIT_AVIC_INCOMPLETE_IPI		0x401
 #define SVM_EXIT_AVIC_UNACCELERATED_ACCESS	0x402
+#define SVM_EXIT_VMGEXIT			0x403
 
 /* SEV-ES software-defined VMGEXIT events */
 #define SVM_VMGEXIT_MMIO_READ			0x80000001
@@ -187,6 +188,12 @@
 	{ SVM_EXIT_NPF,		"npf" }, \
 	{ SVM_EXIT_AVIC_INCOMPLETE_IPI,		"avic_incomplete_ipi" }, \
 	{ SVM_EXIT_AVIC_UNACCELERATED_ACCESS,	"avic_unaccelerated_access" }, \
+	{ SVM_EXIT_VMGEXIT,		"vmgexit" }, \
+	{ SVM_VMGEXIT_MMIO_READ,	"vmgexit_mmio_read" }, \
+	{ SVM_VMGEXIT_MMIO_WRITE,	"vmgexit_mmio_write" }, \
+	{ SVM_VMGEXIT_NMI_COMPLETE,	"vmgexit_nmi_complete" }, \
+	{ SVM_VMGEXIT_AP_HLT_LOOP,	"vmgexit_ap_hlt_loop" }, \
+	{ SVM_VMGEXIT_AP_JUMP_TABLE,	"vmgexit_ap_jump_table" }, \
 	{ SVM_EXIT_ERR,		"invalid_guest_state" }
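
The new entries extend the SVM_EXIT_REASONS name table, which KVM tracepoints decode with __print_symbolic(). A minimal sketch of that consumption pattern, using a hypothetical tracepoint name (the real consumer is KVM's kvm_exit tracepoint in arch/x86/kvm/trace.h):

/* Hypothetical tracepoint showing how SVM_EXIT_REASONS is consumed */
TRACE_EVENT(demo_svm_exit,
	TP_PROTO(u64 exit_code),
	TP_ARGS(exit_code),

	TP_STRUCT__entry(
		__field(u64, exit_code)
	),

	TP_fast_assign(
		__entry->exit_code = exit_code;
	),

	/* Prints "vmgexit", "vmgexit_mmio_read", etc. for matching codes */
	TP_printk("reason=%s",
		  __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS))
);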

arch/x86/kvm/svm/sev.c

Lines changed: 272 additions & 0 deletions

@@ -18,6 +18,7 @@
 
 #include "x86.h"
 #include "svm.h"
+#include "cpuid.h"
 
 static int sev_flush_asids(void);
 static DECLARE_RWSEM(sev_deactivate_lock);
@@ -1257,11 +1258,226 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
 	__free_page(virt_to_page(svm->vmsa));
 }
 
+static void dump_ghcb(struct vcpu_svm *svm)
+{
+	struct ghcb *ghcb = svm->ghcb;
+	unsigned int nbits;
+
+	/* Re-use the dump_invalid_vmcb module parameter */
+	if (!dump_invalid_vmcb) {
+		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
+		return;
+	}
+
+	nbits = sizeof(ghcb->save.valid_bitmap) * 8;
+
+	pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
+	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
+	       ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
+	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
+	       ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
+	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
+	       ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
+	pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
+	       ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
+	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
+}
+
+static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
+{
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	struct ghcb *ghcb = svm->ghcb;
+
+	/*
+	 * The GHCB protocol so far allows for the following data
+	 * to be returned:
+	 *   GPRs RAX, RBX, RCX, RDX
+	 *
+	 * Copy their values to the GHCB if they are dirty.
+	 */
+	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RAX))
+		ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
+	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RBX))
+		ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
+	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RCX))
+		ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
+	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RDX))
+		ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
+}
+
+static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
+{
+	struct vmcb_control_area *control = &svm->vmcb->control;
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	struct ghcb *ghcb = svm->ghcb;
+	u64 exit_code;
+
+	/*
+	 * The GHCB protocol so far allows for the following data
+	 * to be supplied:
+	 *   GPRs RAX, RBX, RCX, RDX
+	 *   XCR0
+	 *   CPL
+	 *
+	 * VMMCALL allows the guest to provide extra registers. KVM also
+	 * expects RSI for hypercalls, so include that, too.
+	 *
+	 * Copy their values to the appropriate location if supplied.
+	 */
+	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
+
+	vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
+	vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
+	vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
+	vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
+	vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
+
+	svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
+
+	if (ghcb_xcr0_is_valid(ghcb)) {
+		vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
+		kvm_update_cpuid_runtime(vcpu);
+	}
+
+	/* Copy the GHCB exit information into the VMCB fields */
+	exit_code = ghcb_get_sw_exit_code(ghcb);
+	control->exit_code = lower_32_bits(exit_code);
+	control->exit_code_hi = upper_32_bits(exit_code);
+	control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
+	control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
+
+	/* Clear the valid entries fields */
+	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+}
+
+static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+{
+	struct kvm_vcpu *vcpu;
+	struct ghcb *ghcb;
+	u64 exit_code = 0;
+
+	ghcb = svm->ghcb;
+
+	/* Only GHCB Usage code 0 is supported */
+	if (ghcb->ghcb_usage)
+		goto vmgexit_err;
+
+	/*
+	 * Retrieve the exit code now even though it may not be marked valid
+	 * as it could help with debugging.
+	 */
+	exit_code = ghcb_get_sw_exit_code(ghcb);
+
+	if (!ghcb_sw_exit_code_is_valid(ghcb) ||
+	    !ghcb_sw_exit_info_1_is_valid(ghcb) ||
+	    !ghcb_sw_exit_info_2_is_valid(ghcb))
+		goto vmgexit_err;
+
+	switch (ghcb_get_sw_exit_code(ghcb)) {
+	case SVM_EXIT_READ_DR7:
+		break;
+	case SVM_EXIT_WRITE_DR7:
+		if (!ghcb_rax_is_valid(ghcb))
+			goto vmgexit_err;
+		break;
+	case SVM_EXIT_RDTSC:
+		break;
+	case SVM_EXIT_RDPMC:
+		if (!ghcb_rcx_is_valid(ghcb))
+			goto vmgexit_err;
+		break;
+	case SVM_EXIT_CPUID:
+		if (!ghcb_rax_is_valid(ghcb) ||
+		    !ghcb_rcx_is_valid(ghcb))
+			goto vmgexit_err;
+		if (ghcb_get_rax(ghcb) == 0xd)
+			if (!ghcb_xcr0_is_valid(ghcb))
+				goto vmgexit_err;
+		break;
+	case SVM_EXIT_INVD:
+		break;
+	case SVM_EXIT_IOIO:
+		if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
+			if (!ghcb_rax_is_valid(ghcb))
+				goto vmgexit_err;
+		break;
+	case SVM_EXIT_MSR:
+		if (!ghcb_rcx_is_valid(ghcb))
+			goto vmgexit_err;
+		if (ghcb_get_sw_exit_info_1(ghcb)) {
+			if (!ghcb_rax_is_valid(ghcb) ||
+			    !ghcb_rdx_is_valid(ghcb))
+				goto vmgexit_err;
+		}
+		break;
+	case SVM_EXIT_VMMCALL:
+		if (!ghcb_rax_is_valid(ghcb) ||
+		    !ghcb_cpl_is_valid(ghcb))
+			goto vmgexit_err;
+		break;
+	case SVM_EXIT_RDTSCP:
+		break;
+	case SVM_EXIT_WBINVD:
+		break;
+	case SVM_EXIT_MONITOR:
+		if (!ghcb_rax_is_valid(ghcb) ||
+		    !ghcb_rcx_is_valid(ghcb) ||
+		    !ghcb_rdx_is_valid(ghcb))
+			goto vmgexit_err;
+		break;
+	case SVM_EXIT_MWAIT:
+		if (!ghcb_rax_is_valid(ghcb) ||
+		    !ghcb_rcx_is_valid(ghcb))
+			goto vmgexit_err;
+		break;
+	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
+		break;
+	default:
+		goto vmgexit_err;
+	}
+
+	return 0;
+
+vmgexit_err:
+	vcpu = &svm->vcpu;
+
+	if (ghcb->ghcb_usage) {
+		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
+			    ghcb->ghcb_usage);
+	} else {
+		vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
+			    exit_code);
+		dump_ghcb(svm);
+	}
+
+	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
+	vcpu->run->internal.ndata = 2;
+	vcpu->run->internal.data[0] = exit_code;
+	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
+
+	return -EINVAL;
+}
+
+static void pre_sev_es_run(struct vcpu_svm *svm)
+{
+	if (!svm->ghcb)
+		return;
+
+	sev_es_sync_to_ghcb(svm);
+
+	kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
+	svm->ghcb = NULL;
+}
+
 void pre_sev_run(struct vcpu_svm *svm, int cpu)
 {
 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 	int asid = sev_get_asid(svm->vcpu.kvm);
 
+	/* Perform any SEV-ES pre-run actions */
+	pre_sev_es_run(svm);
+
 	/* Assign the asid allocated with this SEV guest */
 	svm->asid = asid;
 
@@ -1279,3 +1495,59 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
 	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 }
+
+static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
+{
+	return -EINVAL;
+}
+
+int sev_handle_vmgexit(struct vcpu_svm *svm)
+{
+	struct vmcb_control_area *control = &svm->vmcb->control;
+	u64 ghcb_gpa, exit_code;
+	struct ghcb *ghcb;
+	int ret;
+
+	/* Validate the GHCB */
+	ghcb_gpa = control->ghcb_gpa;
+	if (ghcb_gpa & GHCB_MSR_INFO_MASK)
+		return sev_handle_vmgexit_msr_protocol(svm);
+
+	if (!ghcb_gpa) {
+		vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB gpa is not set\n");
+		return -EINVAL;
+	}
+
+	if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
+		/* Unable to map GHCB from guest */
+		vcpu_unimpl(&svm->vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
+			    ghcb_gpa);
+		return -EINVAL;
+	}
+
+	svm->ghcb = svm->ghcb_map.hva;
+	ghcb = svm->ghcb_map.hva;
+
+	exit_code = ghcb_get_sw_exit_code(ghcb);
+
+	ret = sev_es_validate_vmgexit(svm);
+	if (ret)
+		return ret;
+
+	sev_es_sync_from_ghcb(svm);
+	ghcb_set_sw_exit_info_1(ghcb, 0);
+	ghcb_set_sw_exit_info_2(ghcb, 0);
+
+	ret = -EINVAL;
+	switch (exit_code) {
+	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
+		vcpu_unimpl(&svm->vcpu,
+			    "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
+			    control->exit_info_1, control->exit_info_2);
+		break;
+	default:
+		ret = svm_invoke_exit_handler(svm, exit_code);
+	}
+
+	return ret;
+}
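
The ghcb_*_is_valid(), ghcb_get_*_if_valid() and ghcb_set_*() helpers used throughout this file are generated per GHCB field elsewhere in arch/x86/include/asm/svm.h. A simplified sketch of that generator, paraphrased from the SEV-ES series (the kernel's exact macro may differ in detail):

/*
 * Simplified sketch: each GHCB save-area field gets a trio of accessors.
 * The valid_bitmap index is derived from the field's byte offset, and
 * the setter marks the field valid as a side effect.
 */
#define GHCB_BITMAP_IDX(field)						\
	(offsetof(struct vmcb_save_area, field) / sizeof(u64))

#define DEFINE_GHCB_ACCESSORS(field)					\
	static inline bool ghcb_##field##_is_valid(struct ghcb *ghcb)	\
	{								\
		return test_bit(GHCB_BITMAP_IDX(field),			\
				(unsigned long *)&ghcb->save.valid_bitmap); \
	}								\
									\
	static inline u64 ghcb_get_##field##_if_valid(struct ghcb *ghcb) \
	{								\
		return ghcb_##field##_is_valid(ghcb) ? ghcb->save.field : 0; \
	}								\
									\
	static inline void ghcb_set_##field(struct ghcb *ghcb, u64 value) \
	{								\
		__set_bit(GHCB_BITMAP_IDX(field),			\
			  (unsigned long *)&ghcb->save.valid_bitmap);	\
		ghcb->save.field = value;				\
	}

DEFINE_GHCB_ACCESSORS(rax)	/* ghcb_rax_is_valid(), ghcb_set_rax(), ... */

This pairing is why sev_es_sync_from_ghcb() can simply zero vcpu->arch.regs up front: ghcb_get_*_if_valid() yields 0 for any register the guest did not mark valid.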

arch/x86/kvm/svm/svm.c

Lines changed: 4 additions & 2 deletions

@@ -194,7 +194,7 @@ module_param(sev, int, 0444);
 int sev_es = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
 module_param(sev_es, int, 0444);
 
-static bool __read_mostly dump_invalid_vmcb = 0;
+bool __read_mostly dump_invalid_vmcb;
 module_param(dump_invalid_vmcb, bool, 0644);
 
 static u8 rsm_ins_bytes[] = "\x0f\xaa";
@@ -2977,6 +2977,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_RSM]				= rsm_interception,
 	[SVM_EXIT_AVIC_INCOMPLETE_IPI]		= avic_incomplete_ipi_interception,
 	[SVM_EXIT_AVIC_UNACCELERATED_ACCESS]	= avic_unaccelerated_access_interception,
+	[SVM_EXIT_VMGEXIT]			= sev_handle_vmgexit,
 };
 
 static void dump_vmcb(struct kvm_vcpu *vcpu)
@@ -3018,6 +3019,7 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
 	pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
 	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
 	pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
+	pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
 	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
 	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
 	pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
@@ -3114,7 +3116,7 @@ static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
 	return -EINVAL;
 }
 
-static int svm_invoke_exit_handler(struct vcpu_svm *svm, u64 exit_code)
+int svm_invoke_exit_handler(struct vcpu_svm *svm, u64 exit_code)
 {
 	if (svm_handle_invalid_exit(&svm->vcpu, exit_code))
 		return 0;
