@@ -18,6 +18,7 @@
 
 #include "x86.h"
 #include "svm.h"
+#include "cpuid.h"
 
 static int sev_flush_asids(void);
 static DECLARE_RWSEM(sev_deactivate_lock);
@@ -1257,11 +1258,226 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
 	__free_page(virt_to_page(svm->vmsa));
 }
 
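+/* Dump the GHCB contents for debugging, gated on the dump_invalid_vmcb parameter */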
+static void dump_ghcb(struct vcpu_svm *svm)
+{
+	struct ghcb *ghcb = svm->ghcb;
+	unsigned int nbits;
+
+	/* Re-use the dump_invalid_vmcb module parameter */
+	if (!dump_invalid_vmcb) {
+		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
+		return;
+	}
+
+	nbits = sizeof(ghcb->save.valid_bitmap) * 8;
+
+	pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
+	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
+	       ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
+	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
+	       ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
+	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
+	       ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
+	pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
+	       ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
+	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
+}
+
+static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
+{
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	struct ghcb *ghcb = svm->ghcb;
+
+	/*
+	 * The GHCB protocol so far allows for the following data
+	 * to be returned:
+	 *   GPRs RAX, RBX, RCX, RDX
+	 *
+	 * Copy their values to the GHCB if they are dirty.
+	 */
+	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RAX))
+		ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
+	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RBX))
+		ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
+	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RCX))
+		ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
+	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RDX))
+		ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
+}
+
+static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
+{
+	struct vmcb_control_area *control = &svm->vmcb->control;
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	struct ghcb *ghcb = svm->ghcb;
+	u64 exit_code;
+
+	/*
+	 * The GHCB protocol so far allows for the following data
+	 * to be supplied:
+	 *   GPRs RAX, RBX, RCX, RDX
+	 *   XCR0
+	 *   CPL
+	 *
+	 * VMMCALL allows the guest to provide extra registers. KVM also
+	 * expects RSI for hypercalls, so include that, too.
+	 *
+	 * Copy their values to the appropriate location if supplied.
+	 */
+	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
+
+	vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
+	vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
+	vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
+	vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
+	vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
+
+	svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
+
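+	/* A new XCR0 changes the CPUID.0xD output, so refresh KVM's runtime CPUID state */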
+	if (ghcb_xcr0_is_valid(ghcb)) {
+		vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
+		kvm_update_cpuid_runtime(vcpu);
+	}
+
+	/* Copy the GHCB exit information into the VMCB fields */
+	exit_code = ghcb_get_sw_exit_code(ghcb);
+	control->exit_code = lower_32_bits(exit_code);
+	control->exit_code_hi = upper_32_bits(exit_code);
+	control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
+	control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
+
+	/* Clear the valid entries fields */
+	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+}
+
+static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+{
+	struct kvm_vcpu *vcpu;
+	struct ghcb *ghcb;
+	u64 exit_code = 0;
+
+	ghcb = svm->ghcb;
+
+	/* Only GHCB Usage code 0 is supported */
+	if (ghcb->ghcb_usage)
+		goto vmgexit_err;
+
+	/*
+	 * Retrieve the exit code now even though it may not be marked valid,
+	 * as it could help with debugging.
+	 */
+	exit_code = ghcb_get_sw_exit_code(ghcb);
+
+	if (!ghcb_sw_exit_code_is_valid(ghcb) ||
+	    !ghcb_sw_exit_info_1_is_valid(ghcb) ||
+	    !ghcb_sw_exit_info_2_is_valid(ghcb))
+		goto vmgexit_err;
+
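+	/* Ensure the guest supplied the registers that each exit code requires */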
+	switch (ghcb_get_sw_exit_code(ghcb)) {
+	case SVM_EXIT_READ_DR7:
+		break;
+	case SVM_EXIT_WRITE_DR7:
+		if (!ghcb_rax_is_valid(ghcb))
+			goto vmgexit_err;
+		break;
+	case SVM_EXIT_RDTSC:
+		break;
+	case SVM_EXIT_RDPMC:
+		if (!ghcb_rcx_is_valid(ghcb))
+			goto vmgexit_err;
+		break;
+	case SVM_EXIT_CPUID:
+		if (!ghcb_rax_is_valid(ghcb) ||
+		    !ghcb_rcx_is_valid(ghcb))
+			goto vmgexit_err;
+		if (ghcb_get_rax(ghcb) == 0xd)
+			if (!ghcb_xcr0_is_valid(ghcb))
+				goto vmgexit_err;
+		break;
+	case SVM_EXIT_INVD:
+		break;
+	case SVM_EXIT_IOIO:
+		if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
+			if (!ghcb_rax_is_valid(ghcb))
+				goto vmgexit_err;
+		break;
+	case SVM_EXIT_MSR:
+		if (!ghcb_rcx_is_valid(ghcb))
+			goto vmgexit_err;
+		if (ghcb_get_sw_exit_info_1(ghcb)) {
+			if (!ghcb_rax_is_valid(ghcb) ||
+			    !ghcb_rdx_is_valid(ghcb))
+				goto vmgexit_err;
+		}
+		break;
+	case SVM_EXIT_VMMCALL:
+		if (!ghcb_rax_is_valid(ghcb) ||
+		    !ghcb_cpl_is_valid(ghcb))
+			goto vmgexit_err;
+		break;
+	case SVM_EXIT_RDTSCP:
+		break;
+	case SVM_EXIT_WBINVD:
+		break;
+	case SVM_EXIT_MONITOR:
+		if (!ghcb_rax_is_valid(ghcb) ||
+		    !ghcb_rcx_is_valid(ghcb) ||
+		    !ghcb_rdx_is_valid(ghcb))
+			goto vmgexit_err;
+		break;
+	case SVM_EXIT_MWAIT:
+		if (!ghcb_rax_is_valid(ghcb) ||
+		    !ghcb_rcx_is_valid(ghcb))
+			goto vmgexit_err;
+		break;
+	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
+		break;
+	default:
+		goto vmgexit_err;
+	}
+
+	return 0;
+
+vmgexit_err:
+	vcpu = &svm->vcpu;
+
+	if (ghcb->ghcb_usage) {
+		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
+			    ghcb->ghcb_usage);
+	} else {
+		vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
+			    exit_code);
+		dump_ghcb(svm);
+	}
+
+	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
+	vcpu->run->internal.ndata = 2;
+	vcpu->run->internal.data[0] = exit_code;
+	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
+
+	return -EINVAL;
+}
+
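+/* Sync state back to the GHCB and unmap it before re-entering the guest */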
+static void pre_sev_es_run(struct vcpu_svm *svm)
+{
+	if (!svm->ghcb)
+		return;
+
+	sev_es_sync_to_ghcb(svm);
+
+	kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
+	svm->ghcb = NULL;
+}
+
 void pre_sev_run(struct vcpu_svm *svm, int cpu)
 {
 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 	int asid = sev_get_asid(svm->vcpu.kvm);
 
+	/* Perform any SEV-ES pre-run actions */
+	pre_sev_es_run(svm);
+
 	/* Assign the asid allocated with this SEV guest */
 	svm->asid = asid;
 
@@ -1279,3 +1495,59 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
 	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 }
+
+static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
+{
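+	/* MSR-based GHCB protocol requests are not supported yet; reject them */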
+	return -EINVAL;
+}
+
+int sev_handle_vmgexit(struct vcpu_svm *svm)
+{
+	struct vmcb_control_area *control = &svm->vmcb->control;
+	u64 ghcb_gpa, exit_code;
+	struct ghcb *ghcb;
+	int ret;
+
+	/* Validate the GHCB */
+	ghcb_gpa = control->ghcb_gpa;
+	if (ghcb_gpa & GHCB_MSR_INFO_MASK)
+		return sev_handle_vmgexit_msr_protocol(svm);
+
+	if (!ghcb_gpa) {
+		vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB gpa is not set\n");
+		return -EINVAL;
+	}
+
+	if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
+		/* Unable to map GHCB from guest */
+		vcpu_unimpl(&svm->vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
+			    ghcb_gpa);
+		return -EINVAL;
+	}
+
+	svm->ghcb = svm->ghcb_map.hva;
+	ghcb = svm->ghcb_map.hva;
+
+	exit_code = ghcb_get_sw_exit_code(ghcb);
+
+	ret = sev_es_validate_vmgexit(svm);
+	if (ret)
+		return ret;
+
+	sev_es_sync_from_ghcb(svm);
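+	/* Default the exit result fields to "no error"; handlers may overwrite them */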
+	ghcb_set_sw_exit_info_1(ghcb, 0);
+	ghcb_set_sw_exit_info_2(ghcb, 0);
+
+	ret = -EINVAL;
+	switch (exit_code) {
+	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
+		vcpu_unimpl(&svm->vcpu,
+			    "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
+			    control->exit_info_1, control->exit_info_2);
+		break;
+	default:
+		ret = svm_invoke_exit_handler(svm, exit_code);
+	}
+
+	return ret;
+}
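
Note: the ghcb_set_*(), ghcb_get_*_if_valid() and ghcb_*_is_valid() helpers used throughout this patch are not part of the diff; in the kernel they are generated per-field by the DEFINE_GHCB_ACCESSORS() macro in arch/x86/include/asm/svm.h. A minimal kernel-context sketch of the pattern for one field is shown below; it is illustrative rather than the macro's literal output, and the save-area struct name in GHCB_BITMAP_IDX() is an assumption that varies across kernel versions.

	/*
	 * Illustrative sketch of a GHCB accessor trio. The valid_bitmap is
	 * indexed by the field's qword offset within the GHCB save area:
	 * setters record the value and mark its bit; getters and validity
	 * checks consult the bitmap.
	 */
	#include <linux/bitops.h>
	#include <linux/stddef.h>

	#define GHCB_BITMAP_IDX(field) \
		(offsetof(struct vmcb_save_area, field) / sizeof(u64))

	static inline bool ghcb_rax_is_valid(const struct ghcb *ghcb)
	{
		return test_bit(GHCB_BITMAP_IDX(rax),
				(unsigned long *)&ghcb->save.valid_bitmap);
	}

	static inline void ghcb_set_rax(struct ghcb *ghcb, u64 value)
	{
		__set_bit(GHCB_BITMAP_IDX(rax),
			  (unsigned long *)&ghcb->save.valid_bitmap);
		ghcb->save.rax = value;
	}

	static inline u64 ghcb_get_rax_if_valid(struct ghcb *ghcb)
	{
		return ghcb_rax_is_valid(ghcb) ? ghcb->save.rax : 0;
	}

On the GHCB_MSR_INFO_MASK test at the top of sev_handle_vmgexit(): the GHCB MSR doubles as a request channel, with its low bits carrying MSR-protocol information. A nonzero info field therefore means the written value is a protocol request rather than a page-aligned GHCB address, which is why the MSR protocol path is dispatched before the GPA is mapped.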