This repository was archived by the owner on Jan 28, 2023. It is now read-only.
Merged
257 changes: 0 additions & 257 deletions core/ept.c
@@ -37,11 +37,8 @@
* mapping.
*/

#include "../include/hax.h"
#include "include/ept.h"
#include "include/cpu.h"
#include "include/paging.h"
#include "include/vtlb.h"

static uint64_t ept_capabilities;

@@ -77,251 +74,6 @@ static bool ept_has_cap(uint64_t cap)
return (ept_capabilities & cap) != 0;
}

// Get the PDE entry for the specified gpa in EPT
static epte_t * ept_get_pde(struct hax_ept *ept, hax_paddr_t gpa)
{
epte_t *e;
uint which_g = gpa >> 30;
// The PML4 and PDPT levels need 2 pages
uint64_t offset = (2 + which_g) * PAGE_SIZE_4K;
// Need Xiantao's check
unsigned char *ept_addr = hax_page_va(ept->ept_root_page);

hax_assert(which_g < EPT_MAX_MEM_G);

e = (epte_t *)(ept_addr + offset) + ept_get_pde_idx(gpa);
return e;
}
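
For context: the pre-allocated root block is laid out as one PML4 page, one PDPT page, and then one page directory (PD) page per GiB of guest memory, which is why the PDE for a given GPA lives at offset (2 + which_g) * PAGE_SIZE_4K. A minimal sketch of the index math this code relies on, assuming the standard 4-level EPT layout (ept_get_pde_idx itself is defined elsewhere in the tree):

// Sketch only: bits 29:21 of the GPA select one of the 512 PDEs in a
// 4K page directory page; ept_get_pde_idx() is assumed to reduce to this.
static inline uint ept_get_pde_idx_sketch(hax_paddr_t gpa)
{
return (uint)((gpa >> 21) & 0x1ff);
}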

// ept_set_pte: callers use this to set up a p2m (GPA -> HPA) mapping for the guest.
bool ept_set_pte(hax_vm_t *hax_vm, hax_paddr_t gpa, hax_paddr_t hpa, uint emt,
uint mem_type, bool *is_modified)
{
bool ret = true;
struct hax_page *page;
hax_paddr_t pte_ha;
epte_t *pte;
void *pte_base, *addr;
struct hax_ept *ept = hax_vm->ept;
uint which_g = gpa >> 30;
uint perm;
epte_t *pde = ept_get_pde(ept, gpa);

// hax_log(HAX_LOGD, "hpa %llx gpa %llx\n", hpa, gpa);
if (which_g >= EPT_MAX_MEM_G) {
hax_log(HAX_LOGE, "Error: Guest's memory size is beyond %dG!\n",
EPT_MAX_MEM_G);
return false;
}
hax_mutex_lock(hax_vm->vm_lock);
if (!epte_is_present(pde)) {
if (mem_type == EPT_TYPE_NONE) { // unmap
// Don't bother allocating the PT
goto out_unlock;
}

page = hax_alloc_page(0, 1);
if (!page) {
ret = false;
goto out_unlock;
}

hax_list_add(&page->list, &ept->ept_page_list);
addr = hax_page_va(page);
memset(addr, 0, PAGE_SIZE_4K);
pte_ha = hax_page_pa(page);
// Non-leaf entries always grant full access rights
epte_set_entry(pde, pte_ha, 7, EMT_NONE);
}

// Map the page table and locate the PTE
pte_base = hax_vmap_pfn(pde->addr);
if (!pte_base) {
ret = false;
goto out_unlock;
}
pte = (epte_t *)pte_base + ept_get_pte_idx(gpa);
// TODO: Just for debugging; need to check QEMU for more information
/* if (epte_is_present(pte)) {
* hax_log(HAX_LOGD, "Can't change the pte entry!\n");
* hax_mutex_unlock(hax_vm->vm_lock);
* hax_log(HAX_LOGD, "\npte %llx\n", pte->val);
* hax_vunmap_pfn(pte_base);
* return 0;
* }
*/
switch (mem_type) {
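// EPT permissions form a bit triple: bit 0 = read, bit 1 = write,
// bit 2 = execute; hence 7 (RWX) for normal RAM and 5 (R-X, no write)
// for ROM in the cases below.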
case EPT_TYPE_NONE: {
perm = 0; // unmap
break;
}
case EPT_TYPE_MEM: {
perm = 7;
break;
}
case EPT_TYPE_ROM: {
perm = 5;
break;
}
default: {
hax_log(HAX_LOGE, "Unsupported mapping type 0x%x\n", mem_type);
ret = false;
goto out_unmap;
}
}
*is_modified = epte_is_present(pte) && (epte_get_address(pte) != hpa ||
epte_get_perm(pte) != perm || epte_get_emt(pte) != emt);
epte_set_entry(pte, hpa, perm, emt);

out_unmap:
hax_vunmap_pfn(pte_base);
out_unlock:
hax_mutex_unlock(hax_vm->vm_lock);
return ret;
}
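
A hypothetical caller, to illustrate the contract (hax_vm, gpa and hpa are placeholders; EMT_WB and the invalidation call are taken from elsewhere in this file):

bool modified = false;
// Map one 4K guest page as ordinary RAM (RWX, write-back memory type).
if (!ept_set_pte(hax_vm, gpa, hpa, EMT_WB, EPT_TYPE_MEM, &modified))
hax_log(HAX_LOGE, "ept_set_pte failed\n");
// If an existing translation was changed, flush stale cached translations.
if (modified)
invept(hax_vm, EPT_INVEPT_SINGLE_CONTEXT);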

static bool ept_lookup(struct vcpu_t *vcpu, hax_paddr_t gpa, hax_paddr_t *hpa)
{
epte_t *pde, *pte;
void *pte_base;
struct hax_ept *ept = vcpu->vm->ept;
uint which_g = gpa >> 30;

hax_assert(ept->ept_root_page);
if (which_g >= EPT_MAX_MEM_G) {
hax_log(HAX_LOGD, "ept_lookup error!\n");
return 0;
}

pde = ept_get_pde(ept, gpa);

if (!epte_is_present(pde))
return 0;

pte_base = hax_vmap_pfn(pde->addr);
if (!pte_base)
return 0;

pte = (epte_t *)pte_base + ept_get_pte_idx(gpa);

if (!epte_is_present(pte)) {
hax_vunmap_pfn(pte_base);
return 0;
}

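// pte->addr holds a 4K page frame number: shift it into a byte address
// and re-attach the low 12-bit page offset from the GPA.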
*hpa = (pte->addr << 12) | (gpa & 0xfff);
hax_vunmap_pfn(pte_base);
return 1;
}

/*
 * Deprecated EPT API: translate a GPA to an HPA.
 * @param vcpu: current vcpu structure pointer
 * @param gpa: guest physical address
 * @param order: page order of the GPA (only PG_ORDER_4K is supported)
 * @param hpa: host physical address pointer (output)
 */

// TODO: Do we need to consider the cross-page case?
bool ept_translate(struct vcpu_t *vcpu, hax_paddr_t gpa, uint order, hax_paddr_t *hpa)
{
hax_assert(order == PG_ORDER_4K);
return ept_lookup(vcpu, gpa, hpa);
}
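
A hypothetical caller of this deprecated API (vcpu and gpa are placeholders):

hax_paddr_t hpa;
if (ept_translate(vcpu, gpa, PG_ORDER_4K, &hpa))
hax_log(HAX_LOGD, "gpa 0x%llx -> hpa 0x%llx\n", gpa, hpa);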

static eptp_t ept_construct_eptp(hax_paddr_t addr)
{
eptp_t eptp;
eptp.val = 0;
eptp.emt = EMT_WB;
eptp.gaw = EPT_DEFAULT_GAW;
eptp.asr = addr >> PG_ORDER_4K;
return eptp;
}
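
This follows the VMX EPTP layout: EPT memory type in bits 2:0, page-walk length minus one in bits 5:3, and the 4K-aligned root address in bits 63:12 (asr is the root address shifted right by PG_ORDER_4K). A worked example, assuming the usual SDM encodings EMT_WB == 6 and EPT_DEFAULT_GAW == 3 (the actual constants are defined elsewhere in the tree):

// eptp = ept_construct_eptp(0x1234000ULL);
// eptp.val == 0x1234000   root table address (bits 63:12)
//           | (3 << 3)    GAW: 4-level walk (walk length - 1)
//           | 6           EMT: write-back
//           == 0x123401e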

bool ept_init(hax_vm_t *hax_vm)
{
uint i;
hax_paddr_t hpa;
// Need Xiantao's check
unsigned char *ept_addr;
epte_t *e;
struct hax_page *page;
struct hax_ept *ept;

if (hax_vm->ept) {
hax_log(HAX_LOGD, "EPT: EPT has been created already!\n");
return 0;
}

ept = hax_vmalloc(sizeof(struct hax_ept), 0);
if (!ept) {
hax_log(HAX_LOGD,
"EPT: Not enough memory for creating the EPT structure!\n");
return 0;
}
memset(ept, 0, sizeof(struct hax_ept));
hax_vm->ept = ept;

page = hax_alloc_pages(EPT_PRE_ALLOC_PG_ORDER, 0, 1);
if (!page) {
hax_log(HAX_LOGD, "EPT: No enough memory for creating ept table!\n");
hax_vfree(hax_vm->ept, sizeof(struct hax_ept));
return 0;
}
ept->ept_root_page = page;
ept_addr = hax_page_va(page);
memset(ept_addr, 0, EPT_PRE_ALLOC_PAGES * PAGE_SIZE_4K);

// One page for building PML4 level
ept->eptp = ept_construct_eptp(hax_pa(ept_addr));
e = (epte_t *)ept_addr;

// One page for building the PDPT level
ept_addr += PAGE_SIZE_4K;
hpa = hax_pa(ept_addr);
epte_set_entry(e, hpa, 7, EMT_NONE);
e = (epte_t *)ept_addr;

// The remaining pages are used to build the PDE level
for (i = 0; i < EPT_MAX_MEM_G; i++) {
ept_addr += PAGE_SIZE_4K;
hpa = hax_pa(ept_addr);
epte_set_entry(e + i, hpa, 7, EMT_NONE);
}

hax_init_list_head(&ept->ept_page_list);

hax_log(HAX_LOGI, "ept_init: Calling INVEPT\n");
invept(hax_vm, EPT_INVEPT_SINGLE_CONTEXT);
return 1;
}
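
The resulting pre-allocated block (page tables themselves are allocated lazily by ept_set_pte() and tracked on ept_page_list):

// page 0            PML4, entry 0 -> the PDPT page
// page 1            PDPT, entries 0..EPT_MAX_MEM_G-1 -> the PD pages
// pages 2..2+G-1    one page directory per GiB of guest memory
//
// This is the layout that ept_get_pde() above indexes with
// (2 + which_g) * PAGE_SIZE_4K.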

// Free the whole ept structure
void ept_free(hax_vm_t *hax_vm)
{
struct hax_page *page, *n;
struct hax_ept *ept = hax_vm->ept;

hax_assert(ept);

if (!ept->ept_root_page)
return;

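// Flush this VM's cached translations before the paging structures are
// freed, so no stale mappings survive once the pages are reused.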
hax_log(HAX_LOGI, "ept_free: Calling INVEPT\n");
invept(hax_vm, EPT_INVEPT_SINGLE_CONTEXT);
hax_list_entry_for_each_safe(page, n, &ept->ept_page_list, struct hax_page,
list) {
hax_list_del(&page->list);
hax_free_page(page);
}

hax_free_pages(ept->ept_root_page);
hax_vfree(hax_vm->ept, sizeof(struct hax_ept));
hax_vm->ept = 0;
}

struct invept_bundle {
uint type;
struct invept_desc *desc;
@@ -416,12 +168,3 @@ void invept(hax_vm_t *hax_vm, uint type)
}
}
}

uint64_t vcpu_get_eptp(struct vcpu_t *vcpu)
{
struct hax_ept *ept = vcpu->vm->ept;

if (vcpu->mmu->mmu_mode != MMU_MODE_EPT)
return INVALID_EPTP;
return ept->eptp.val;
}
2 changes: 0 additions & 2 deletions core/hax.c
@@ -368,10 +368,8 @@ int hax_get_capability(void *buf, int bufLeng, int *outLength)
// Fast MMIO supported since API version 2
cap->winfo = HAX_CAP_FASTMMIO;
cap->winfo |= HAX_CAP_64BIT_RAMBLOCK;
#ifdef CONFIG_HAX_EPT2
cap->winfo |= HAX_CAP_64BIT_SETRAM;
cap->winfo |= HAX_CAP_IMPLICIT_RAMBLOCK;
#endif
cap->winfo |= HAX_CAP_TUNNEL_PAGE;
cap->winfo |= HAX_CAP_RAM_PROTECTION;
cap->winfo |= HAX_CAP_DEBUG;
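
Note: with the CONFIG_HAX_EPT2 guard deleted, HAX_CAP_64BIT_SETRAM and HAX_CAP_IMPLICIT_RAMBLOCK are now advertised unconditionally.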