diff --git a/core/ept.c b/core/ept.c index c6037ff3..3c57d752 100644 --- a/core/ept.c +++ b/core/ept.c @@ -37,11 +37,8 @@ * mapping. */ -#include "../include/hax.h" #include "include/ept.h" #include "include/cpu.h" -#include "include/paging.h" -#include "include/vtlb.h" static uint64_t ept_capabilities; @@ -77,251 +74,6 @@ static bool ept_has_cap(uint64_t cap) return (ept_capabilities & cap) != 0; } -// Get the PDE entry for the specified gpa in EPT -static epte_t * ept_get_pde(struct hax_ept *ept, hax_paddr_t gpa) -{ - epte_t *e; - uint which_g = gpa >> 30; - // PML4 and PDPTE level needs 2 pages - uint64_t offset = (2 + which_g) * PAGE_SIZE_4K; - // Need Xiantao's check - unsigned char *ept_addr = hax_page_va(ept->ept_root_page); - - hax_assert(which_g < EPT_MAX_MEM_G); - - e = (epte_t *)(ept_addr + offset) + ept_get_pde_idx(gpa); - return e; -} - -// ept_set_pte: caller can use it to setup p2m mapping for the guest. -bool ept_set_pte(hax_vm_t *hax_vm, hax_paddr_t gpa, hax_paddr_t hpa, uint emt, - uint mem_type, bool *is_modified) -{ - bool ret = true; - struct hax_page *page; - hax_paddr_t pte_ha; - epte_t *pte; - void *pte_base, *addr; - struct hax_ept *ept = hax_vm->ept; - uint which_g = gpa >> 30; - uint perm; - epte_t *pde = ept_get_pde(ept, gpa); - - // hax_log(HAX_LOGD, "hpa %llx gpa %llx\n", hpa, gpa); - if (which_g >= EPT_MAX_MEM_G) { - hax_log(HAX_LOGE, "Error: Guest's memory size is beyond %dG!\n", - EPT_MAX_MEM_G); - return false; - } - hax_mutex_lock(hax_vm->vm_lock); - if (!epte_is_present(pde)) { - if (mem_type == EPT_TYPE_NONE) { // unmap - // Don't bother allocating the PT - goto out_unlock; - } - - page = hax_alloc_page(0, 1); - if (!page) { - ret = false; - goto out_unlock; - } - - hax_list_add(&page->list, &ept->ept_page_list); - addr = hax_page_va(page); - memset(addr, 0, PAGE_SIZE_4K); - pte_ha = hax_page_pa(page); - // Always own full access rights - epte_set_entry(pde, pte_ha, 7, EMT_NONE); - } - - // Grab the PTE entry - pte_base = hax_vmap_pfn(pde->addr); - if (!pte_base) { - ret = false; - goto out_unlock; - } - pte = (epte_t *)pte_base + ept_get_pte_idx(gpa); - // TODO: Just for debugging, need check QEMU for more information - /* if (epte_is_present(pte)) { - * hax_log(HAX_LOGD, "Can't change the pte entry!\n"); - * hax_mutex_unlock(hax_vm->vm_lock); - * hax_log(HAX_LOGD, "\npte %llx\n", pte->val); - * hax_vunmap_pfn(pte_base); - * return 0; - * } - */ - switch (mem_type) { - case EPT_TYPE_NONE: { - perm = 0; // unmap - break; - } - case EPT_TYPE_MEM: { - perm = 7; - break; - } - case EPT_TYPE_ROM: { - perm = 5; - break; - } - default: { - hax_log(HAX_LOGE, "Unsupported mapping type 0x%x\n", mem_type); - ret = false; - goto out_unmap; - } - } - *is_modified = epte_is_present(pte) && (epte_get_address(pte) != hpa || - epte_get_perm(pte) != perm || epte_get_emt(pte) != emt); - epte_set_entry(pte, hpa, perm, emt); - -out_unmap: - hax_vunmap_pfn(pte_base); -out_unlock: - hax_mutex_unlock(hax_vm->vm_lock); - return ret; -} - -static bool ept_lookup(struct vcpu_t *vcpu, hax_paddr_t gpa, hax_paddr_t *hpa) -{ - epte_t *pde, *pte; - void *pte_base; - struct hax_ept *ept = vcpu->vm->ept; - uint which_g = gpa >> 30; - - hax_assert(ept->ept_root_page); - if (which_g >= EPT_MAX_MEM_G) { - hax_log(HAX_LOGD, "ept_lookup error!\n"); - return 0; - } - - pde = ept_get_pde(ept, gpa); - - if (!epte_is_present(pde)) - return 0; - - pte_base = hax_vmap_pfn(pde->addr); - if (!pte_base) - return 0; - - pte = (epte_t *)pte_base + ept_get_pte_idx(gpa); - - if (!epte_is_present(pte)) { 
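Review note: the deleted ept_set_pte() above encoded EPT leaf permissions directly in the low three bits of each entry (bit 0 = read, bit 1 = write, bit 2 = execute, so a zero value means "not present", matching epte_is_present()). A minimal sketch of the mapping it implemented, for reference while reading the removal; the EPT_TYPE_* values are the software codes from the old core/include/ept.h:

    #define EPT_TYPE_NONE 0
    #define EPT_TYPE_MEM  0x1
    #define EPT_TYPE_ROM  0x3

    /* Illustrative only: mirrors the switch removed from ept_set_pte(). */
    static unsigned int ept_perm_for(unsigned int mem_type)
    {
        switch (mem_type) {
        case EPT_TYPE_NONE: return 0; /* not present: unmaps the page */
        case EPT_TYPE_MEM:  return 7; /* read | write | execute */
        case EPT_TYPE_ROM:  return 5; /* read | execute; writes fault */
        default:            return 0; /* unsupported mapping type */
        }
    }

The EPT2 engine (core/include/ept2.h and its companion sources) now owns this logic, so the open-coded table has no remaining caller.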
- hax_vunmap_pfn(pte_base); - return 0; - } - - *hpa = (pte->addr << 12) | (gpa & 0xfff); - hax_vunmap_pfn(pte_base); - return 1; -} - -/* - * Deprecated API of EPT - * Translate a GPA to an HPA - * @param vcpu: current vcpu structure pointer - * @param gpa: guest physical address - * @param order: order for gpa - * @param hpa host physical address pointer - */ - -// TODO: Do we need to consider cross-page case ?? -bool ept_translate(struct vcpu_t *vcpu, hax_paddr_t gpa, uint order, hax_paddr_t *hpa) -{ - hax_assert(order == PG_ORDER_4K); - return ept_lookup(vcpu, gpa, hpa); -} - -static eptp_t ept_construct_eptp(hax_paddr_t addr) -{ - eptp_t eptp; - eptp.val = 0; - eptp.emt = EMT_WB; - eptp.gaw = EPT_DEFAULT_GAW; - eptp.asr = addr >> PG_ORDER_4K; - return eptp; -} - -bool ept_init(hax_vm_t *hax_vm) -{ - uint i; - hax_paddr_t hpa; - // Need Xiantao's check - unsigned char *ept_addr; - epte_t *e; - struct hax_page *page; - struct hax_ept *ept; - - if (hax_vm->ept) { - hax_log(HAX_LOGD, "EPT: EPT has been created already!\n"); - return 0; - } - - ept = hax_vmalloc(sizeof(struct hax_ept), 0); - if (!ept) { - hax_log(HAX_LOGD, - "EPT: No enough memory for creating EPT structure!\n"); - return 0; - } - memset(ept, 0, sizeof(struct hax_ept)); - hax_vm->ept = ept; - - page = hax_alloc_pages(EPT_PRE_ALLOC_PG_ORDER, 0, 1); - if (!page) { - hax_log(HAX_LOGD, "EPT: No enough memory for creating ept table!\n"); - hax_vfree(hax_vm->ept, sizeof(struct hax_ept)); - return 0; - } - ept->ept_root_page = page; - ept_addr = hax_page_va(page); - memset(ept_addr, 0, EPT_PRE_ALLOC_PAGES * PAGE_SIZE_4K); - - // One page for building PML4 level - ept->eptp = ept_construct_eptp(hax_pa(ept_addr)); - e = (epte_t *)ept_addr; - - // One page for building PDPTE level - ept_addr += PAGE_SIZE_4K; - hpa = hax_pa(ept_addr); - epte_set_entry(e, hpa, 7, EMT_NONE); - e = (epte_t *)ept_addr; - - // The rest pages are used to build PDE level - for (i = 0; i < EPT_MAX_MEM_G; i++) { - ept_addr += PAGE_SIZE_4K; - hpa = hax_pa(ept_addr); - epte_set_entry(e + i, hpa, 7, EMT_NONE); - } - - hax_init_list_head(&ept->ept_page_list); - - hax_log(HAX_LOGI, "ept_init: Calling INVEPT\n"); - invept(hax_vm, EPT_INVEPT_SINGLE_CONTEXT); - return 1; -} - -// Free the whole ept structure -void ept_free (hax_vm_t *hax_vm) -{ - struct hax_page *page, *n; - struct hax_ept *ept = hax_vm->ept; - - hax_assert(ept); - - if (!ept->ept_root_page) - return; - - hax_log(HAX_LOGI, "ept_free: Calling INVEPT\n"); - invept(hax_vm, EPT_INVEPT_SINGLE_CONTEXT); - hax_list_entry_for_each_safe(page, n, &ept->ept_page_list, struct hax_page, - list) { - hax_list_del(&page->list); - hax_free_page(page); - } - - hax_free_pages(ept->ept_root_page); - hax_vfree(hax_vm->ept, sizeof(struct hax_ept)); - hax_vm->ept = 0; -} - struct invept_bundle { uint type; struct invept_desc *desc; @@ -416,12 +168,3 @@ void invept(hax_vm_t *hax_vm, uint type) } } } - -uint64_t vcpu_get_eptp(struct vcpu_t *vcpu) -{ - struct hax_ept *ept = vcpu->vm->ept; - - if (vcpu->mmu->mmu_mode != MMU_MODE_EPT) - return INVALID_EPTP; - return ept->eptp.val; -} diff --git a/core/hax.c b/core/hax.c index 995aaf51..cb554996 100644 --- a/core/hax.c +++ b/core/hax.c @@ -368,10 +368,8 @@ int hax_get_capability(void *buf, int bufLeng, int *outLength) // Fast MMIO supported since API version 2 cap->winfo = HAX_CAP_FASTMMIO; cap->winfo |= HAX_CAP_64BIT_RAMBLOCK; -#ifdef CONFIG_HAX_EPT2 cap->winfo |= HAX_CAP_64BIT_SETRAM; cap->winfo |= HAX_CAP_IMPLICIT_RAMBLOCK; -#endif cap->winfo |= HAX_CAP_TUNNEL_PAGE; 
cap->winfo |= HAX_CAP_RAM_PROTECTION; cap->winfo |= HAX_CAP_DEBUG; diff --git a/core/include/ept.h b/core/include/ept.h index 8823d055..a7f03dc1 100644 --- a/core/include/ept.h +++ b/core/include/ept.h @@ -33,133 +33,6 @@ #include "../../include/hax_types.h" #include "vm.h" -#include "vcpu.h" - -/* - * Structure for an EPT entry - */ - -/* - * Note: - * (1) Bit large_page must be 1 if this is used for 2MB page PDE. - * (2) Do not use accessed/dirty bits for other purpose. - */ - -typedef struct epte { - union { - uint64_t val; - struct { - uint64_t perm : 3; - uint64_t emt : 3; - uint64_t ignore_pat : 1; - uint64_t large_page : 1; - uint64_t accessed : 1; - uint64_t dirty : 1; - uint64_t dont_use : 2; - uint64_t addr : 45; - uint64_t rsvd : 5; - uint64_t avail1 : 2; - }; - }; -} epte_t; - -#define EMT_UC 0 -#define EMT_WB 6 -#define EMT_NONE 0 - -#define EPT_ENTRY 512 - -/* 4 bits are avaiable for software use. */ -#define EPT_TYPE_NONE 0 -#define EPT_TYPE_MEM 0x1 -#define EPT_TYPE_MMIO 0x2 -#define EPT_TYPE_ROM 0x3 -#define EPT_TYPE_RSVD 0x4 - -static inline bool epte_is_present(epte_t *entry) -{ - return !!entry->perm; -} - -static inline hax_paddr_t epte_get_address(epte_t *entry) -{ - return (entry->addr << 12); -} - -static inline uint epte_get_perm(epte_t *entry) -{ - return (uint)entry->perm; -} - -static inline uint epte_get_emt(epte_t *entry) -{ - return (uint)entry->emt; -} - -static void epte_set_entry(epte_t *entry, hax_paddr_t addr, uint perm, uint emt) -{ - entry->val = 0; - entry->addr = addr >> 12; - entry->perm = perm; - entry->emt = emt; -} - -static inline void epte_set_emt(epte_t *entry, uint emt) -{ - entry->emt = emt; -} - -static inline uint ept_get_pde_idx(hax_paddr_t gpa) -{ - return ((gpa >> 21) & 0x1ff); -} - -static inline uint ept_get_pte_idx(hax_paddr_t gpa) -{ - return ((gpa >> 12) & 0x1ff); -} - -/* FIXME: Only support 4-level EPT page table. 
*/ -#define EPT_DEFAULT_GAW 3 - -/* Support up to 14G memory for the guest */ -#define EPT_PRE_ALLOC_PAGES 16 - -/* Two pages used to build up to 2-level table */ -#define EPT_MAX_MEM_G MAX_GMEM_G - -#define EPT_PRE_ALLOC_PG_ORDER 4 -/* 2 ^ EPT_PRE_ALLOC_PG_ORDER = EPT_PRE_ALLOC_PAGES */ - -typedef struct eptp { - union { - uint64_t val; - struct { - uint64_t emt : 3; - uint64_t gaw : 3; - uint64_t rsvd1 : 6; - uint64_t asr : 48; - uint64_t rsvd2 : 4; - }; - }; -} eptp_t; - -#define INVALID_EPTP ~(uint64_t)0 - -struct hax_ept { - bool is_enabled; - struct hax_link_list ept_page_list; - struct hax_page *ept_root_page; - struct eptp eptp; -}; - -static void construct_eptp(eptp_t *entry, hax_paddr_t hpa, uint emt) -{ - entry->val = 0; - entry->emt = emt; - entry->asr = hpa >> 12; - entry->gaw = EPT_DEFAULT_GAW; -}; #define ept_cap_rwX ((uint64_t)1 << 0) #define ept_cap_rWx ((uint64_t)1 << 1) @@ -192,22 +65,15 @@ static void construct_eptp(eptp_t *entry, hax_paddr_t hpa, uint emt) #define ept_cap_invvpid_ac ((uint64_t)1 << 42) #define ept_cap_invvpid_cwpg ((uint64_t)1 << 43) +#define INVALID_EPTP ((uint64_t)~0ULL) + #define EPT_UNSUPPORTED_FEATURES \ (ept_cap_sp2M | ept_cap_sp1G | ept_cap_sp512G | ept_cap_sp256T) #define EPT_INVEPT_SINGLE_CONTEXT 1 #define EPT_INVEPT_ALL_CONTEXT 2 -bool ept_init(hax_vm_t *hax_vm); -void ept_free(hax_vm_t *hax_vm); - -uint64_t vcpu_get_eptp(struct vcpu_t *vcpu); -bool ept_set_pte(hax_vm_t *hax_vm, hax_paddr_t gpa, hax_paddr_t hpa, uint emt, - uint mem_type, bool *is_modified); void invept(hax_vm_t *hax_vm, uint type); bool ept_set_caps(uint64_t caps); -/* Deprecated API due to low performance */ -bool ept_translate(struct vcpu_t *vcpu, hax_paddr_t gpa, uint order, hax_paddr_t *hpa); - #endif // HAX_CORE_EPT_H_ diff --git a/core/include/hax_core_interface.h b/core/include/hax_core_interface.h index d974ce2f..0ad425d7 100644 --- a/core/include/hax_core_interface.h +++ b/core/include/hax_core_interface.h @@ -54,12 +54,9 @@ struct hax_tunnel * get_vcpu_tunnel(struct vcpu_t *vcpu); int hax_vcpu_destroy_hax_tunnel(struct vcpu_t *cv); int hax_vcpu_setup_hax_tunnel(struct vcpu_t *cv, struct hax_tunnel_info *info); int hax_vm_set_ram(struct vm_t *vm, struct hax_set_ram_info *info); -#ifdef CONFIG_HAX_EPT2 int hax_vm_set_ram2(struct vm_t *vm, struct hax_set_ram_info2 *info); int hax_vm_protect_ram(struct vm_t *vm, struct hax_protect_ram_info *info); -#endif int hax_vm_free_all_ram(struct vm_t *vm); -int in_pmem_range(struct hax_vcpu_mem *pmem, uint64_t va); int hax_vm_add_ramblock(struct vm_t *vm, uint64_t start_uva, uint64_t size); void * get_vm_host(struct vm_t *vm); diff --git a/core/include/vcpu.h b/core/include/vcpu.h index dc103dcd..33bb326d 100644 --- a/core/include/vcpu.h +++ b/core/include/vcpu.h @@ -132,7 +132,6 @@ struct vcpu_post_mmio { uint64_t value; }; -#ifdef CONFIG_HAX_EPT2 struct mmio_fetch_cache { uint64_t last_gva; uint64_t last_guest_cr3; @@ -140,7 +139,6 @@ struct mmio_fetch_cache { hax_kmap_user kmap; int hit_count; }; -#endif // CONFIG_HAX_EPT2 #define IOS_MAX_BUFFER 64 @@ -234,9 +232,7 @@ struct vcpu_t { struct em_context_t emulate_ctxt; struct vcpu_post_mmio post_mmio; -#ifdef CONFIG_HAX_EPT2 struct mmio_fetch_cache mmio_fetch; -#endif // CONFIG_HAX_EPT2 }; #define vmx(v, field) v->vmx.field diff --git a/core/include/vm.h b/core/include/vm.h index 08e33849..36a831c8 100644 --- a/core/include/vm.h +++ b/core/include/vm.h @@ -36,10 +36,8 @@ #include "vcpu.h" #include "../../include/hax.h" -#ifdef CONFIG_HAX_EPT2 #include "memory.h" #include 
"ept2.h" -#endif // CONFIG_HAX_EPT2 #define KERNEL_BASE 0xC0000000 @@ -71,13 +69,10 @@ struct vm_t { hax_list_head vcpu_list; uint16_t bsp_vcpu_id; void *vm_host; - struct hax_ept *ept; void *p2m_map[MAX_GMEM_G]; -#ifdef CONFIG_HAX_EPT2 hax_gpa_space gpa_space; hax_ept_tree ept_tree; hax_gpa_space_listener gpa_space_listener; -#endif // CONFIG_HAX_EPT2 #ifdef HAX_ARCH_X86_32 uint64_t hva_limit; uint64_t hva_index; @@ -125,19 +120,6 @@ enum run_flag { uint64_t hax_gpfn_to_hpa(struct vm_t *vm, uint64_t gpfn); -#ifndef CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 -void * hax_map_gpfn(struct vm_t *vm, uint64_t gpfn, bool flag, hax_paddr_t cr3_cur, - uint8_t level); -void hax_unmap_gpfn(struct vm_t *vm, void *va, uint64_t gpfn); -#else -void * hax_map_gpfn(struct vm_t *vm, uint64_t gpfn); -void hax_unmap_gpfn(void *va); -#endif -#endif // !CONFIG_HAX_EPT2 - -int hax_core_set_p2m(struct vm_t *vm, uint64_t gpfn, uint64_t hpfn, uint64_t hva, - uint8_t flags); struct vm_t *hax_create_vm(int *vm_id); int hax_teardown_vm(struct vm_t *vm); diff --git a/core/include/vtlb.h b/core/include/vtlb.h index 630e773b..199ba813 100644 --- a/core/include/vtlb.h +++ b/core/include/vtlb.h @@ -111,7 +111,6 @@ uint32_t vcpu_write_guest_virtual(struct vcpu_t *vcpu, hax_vaddr_t addr, uint32_t dst_buflen, const void *src, uint32_t size, uint flag); -#ifdef CONFIG_HAX_EPT2 /* * Reads the given number of bytes from guest RAM (using a GVA) into the given * buffer. This function is supposed to be called by the MMIO handler to obtain @@ -128,7 +127,6 @@ uint32_t vcpu_write_guest_virtual(struct vcpu_t *vcpu, hax_vaddr_t addr, */ int mmio_fetch_instruction(struct vcpu_t *vcpu, uint64_t gva, uint8_t *buf, int len); -#endif // CONFIG_HAX_EPT2 void hax_inject_page_fault(struct vcpu_t *vcpu, mword error_code); diff --git a/core/memory.c b/core/memory.c index 18cf8cae..9d39615f 100644 --- a/core/memory.c +++ b/core/memory.c @@ -33,11 +33,8 @@ #include "include/vm.h" #include "include/hax_driver.h" #include "include/ept.h" -#ifdef CONFIG_HAX_EPT2 #include "include/paging.h" -#endif // CONFIG_HAX_EPT2 -#ifdef CONFIG_HAX_EPT2 static int handle_alloc_ram(struct vm_t *vm, uint64_t start_uva, uint64_t size) { int ret; @@ -62,120 +59,10 @@ static int handle_alloc_ram(struct vm_t *vm, uint64_t start_uva, uint64_t size) } return 0; } -#endif // CONFIG_HAX_EPT2 int hax_vm_add_ramblock(struct vm_t *vm, uint64_t start_uva, uint64_t size) { -#ifdef CONFIG_HAX_EPT2 return handle_alloc_ram(vm, start_uva, size); -#else // !CONFIG_HAX_EPT2 - uint64_t gva = start_uva; - uint32_t leftsize; - struct hax_vcpu_mem *mem = NULL, *curmem, *smem; - int entry_num = 0, i, ret; - uint32_t cursize; - - /* A valid size is needed */ - if (0 == size) { - hax_log(HAX_LOGE, "hax_vm_alloc_ram: the size is 0, invalid!\n"); - return -EINVAL; - } - - if (!gva || gva & 0xfff) { - hax_log(HAX_LOGE, "Invalid gva %llx for allocating memory.\n", gva); - return -EINVAL; - } - - hax_log(HAX_LOGI, "hax_vm_alloc_ram: size 0x%x\n", size); - if (!hax_test_bit(VM_STATE_FLAGS_MEM_ALLOC, &vm->flags)) { - hax_log(HAX_LOGI, "!VM_STATE_FLAGS_MEM_ALLOC\n"); - hax_mutex_lock(hax->hax_lock); - if (hax->mem_limit && (size > hax->mem_quota)) { - hax_log(HAX_LOGE, "HAX is out of memory quota.\n"); - hax_mutex_unlock(hax->hax_lock); - return -EINVAL; - } - hax_mutex_unlock(hax->hax_lock); - hax_log(HAX_LOGI, "Memory allocation, va:%llx, size:%x\n", *va, size); - } else { - hax_log(HAX_LOGI, "spare alloc: mem_limit 0x%llx, size 0x%x, " - "spare_ram 0x%llx\n", hax->mem_limit, size, 
vm->spare_ramsize); - if (hax->mem_limit && (size > vm->spare_ramsize)) { - hax_log(HAX_LOGE, "HAX is out of memory quota, because application" - " requests another %x bytes\n", size); - return -EINVAL; - } - } - - entry_num = (size - 1) / HAX_RAM_ENTRY_SIZE + 1; - - mem = (struct hax_vcpu_mem *)hax_vmalloc( - sizeof(struct hax_vcpu_mem) * (entry_num + vm->ram_entry_num), 0); - if (!mem) - return -ENOMEM; - memset(mem, 0, - sizeof(struct hax_vcpu_mem) * (entry_num + vm->ram_entry_num)); - memcpy_s(mem, sizeof(struct hax_vcpu_mem) * (entry_num + vm->ram_entry_num), - vm->ram_entry, sizeof(struct hax_vcpu_mem) * vm->ram_entry_num); - - smem = curmem = mem + vm->ram_entry_num; - - leftsize = size; - while (leftsize > 0) { - cursize = leftsize > HAX_RAM_ENTRY_SIZE ? HAX_RAM_ENTRY_SIZE : leftsize; - hax_log(HAX_LOGD, "Memory allocation, gva:%llx, cur_size:%x\n", - gva, cursize); - - ret = hax_setup_vcpumem(curmem, gva, cursize, HAX_VCPUMEM_VALIDVA); - if (ret < 0) - goto fail; - - hax_log(HAX_LOGD, "Alloc ram %x kva is %p uva %llx\n", - cursize, curmem->kva, curmem->uva); - - leftsize -= cursize; - curmem++; - gva += cursize; - } - - if (vm->ram_entry) { - hax_vfree(vm->ram_entry, - sizeof(struct hax_vcpu_mem) * vm->ram_entry_num); - } - - vm->ram_entry = mem; - vm->ram_entry_num += entry_num; - if (!hax_test_bit(VM_STATE_FLAGS_MEM_ALLOC, &vm->flags)) { - hax_mutex_lock(hax->hax_lock); - if (hax->mem_limit) { - hax->mem_quota -= size; - } - hax_mutex_unlock(hax->hax_lock); - hax_test_and_set_bit(VM_STATE_FLAGS_MEM_ALLOC, &vm->flags); - vm->spare_ramsize = VM_SPARE_RAMSIZE; - hax_log(HAX_LOGI, "!VM_STATE_FLAGS_MEM_ALLOC: spare_ram 0x%llx\n", - vm->spare_ramsize); - } else { - if (hax->mem_limit) { - vm->spare_ramsize -= size; - hax_log(HAX_LOGI, "VM_STATE_FLAGS_MEM_ALLOC: spare_ram 0x%llx\n", - vm->spare_ramsize); - } - } - hax_log(HAX_LOGD, "Memory allocationg done!\n"); - return 0; - -fail: - curmem = smem; - for (i = 0; i < entry_num; i++) { - hax_clear_vcpumem(curmem); - curmem++; - } - - hax_vfree(mem, - sizeof(struct hax_vcpu_mem) * (entry_num + vm->ram_entry_num)); - return -EINVAL; -#endif // CONFIG_HAX_EPT2 } int hax_vm_free_all_ram(struct vm_t *vm) @@ -209,30 +96,6 @@ int hax_vm_free_all_ram(struct vm_t *vm) return 0; } -int in_pmem_range(struct hax_vcpu_mem *pmem, uint64_t va) -{ - return (va >= pmem->uva) && (va < pmem->uva + pmem->size); -} - -static struct hax_vcpu_mem *get_pmem_range(struct vm_t *vm, uint64_t va) -{ - int i; - struct hax_vcpu_mem *mem; - - mem = vm->ram_entry; - for (i = 0; i < vm->ram_entry_num; i++) { - if (!mem->hinfo) - continue; - if (!in_pmem_range(mem, va)) { - mem++; - continue; - } - return mem; - } - return NULL; -} - -#ifdef CONFIG_HAX_EPT2 static int handle_set_ram(struct vm_t *vm, uint64_t start_gpa, uint64_t size, uint64_t start_uva, uint32_t flags) { @@ -289,98 +152,13 @@ static int handle_set_ram(struct vm_t *vm, uint64_t start_gpa, uint64_t size, } return 0; } -#endif // CONFIG_HAX_EPT2 int hax_vm_set_ram(struct vm_t *vm, struct hax_set_ram_info *info) { -#ifdef CONFIG_HAX_EPT2 return handle_set_ram(vm, info->pa_start, info->size, info->va, info->flags); -#else // !CONFIG_HAX_EPT2 - int num = info->size >> HAX_PAGE_SHIFT; - uint64_t gpfn = info->pa_start >> HAX_PAGE_SHIFT; - uint64_t cur_va = info->va; - bool is_unmap = info->flags & HAX_RAM_INFO_INVALID; - bool is_readonly = info->flags & HAX_RAM_INFO_ROM; - uint emt = is_unmap ? EMT_NONE : (is_readonly ? EMT_UC : EMT_WB); - uint perm = is_unmap ? EPT_TYPE_NONE - : (is_readonly ? 
EPT_TYPE_ROM : EPT_TYPE_MEM); - bool ept_modified = false; - - // HAX_RAM_INFO_INVALID indicates that guest physical address range - // [pa_start, pa_start + size) should be unmapped - if (is_unmap && (info->flags != HAX_RAM_INFO_INVALID || info->va)) { - hax_log(HAX_LOGE, "HAX_VM_IOCTL_SET_RAM called with invalid " - "parameter(s): flags=0x%x, va=0x%llx\n", info->flags, info->va); - return -EINVAL; - } - - while (num > 0) { - uint64_t hpfn; - uint64_t hva; - bool epte_modified; - - if (is_unmap) { - hpfn = 0; - hva = 0; - } else { - struct hax_vcpu_mem *pmem = get_pmem_range(vm, cur_va); - if (!pmem) { - hax_log(HAX_LOGE, "Can't find pmem for va %llx", cur_va); - return -ENOMEM; - } - hpfn = get_hpfn_from_pmem(pmem, cur_va); - - if (hpfn <= 0) { - hax_log(HAX_LOGE, "Can't get host address for va %llx", cur_va); - /* - * Shall we revert the already setup one? Assume not since the - * QEMU should exit on such situation, although it does not. - */ - return -ENOMEM; - } -#if defined(HAX_PLATFORM_DARWIN) -#ifdef HAX_ARCH_X86_64 - hva = (uint64_t)pmem->kva + (cur_va - pmem->uva); -#else - hva = (uint64_t)(uint32_t)pmem->kva + (cur_va - pmem->uva); -#endif -#else // !HAX_PLATFORM_DARWIN -#ifdef HAX_ARCH_X86_64 - hva = (uint64_t)pmem->kva + (cur_va - pmem->uva); -#else - hva = 0; -#endif -#endif - cur_va += HAX_PAGE_SIZE; - } - - if (!hax_core_set_p2m(vm, gpfn, hpfn, hva, info->flags)) { - return -ENOMEM; - } - if (!ept_set_pte(vm, gpfn << HAX_PAGE_SHIFT, hpfn << HAX_PAGE_SHIFT, emt, perm, - &epte_modified)) { - hax_log(HAX_LOGE, "ept_set_pte() failed at gpfn 0x%llx " - "hpfn 0x%llx\n", gpfn, hpfn); - return -ENOMEM; - } - ept_modified = ept_modified || epte_modified; - - gpfn++; - num--; - } - if (ept_modified) { - /* Invalidate EPT cache (see IASDM Vol. 3C 28.3.3.4) */ - hax_log(HAX_LOGI, "Calling INVEPT after EPT update (pa_start=0x%llx, " - "size=0x%x, flags=0x%x)\n", info->pa_start, info->size, - info->flags); - invept(vm, EPT_INVEPT_SINGLE_CONTEXT); - } - return 0; -#endif // CONFIG_HAX_EPT2 } -#ifdef CONFIG_HAX_EPT2 int hax_vm_set_ram2(struct vm_t *vm, struct hax_set_ram_info2 *info) { return handle_set_ram(vm, info->pa_start, info->size, info->va, @@ -392,7 +170,6 @@ int hax_vm_protect_ram(struct vm_t *vm, struct hax_protect_ram_info *info) return gpa_space_protect_range(&vm->gpa_space, info->pa_start, info->size, info->flags); } -#endif // CONFIG_HAX_EPT2 int hax_vcpu_setup_hax_tunnel(struct vcpu_t *cv, struct hax_tunnel_info *info) { diff --git a/core/page_walker.c b/core/page_walker.c index 9b0f1c5b..5c1c366e 100644 --- a/core/page_walker.c +++ b/core/page_walker.c @@ -582,17 +582,10 @@ uint32_t pw_perform_page_walk( PW_PAGE_ENTRY *pml4te_ptr, *pdpte_ptr, *pde_ptr, *pte_ptr = NULL; PW_PAGE_ENTRY pml4te_val, pdpte_val, pde_val, pte_val; void *pml4t_hva, *pdpt_hva, *pd_hva, *pt_hva; -#ifdef CONFIG_HAX_EPT2 hax_kmap_user pml4t_kmap, pdpt_kmap, pd_kmap, pt_kmap; -#endif // CONFIG_HAX_EPT2 uint64_t pml4t_gpa, pdpt_gpa, pd_gpa, pt_gpa; uint32_t pml4te_index, pdpte_index, pde_index, pte_index; bool is_write, is_user; -#ifndef CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - bool is_kernel; -#endif -#endif // !CONFIG_HAX_EPT2 pml4te_ptr = pdpte_ptr = pde_ptr = NULL; pml4t_hva = pdpt_hva = pd_hva = pt_hva = NULL; @@ -604,11 +597,6 @@ uint32_t pw_perform_page_walk( is_write = access & TF_WRITE; is_user = access & TF_USER; -#ifndef CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - is_kernel = (virt_addr >= KERNEL_BASE) ? 
true : false; -#endif -#endif // !CONFIG_HAX_EPT2 pw_retrieve_indices(virt_addr, is_pae, is_lme, &pml4te_index, &pdpte_index, &pde_index, &pte_index); @@ -620,18 +608,10 @@ uint32_t pw_perform_page_walk( if (is_lme) { pml4t_gpa = first_table; -#ifdef CONFIG_HAX_EPT2 pml4t_hva = gpa_space_map_page(&vcpu->vm->gpa_space, pml4t_gpa >> PG_ORDER_4K, &pml4t_kmap, NULL); -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - pml4t_hva = hax_map_gpfn(vcpu->vm, pml4t_gpa >> 12, is_kernel, cr3, - 1); -#else - pml4t_hva = hax_map_gpfn(vcpu->vm, pml4t_gpa >> 12); -#endif -#endif // CONFIG_HAX_EPT2 + if (pml4t_hva == NULL) { retval = TF_FAILED; goto out; @@ -656,17 +636,10 @@ uint32_t pw_perform_page_walk( pdpt_gpa = first_table; } -#ifdef CONFIG_HAX_EPT2 pdpt_page_hva = gpa_space_map_page(&vcpu->vm->gpa_space, pdpt_gpa >> PG_ORDER_4K, &pdpt_kmap, NULL); -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - pdpt_page_hva = hax_map_gpfn(vcpu->vm, pdpt_gpa >> 12, is_kernel, cr3, 1); -#else - pdpt_page_hva = hax_map_gpfn(vcpu->vm, pdpt_gpa >> 12); -#endif -#endif // CONFIG_HAX_EPT2 + if (pdpt_page_hva == NULL) { retval = TF_FAILED; goto out; @@ -735,16 +708,9 @@ uint32_t pw_perform_page_walk( } pd_gpa = is_pae ? pw_retrieve_phys_addr(&pdpte_val, is_pae) : first_table; -#ifdef CONFIG_HAX_EPT2 pd_hva = gpa_space_map_page(&vcpu->vm->gpa_space, pd_gpa >> PG_ORDER_4K, &pd_kmap, NULL); -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - pd_hva = hax_map_gpfn(vcpu->vm, pd_gpa >> 12, is_kernel, cr3, 2); -#else - pd_hva = hax_map_gpfn(vcpu->vm, pd_gpa >> 12); -#endif -#endif // CONFIG_HAX_EPT2 + if (pd_hva == NULL) { retval = TF_FAILED; goto out; @@ -813,16 +779,9 @@ uint32_t pw_perform_page_walk( // 4KB page size *order = PG_ORDER_4K; pt_gpa = pw_retrieve_phys_addr(&pde_val, is_pae); -#ifdef CONFIG_HAX_EPT2 pt_hva = gpa_space_map_page(&vcpu->vm->gpa_space, pt_gpa >> 12, &pt_kmap, NULL); -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - pt_hva = hax_map_gpfn(vcpu->vm, pt_gpa >> 12, is_kernel, cr3, 1); -#else - pt_hva = hax_map_gpfn(vcpu->vm, pt_gpa >> 12); -#endif -#endif // CONFIG_HAX_EPT2 + if (pt_hva == NULL) { retval = TF_FAILED; goto out; @@ -876,37 +835,18 @@ uint32_t pw_perform_page_walk( // page walk succeeded out: -#ifdef CONFIG_HAX_EPT2 - if (pml4t_hva != NULL) + if (pml4t_hva != NULL) { gpa_space_unmap_page(&vcpu->vm->gpa_space, &pml4t_kmap); - if (pdpt_hva != NULL) + } + if (pdpt_hva != NULL) { gpa_space_unmap_page(&vcpu->vm->gpa_space, &pdpt_kmap); - if (pd_hva != NULL) + } + if (pd_hva != NULL) { gpa_space_unmap_page(&vcpu->vm->gpa_space, &pd_kmap); - if (pt_hva != NULL) + } + if (pt_hva != NULL) { gpa_space_unmap_page(&vcpu->vm->gpa_space, &pt_kmap); -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - if (pml4t_hva != NULL) - hax_unmap_gpfn(vcpu->vm, pml4t_hva, pml4t_gpa >> 12); - if (pdpt_hva != NULL) - hax_unmap_gpfn(vcpu->vm, pdpt_hva, pdpt_gpa >> 12); - if (pd_hva != NULL) - hax_unmap_gpfn(vcpu->vm, pd_hva, pd_gpa >> 12); - if (pt_hva != NULL) - hax_unmap_gpfn(vcpu->vm, pt_hva, pt_gpa >> 12); -#else - if (pml4t_hva != NULL) - hax_unmap_gpfn(pml4t_hva); - if (pdpt_hva != NULL) - hax_unmap_gpfn(pdpt_hva); - if (pd_hva != NULL) - hax_unmap_gpfn(pd_hva); - if (pt_hva != NULL) - hax_unmap_gpfn(pt_hva); -#endif -#endif // CONFIG_HAX_EPT2 - + } if (gpa_out != NULL) { *gpa_out = gpa; } diff --git a/core/vcpu.c b/core/vcpu.c index 4adc99b3..01366566 100644 --- a/core/vcpu.c +++ b/core/vcpu.c @@ -528,11 +528,9 @@ static int _vcpu_teardown(struct vcpu_t *vcpu) { int vcpu_id = vcpu->vcpu_id; -#ifdef 
CONFIG_HAX_EPT2 if (vcpu->mmio_fetch.kva) { gpa_space_unmap_page(&vcpu->vm->gpa_space, &vcpu->mmio_fetch.kmap); } -#endif // CONFIG_HAX_EPT2 // TODO: we should call invvpid after calling vcpu_vpid_free(). vcpu_vpid_free(vcpu); @@ -1898,7 +1896,6 @@ static int vcpu_prepare_pae_pdpt(struct vcpu_t *vcpu) { uint64_t cr3 = vcpu->state->_cr3; int pdpt_size = (int)sizeof(vcpu->pae_pdptes); -#ifdef CONFIG_HAX_EPT2 // CR3 is the GPA of the page-directory-pointer table. According to IASDM // Vol. 3A 4.4.1, Table 4-7, bits 63..32 and 4..0 of this GPA are ignored. uint64_t gpa = cr3 & 0xffffffe0; @@ -1919,29 +1916,6 @@ static int vcpu_prepare_pae_pdpt(struct vcpu_t *vcpu) } vcpu->pae_pdpt_dirty = 1; return 0; -#else // !CONFIG_HAX_EPT2 - uint64_t gpfn = (cr3 & 0xfffff000) >> PG_ORDER_4K; - uint8_t *buf, *pdpt; -#ifdef HAX_ARCH_X86_64 - buf = hax_map_gpfn(vcpu->vm, gpfn); -#else // !HAX_ARCH_X86_64, i.e. HAX_ARCH_X86_32 - buf = hax_map_gpfn(vcpu->vm, gpfn, false, cr3 & 0xfffff000, 1); -#endif // HAX_ARCH_X86_64 - if (!buf) { - hax_log(HAX_LOGE, "%s: Failed to map guest page frame containing PAE " - "PDPT: cr3=0x%llx\n", __func__, cr3); - return -ENOMEM; - } - pdpt = buf + (cr3 & 0xfe0); - memcpy_s(vcpu->pae_pdptes, pdpt_size, pdpt, pdpt_size); -#ifdef HAX_ARCH_X86_64 - hax_unmap_gpfn(buf); -#else // !HAX_ARCH_X86_64, i.e. HAX_ARCH_X86_32 - hax_unmap_gpfn(vcpu->vm, buf, gpfn); -#endif // HAX_ARCH_X86_64 - vcpu->pae_pdpt_dirty = 1; - return 0; -#endif // CONFIG_HAX_EPT2 } static void vmwrite_cr(struct vcpu_t *vcpu) @@ -2009,10 +1983,6 @@ static void vmwrite_cr(struct vcpu_t *vcpu) vmwrite(vcpu, GUEST_CR3, vtlb_get_cr3(vcpu)); state->_efer = 0; } else { // EPTE -#ifndef CONFIG_HAX_EPT2 - struct hax_ept *ept = vcpu->vm->ept; - ept->is_enabled = 1; -#endif // !CONFIG_HAX_EPT2 vcpu->mmu->mmu_mode = MMU_MODE_EPT; // In EPT mode, we need to monitor guest writes to CR.PAE, so that we // know when it wants to enter PAE paging mode (see IASDM Vol. 3A 4.1.2, @@ -2148,15 +2118,11 @@ static bool is_mmio_address(struct vcpu_t *vcpu, hax_paddr_t gpa) hpa = hax_gpfn_to_hpa(vcpu->vm, gpa >> HAX_PAGE_SHIFT); // hax_gpfn_to_hpa() assumes hpa == 0 is invalid return !hpa; - } else { -#ifdef CONFIG_HAX_EPT2 - hax_memslot *slot = memslot_find(&vcpu->vm->gpa_space, - gpa >> PG_ORDER_4K); - return !slot; -#else // !CONFIG_HAX_EPT2 - return !ept_translate(vcpu, gpa, PG_ORDER_4K, &hpa); -#endif // CONFIG_HAX_EPT2 } + + hax_memslot *slot = memslot_find(&vcpu->vm->gpa_space, gpa >> PG_ORDER_4K); + + return !slot; } static int vcpu_emulate_insn(struct vcpu_t *vcpu) @@ -2187,7 +2153,6 @@ static int vcpu_emulate_insn(struct vcpu_t *vcpu) // Fetch the instruction at guest CS:IP = CS.Base + IP, omitting segment // limit and privilege checks va = (mode == EM_MODE_PROT64) ? 
rip : cs_base + rip; -#ifdef CONFIG_HAX_EPT2 if (mmio_fetch_instruction(vcpu, va, instr, INSTR_MAX_LEN)) { vcpu_set_panic(vcpu); hax_log(HAX_LOGPANIC, "%s: mmio_fetch_instruction() failed: vcpu_id=%u," @@ -2196,16 +2161,6 @@ static int vcpu_emulate_insn(struct vcpu_t *vcpu) dump_vmcs(vcpu); return -1; } -#else // !CONFIG_HAX_EPT2 - if (!vcpu_read_guest_virtual(vcpu, va, &instr, INSTR_MAX_LEN, INSTR_MAX_LEN, - 0)) { - vcpu_set_panic(vcpu); - hax_log(HAX_LOGPANIC, "Error reading instruction at 0x%llx for decoding" - " (CS:IP=0x%llx:0x%llx)\n", va, cs_base, rip); - dump_vmcs(vcpu); - return -1; - } -#endif // CONFIG_HAX_EPT2 em_ctxt->rip = rip; rc = em_decode_insn(em_ctxt, instr); @@ -3831,12 +3786,9 @@ static int exit_ept_misconfiguration(struct vcpu_t *vcpu, struct hax_tunnel *htun) { hax_paddr_t gpa; -#ifdef CONFIG_HAX_EPT2 int ret; -#endif // CONFIG_HAX_EPT2 htun->_exit_reason = vmx(vcpu, exit_reason).basic_reason; -#ifdef CONFIG_HAX_EPT2 gpa = vmx(vcpu, exit_gpa); ret = ept_handle_misconfiguration(&vcpu->vm->gpa_space, &vcpu->vm->ept_tree, gpa); @@ -3844,7 +3796,6 @@ static int exit_ept_misconfiguration(struct vcpu_t *vcpu, // The misconfigured entries have been fixed return HAX_RESUME; } -#endif // CONFIG_HAX_EPT2 vcpu_set_panic(vcpu); hax_log(HAX_LOGPANIC, "%s: Unexpected EPT misconfiguration: gpa=0x%llx\n", @@ -3858,9 +3809,7 @@ static int exit_ept_violation(struct vcpu_t *vcpu, struct hax_tunnel *htun) exit_qualification_t *qual = &vmx(vcpu, exit_qualification); hax_paddr_t gpa; int ret = 0; -#ifdef CONFIG_HAX_EPT2 uint64_t fault_gfn; -#endif htun->_exit_reason = vmx(vcpu, exit_reason).basic_reason; @@ -3873,7 +3822,6 @@ static int exit_ept_violation(struct vcpu_t *vcpu, struct hax_tunnel *htun) gpa = vmx(vcpu, exit_gpa); -#ifdef CONFIG_HAX_EPT2 ret = ept_handle_access_violation(&vcpu->vm->gpa_space, &vcpu->vm->ept_tree, *qual, gpa, &fault_gfn); if (ret == -EFAULT) { @@ -3912,7 +3860,7 @@ static int exit_ept_violation(struct vcpu_t *vcpu, struct hax_tunnel *htun) return HAX_RESUME; } // ret == 0: The EPT violation is due to MMIO -#endif + return vcpu_emulate_insn(vcpu); } diff --git a/core/vm.c b/core/vm.c index a5c02091..979f4b84 100644 --- a/core/vm.c +++ b/core/vm.c @@ -80,14 +80,7 @@ int hax_vm_set_qemuversion(struct vm_t *vm, struct hax_qemu_version *ver) uint64_t vm_get_eptp(struct vm_t *vm) { - uint64_t eptp_value; - -#ifdef CONFIG_HAX_EPT2 - eptp_value = vm->ept_tree.eptp.value; -#else // !CONFIG_HAX_EPT2 - eptp_value = vm->ept->eptp.val; -#endif // CONFIG_HAX_EPT2 - return eptp_value; + return vm->ept_tree.eptp.value; } /* Ioctl will call this function to create a vm */ @@ -95,9 +88,7 @@ struct vm_t * hax_create_vm(int *vm_id) { struct vm_t *hvm; int id; -#ifdef CONFIG_HAX_EPT2 int ret; -#endif // CONFIG_HAX_EPT2 if ((!hax->vmx_enable_flag) || (!hax->nx_enable_flag)) { hax_log(HAX_LOGE, "VT or NX is not enabled, can not setup VM!\n"); @@ -134,22 +125,21 @@ struct vm_t * hax_create_vm(int *vm_id) hvm->hva_list_1 = hax_vmalloc(((HVA_MAP_ARRAY_SIZE / 4096) * sizeof(struct hva_entry)), HAX_MEM_NONPAGE); if (!hvm->hva_list_1) - goto fail00; + goto fail0; memset((void *)(hvm->hva_list_1), 0, ((HVA_MAP_ARRAY_SIZE / 4096) * sizeof(struct hva_entry))); #endif -#ifdef CONFIG_HAX_EPT2 ret = gpa_space_init(&hvm->gpa_space); if (ret) { hax_log(HAX_LOGE, "%s: gpa_space_init() returned %d\n", __func__, ret); - goto fail0; + goto fail1; } ret = ept_tree_init(&hvm->ept_tree); if (ret) { hax_log(HAX_LOGE, "%s: ept_tree_init() returned %d\n", __func__, ret); - goto fail0; + goto fail1; } 
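Review note: with ept_init()/ept_free() gone, the cleanup ladder in hax_create_vm() loses one rung, so the old fail00/fail0 labels collapse into fail0/fail1. The underlying C idiom, sketched with hypothetical names:

    /* Goto-cleanup ladder: unwind in reverse order of successful setup. */
    extern int step_a(void), step_b(void);
    extern void undo_a(void);

    int create(void)
    {
        if (!step_a())
            goto fail;  /* nothing to undo yet */
        if (!step_b())
            goto fail0; /* undo step_a only */
        return 0;
    fail0:
        undo_a();
    fail:
        return -1;
    }

Each label must undo exactly the steps that succeeded before the jump, which is why deleting ept_init() forces a renumbering of the labels rather than simply dropping the ept_free() call at the old fail1.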
hvm->gpa_space_listener.mapping_added = NULL; @@ -160,10 +150,6 @@ struct vm_t * hax_create_vm(int *vm_id) hax_log(HAX_LOGI, "%s: Invoking INVEPT for VM %d\n", __func__, hvm->vm_id); invept(hvm, EPT_INVEPT_SINGLE_CONTEXT); -#else // !CONFIG_HAX_EPT2 - if (!ept_init(hvm)) - goto fail0; -#endif // CONFIG_HAX_EPT2 hvm->vm_lock = hax_mutex_alloc_init(); if (!hvm->vm_lock) @@ -181,13 +167,11 @@ struct vm_t * hax_create_vm(int *vm_id) fail2: hax_mutex_free(hvm->vm_lock); fail1: - ept_free(hvm); -fail0: #ifdef HAX_ARCH_X86_32 hax_vfree(hvm->hva_list_1, ((HVA_MAP_ARRAY_SIZE / 4096) * sizeof(struct hva_entry))); -fail00: +fail0: hax_vfree(hvm->hva_list, ((HVA_MAP_ARRAY_SIZE / 4096) * sizeof(struct hva_entry))); fail: @@ -238,17 +222,15 @@ int hax_teardown_vm(struct vm_t *vm) ((HVA_MAP_ARRAY_SIZE / 4096) * sizeof(struct hva_entry))); } #endif -#ifndef CONFIG_HAX_EPT2 - ept_free(vm); -#endif // !CONFIG_HAX_EPT2 + hax_vm_free_p2m_map(vm); hax_mutex_free(vm->vm_lock); hax_put_vm_mid(vm->vm_id); -#ifdef CONFIG_HAX_EPT2 + gpa_space_remove_listener(&vm->gpa_space, &vm->gpa_space_listener); ept_tree_free(&vm->ept_tree); gpa_space_free(&vm->gpa_space); -#endif // CONFIG_HAX_EPT2 + hax_vfree(vm, sizeof(struct vm_t)); hax_log(HAX_LOGE, "...........hax_teardown_vm\n"); return 0; @@ -351,30 +333,6 @@ int set_vm_host(struct vm_t *vm, void *vm_host) return 0; } -static int set_p2m_mapping(struct vm_t *vm, uint64_t gpfn, uint64_t hva, uint64_t hpa) -{ - uint32_t which_g = gpfn_to_g(gpfn); - uint32_t index = gpfn_in_g(gpfn); - struct hax_p2m_entry *p2m_base; - - if (which_g >= MAX_GMEM_G) - return -E2BIG; - - p2m_base = vm->p2m_map[which_g]; - - if (!p2m_base) { - p2m_base = hax_vmalloc(GPFN_MAP_ARRAY_SIZE, 0); - if (!p2m_base) - return -ENOMEM; - memset((void *)p2m_base, 0, GPFN_MAP_ARRAY_SIZE); - vm->p2m_map[which_g] = p2m_base; - } - p2m_base[index].hva = hva; - p2m_base[index].hpa = hpa; - - return 0; -} - static struct hax_p2m_entry * hax_get_p2m_entry(struct vm_t *vm, uint64_t gpfn) { uint32_t which_g = gpfn_to_g(gpfn); @@ -387,22 +345,8 @@ static struct hax_p2m_entry * hax_get_p2m_entry(struct vm_t *vm, uint64_t gpfn) return &p2m_base[index]; } -/* FIXME: This call doesn't work for 32-bit Windows. 
*/ -static void * hax_gpfn_to_hva(struct vm_t *vm, uint64_t gpfn) -{ - mword hva; - struct hax_p2m_entry *entry; - - entry = hax_get_p2m_entry(vm, gpfn); - if (!entry || !entry->hva) - return NULL; - hva = (mword)entry->hva; - return (void *)hva; -} - uint64_t hax_gpfn_to_hpa(struct vm_t *vm, uint64_t gpfn) { -#ifdef CONFIG_HAX_EPT2 uint64_t pfn; pfn = gpa_space_get_pfn(&vm->gpa_space, gpfn, NULL); @@ -410,16 +354,6 @@ uint64_t hax_gpfn_to_hpa(struct vm_t *vm, uint64_t gpfn) return 0; } return pfn << PG_ORDER_4K; -#else // !CONFIG_HAX_EPT2 - uint64_t hpa; - struct hax_p2m_entry *entry; - - entry = hax_get_p2m_entry(vm, gpfn); - if (!entry || !entry->hpa) - return 0; - hpa = entry->hpa; - return hpa; -#endif // CONFIG_HAX_EPT2 } #ifdef HAX_ARCH_X86_32 @@ -535,113 +469,3 @@ static int gpfn_to_hva_recycle(struct vm_t *vm, uint64_t cr3_cur, int flag) return count; } #endif - -#ifndef CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_64 -void * hax_map_gpfn(struct vm_t *vm, uint64_t gpfn) -{ -#ifdef HAX_ARCH_X86_64 - return hax_gpfn_to_hva(vm, gpfn); -#else - uint64_t hpa; - hpa = hax_gpfn_to_hpa(vm, gpfn); - return hax_vmap(hpa, 4096); -#endif -} - -void hax_unmap_gpfn(void *va) -{ -#ifdef HAX_ARCH_X86_64 -#else - hax_vunmap(va, 4096); -#endif -} -#else // !HAX_ARCH_X86_64 -void * hax_map_gpfn(struct vm_t *vm, uint64_t gpfn, bool flag, hax_paddr_t gcr3, - uint8_t level) -{ -#ifdef HAX_ARCH_X86_64 - return hax_gpfn_to_hva(vm, gpfn); -#else - struct hax_p2m_entry *entry; - uint64_t hpa = 0; - void *hva = NULL; - - entry = hax_get_p2m_entry(vm, gpfn); - -retry: - if (!entry || !entry->hva) { - if (entry) { - hpa = entry->hpa; - } - if (flag || (vm->hva_limit < HOST_VIRTUAL_ADDR_LIMIT)) { - hva = hax_vmap(hpa, 4096); - if (entry) { - entry->hva = (uint64_t)hva; - } - vm->hva_limit += 4096; - if ((vm->hva_limit > HOST_VIRTUAL_ADDR_RECYCLE) && - (vm->hva_limit <= HOST_VIRTUAL_ADDR_LIMIT)) { - while (vm->hva_list[vm->hva_index].hva) { - vm->hva_index++; - } - vm->hva_list[vm->hva_index].gpfn = gpfn; - vm->hva_list[vm->hva_index].hva = (uint64_t)hva; - vm->hva_list[vm->hva_index].gcr3 = gcr3; - vm->hva_list[vm->hva_index].is_kern = flag; - vm->hva_list[vm->hva_index].level = level; - vm->hva_index++; - } else { - vm->hva_list_1[vm->hva_index_1].gpfn = gpfn; - vm->hva_list_1[vm->hva_index_1].hva = (uint64_t)hva; - vm->hva_list_1[vm->hva_index_1].gcr3 = gcr3; - vm->hva_list_1[vm->hva_index_1].is_kern = flag; - vm->hva_list_1[vm->hva_index_1].level = level; - vm->hva_index_1++; - } - } else { - if (gpfn_to_hva_recycle(vm, gcr3, false)) - goto retry; - else - hva = hax_vmap(hpa, 4096); - } - return hva; - } else - return (void *)((mword)entry->hva); -#endif -} - -void hax_unmap_gpfn(struct vm_t *vm, void *va, uint64_t gpfn) -{ -#ifdef HAX_ARCH_X86_64 -#else - struct hax_p2m_entry *entry; - - entry = hax_get_p2m_entry(vm, gpfn); - if (!entry) { - hax_log(HAX_LOGE, "We cannot find the p2m entry!\n"); - return; - } - - if (!entry->hva) { - hax_vunmap(va, 4096); - } -#endif -} -#endif // HAX_ARCH_X86_64 -#endif // !CONFIG_HAX_EPT2 - -int hax_core_set_p2m(struct vm_t *vm, uint64_t gpfn, uint64_t hpfn, uint64_t hva, - uint8_t flags) -{ - int ret; - - ret = set_p2m_mapping(vm, gpfn, hva & ~HAX_PAGE_MASK, hpfn << 12); - if (ret < 0) { - hax_log(HAX_LOGE, "Failed to set p2m mapping, gpfn:%llx, hva:%llx, " - "hpa:%llx, ret:%d\n", gpfn, hva, hpfn << 12, ret); - return 0; - } - - return 1; -} diff --git a/core/vtlb.c b/core/vtlb.c index 58702027..a87bb1ec 100644 --- a/core/vtlb.c +++ b/core/vtlb.c @@ -546,19 +546,10 @@ static 
uint32_t vcpu_mmu_walk(struct vcpu_t *vcpu, hax_vaddr_t va, uint32_t acce { uint lvl, idx; void *pte_va; -#ifdef CONFIG_HAX_EPT2 hax_kmap_user pte_kmap; bool writable; -#endif // CONFIG_HAX_EPT2 pte32_t *pte, old_pte; hax_paddr_t gpt_base; -#ifndef CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - hax_paddr_t g_cr3 = 0; - bool is_kernel = false; - int old_gpt_base; -#endif -#endif // !CONFIG_HAX_EPT2 bool pat; uint64_t rights, requested_rights; @@ -568,70 +559,35 @@ static uint32_t vcpu_mmu_walk(struct vcpu_t *vcpu, hax_vaddr_t va, uint32_t acce // Seems the following one is wrong? // hax_assert((mmu->guest_mode) == PM_2LVL); -#ifndef CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - is_kernel = (va >= KERNEL_BASE) ? true : false; -#endif -#endif // !CONFIG_HAX_EPT2 - retry: rights = TF_WRITE | TF_USER; gpt_base = vcpu->state->_cr3 & pte32_get_cr3_mask(); -#ifndef CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - g_cr3 = gpt_base; -#endif -#endif // !CONFIG_HAX_EPT2 - // Page table walker. for (lvl = PM_2LVL; lvl--; ) { // Fetch the page table entry. idx = pte32_get_idx(lvl, va); -#ifdef CONFIG_HAX_EPT2 pte_va = gpa_space_map_page(&vcpu->vm->gpa_space, gpt_base >> PG_ORDER_4K, &pte_kmap, &writable); -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - pte_va = hax_map_gpfn(vcpu->vm, gpt_base >> 12, is_kernel, g_cr3, lvl); -#else - pte_va = hax_map_gpfn(vcpu->vm, gpt_base >> 12); -#endif -#endif // CONFIG_HAX_EPT2 + if (!pte_va) return TF_FAILED; -#ifdef CONFIG_HAX_EPT2 + hax_assert(!(update && !writable)); -#endif // CONFIG_HAX_EPT2 + pte = (pte32_t *)pte_va + idx; old_pte = *pte; // Check access if (!pte32_is_present(&old_pte)) { -#ifdef CONFIG_HAX_EPT2 gpa_space_unmap_page(&vcpu->vm->gpa_space, &pte_kmap); -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - hax_unmap_gpfn(vcpu->vm, pte_va, gpt_base >> 12); -#else - hax_unmap_gpfn(pte_va); -#endif -#endif // CONFIG_HAX_EPT2 return TF_FAILED | access; } if (pte32_check_rsvd(&old_pte, lvl)) { -#ifdef CONFIG_HAX_EPT2 gpa_space_unmap_page(&vcpu->vm->gpa_space, &pte_kmap); -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - hax_unmap_gpfn(vcpu->vm, pte_va, gpt_base >> 12); -#else - hax_unmap_gpfn(pte_va); -#endif -#endif // CONFIG_HAX_EPT2 return TF_FAILED | TF_PROTECT | TF_RSVD | access; } @@ -648,35 +604,13 @@ static uint32_t vcpu_mmu_walk(struct vcpu_t *vcpu, hax_vaddr_t va, uint32_t acce if (!pte32_atomic_set_accessed(pte, &old_pte)) { hax_log(HAX_LOGD, "translate walk: atomic PTE update failed\n"); -#ifdef CONFIG_HAX_EPT2 gpa_space_unmap_page(&vcpu->vm->gpa_space, &pte_kmap); -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - hax_unmap_gpfn(vcpu->vm, pte_va, gpt_base >> 12); -#else - hax_unmap_gpfn(pte_va); -#endif -#endif // CONFIG_HAX_EPT2 + goto retry; } } -#ifndef CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - old_gpt_base = gpt_base; -#endif -#endif // !CONFIG_HAX_EPT2 gpt_base = pte32_get_address(&old_pte, lvl, 0); -#ifdef CONFIG_HAX_EPT2 gpa_space_unmap_page(&vcpu->vm->gpa_space, &pte_kmap); -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - hax_unmap_gpfn(vcpu->vm, pte_va, old_gpt_base >> 12); -#endif - -#ifdef HAX_ARCH_X86_64 - hax_unmap_gpfn(pte_va); -#endif // CONFIG_HAX_EPT2 -#endif } else { // Permission violations must be checked only after present bit is // checked at every level. 
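Review note: the vtlb.c hunks above and below all converge on one EPT2 pattern: gpa_space_map_page() yields a kernel VA for a single guest page frame and fills a hax_kmap_user cookie that the matching gpa_space_unmap_page() consumes. A condensed sketch of that pattern, using a hypothetical helper name with error handling trimmed:

    /* Map one guest page, touch it, and always unmap before returning. */
    static uint32_t touch_guest_page(struct vcpu_t *vcpu, hax_paddr_t gpa)
    {
        hax_kmap_user kmap;
        bool writable;
        void *va = gpa_space_map_page(&vcpu->vm->gpa_space,
                                      gpa >> PG_ORDER_4K, &kmap, &writable);

        if (!va)
            return TF_FAILED;
        /* ... read or atomically update the guest PTE through va ... */
        gpa_space_unmap_page(&vcpu->vm->gpa_space, &kmap);
        return TF_OK;
    }

Every early return in vcpu_mmu_walk() must pair with an unmap, which is why each failure path in the rewritten loop calls gpa_space_unmap_page() before returning.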
@@ -687,15 +621,8 @@ static uint32_t vcpu_mmu_walk(struct vcpu_t *vcpu, hax_vaddr_t va, uint32_t acce } if ((rights & requested_rights) != requested_rights) { -#ifdef CONFIG_HAX_EPT2 gpa_space_unmap_page(&vcpu->vm->gpa_space, &pte_kmap); -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - hax_unmap_gpfn(vcpu->vm, pte_va, gpt_base >> 12); -#else - hax_unmap_gpfn(pte_va); -#endif // CONFIG_HAX_EPT2 -#endif + return TF_FAILED | TF_PROTECT | access; } @@ -706,15 +633,7 @@ static uint32_t vcpu_mmu_walk(struct vcpu_t *vcpu, hax_vaddr_t va, uint32_t acce &old_pte)) { hax_log(HAX_LOGD, "translate walk: atomic PTE update failed\n"); -#ifdef CONFIG_HAX_EPT2 gpa_space_unmap_page(&vcpu->vm->gpa_space, &pte_kmap); -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - hax_unmap_gpfn(vcpu->vm, pte_va, gpt_base >> 12); -#else - hax_unmap_gpfn(pte_va); -#endif -#endif // CONFIG_HAX_EPT2 goto retry; } } @@ -767,17 +686,9 @@ static uint32_t vcpu_mmu_walk(struct vcpu_t *vcpu, hax_vaddr_t va, uint32_t acce vcpu->prefetch[i].flag = 1; } } -#ifdef CONFIG_HAX_EPT2 + gpa_space_unmap_page(&vcpu->vm->gpa_space, &pte_kmap); -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - hax_unmap_gpfn(vcpu->vm, pte_va, gpt_base >> 12); -#endif - -#ifdef HAX_ARCH_X86_64 - hax_unmap_gpfn(pte_va); -#endif -#endif // CONFIG_HAX_EPT2 + return TF_OK; } } @@ -813,7 +724,6 @@ bool handle_vtlb(struct vcpu_t *vcpu) return 1; } -#ifdef CONFIG_HAX_EPT2 // TODO: Move these functions to another source file (e.g. mmio.c), since they // are not specific to vTLB mode static inline void * mmio_map_guest_virtual_page_fast(struct vcpu_t *vcpu, @@ -921,7 +831,6 @@ int mmio_fetch_instruction(struct vcpu_t *vcpu, uint64_t gva, uint8_t *buf, int memcpy_s(buf, len, src_buf + offset, len); return 0; } -#endif // CONFIG_HAX_EPT2 /* * Read guest-linear memory. @@ -941,25 +850,11 @@ uint32_t vcpu_read_guest_virtual(struct vcpu_t *vcpu, hax_vaddr_t addr, void *ds // TBD: use guest CPL for access checks char *dstp = dst; uint32_t offset = 0; -#ifdef CONFIG_HAX_EPT2 int len2; -#else // !CONFIG_HAX_EPT2 - void *hva, *hva_base; -#ifdef HAX_ARCH_X86_32 - bool is_kernel = false; - hax_paddr_t g_cr3 = 0; -#endif -#endif // !CONFIG_HAX_EPT2 + // Flag == 1 is not currently used, but it could be enabled if useful. 
hax_assert(flag == 0 || flag == 2); -#ifndef CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - is_kernel = addr >= KERNEL_BASE; - g_cr3 = vcpu->state->_cr3 & pte32_get_cr3_mask(); -#endif -#endif // !CONFIG_HAX_EPT2 - while (offset < size) { hax_paddr_t gpa; uint64_t len = size - offset; @@ -981,7 +876,7 @@ uint32_t vcpu_read_guest_virtual(struct vcpu_t *vcpu, hax_vaddr_t addr, void *ds // hax_log(HAX_LOGI, "%s: gva=0x%llx, gpa=0x%llx, len=0x%llx\n", // __func__, addr + offset, gpa, len); // } -#ifdef CONFIG_HAX_EPT2 + len2 = gpa_space_read_data(&vcpu->vm->gpa_space, gpa, (int)len, (uint8_t *)(dstp + offset)); if (len2 <= 0) { @@ -993,25 +888,7 @@ uint32_t vcpu_read_guest_virtual(struct vcpu_t *vcpu, hax_vaddr_t addr, void *ds } else { len = (uint64_t)len2; } -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - hva_base = hax_map_gpfn(vcpu->vm, gpa >> 12, is_kernel, g_cr3, 0); -#else - hva_base = hax_map_gpfn(vcpu->vm, gpa >> 12); -#endif - if (hva_base) { - hva = (uint8_t *)hva_base + (gpa & 0xfff); - memcpy_s((void *)(dstp + offset), dst_buflen - offset , hva, len); - } else { - vcpu_set_panic(vcpu); - hax_log(HAX_LOGPANIC, "BUG_ON during the call:%s\n", __FUNCTION__); - } -#ifdef HAX_ARCH_X86_32 - hax_unmap_gpfn(vcpu->vm, hva_base, gpa >> 12); -#else - hax_unmap_gpfn(hva_base); -#endif -#endif //CONFIG_HAX_EPT2 + offset += len; } @@ -1037,24 +914,9 @@ uint32_t vcpu_write_guest_virtual(struct vcpu_t *vcpu, hax_vaddr_t addr, // TODO: use guest CPL for access checks const char *srcp = src; uint32_t offset = 0; -#ifdef CONFIG_HAX_EPT2 int len2; -#else // !CONFIG_HAX_EPT2 - void *hva, *hva_base; -#ifdef HAX_ARCH_X86_32 - bool is_kernel = false; - hax_paddr_t g_cr3 = 0; -#endif -#endif // !CONFIG_HAX_EPT2 - hax_assert(flag == 0 || flag == 1); - -#ifndef CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - is_kernel = addr >= KERNEL_BASE; - g_cr3 = vcpu->state->_cr3 & pte32_get_cr3_mask(); -#endif -#endif // !CONFIG_HAX_EPT2 + hax_assert(flag == 0 || flag == 1); hax_assert(dst_buflen >= size); while (offset < size) { @@ -1076,7 +938,7 @@ uint32_t vcpu_write_guest_virtual(struct vcpu_t *vcpu, hax_vaddr_t addr, hax_inject_page_fault(vcpu, r & 0x1f); return false; } -#ifdef CONFIG_HAX_EPT2 + len2 = (uint64_t)gpa_space_write_data(&vcpu->vm->gpa_space, gpa, len, (uint8_t *)(srcp + offset)); if (len2 <= 0) { @@ -1088,25 +950,7 @@ uint32_t vcpu_write_guest_virtual(struct vcpu_t *vcpu, hax_vaddr_t addr, } else { len = len2; } -#else // !CONFIG_HAX_EPT2 -#ifdef HAX_ARCH_X86_32 - hva_base = hax_map_gpfn(vcpu->vm, gpa >> 12, is_kernel, g_cr3, 0); -#else - hva_base = hax_map_gpfn(vcpu->vm, gpa >> 12); -#endif - if (hva_base) { - hva = (uint8_t *)hva_base + (gpa & 0xfff); - memcpy_s(hva, dst_buflen - offset , (void *)(srcp + offset), len); - } else { - vcpu_set_panic(vcpu); - hax_log(HAX_LOGPANIC, "BUG_ON during the call:%s\n", __FUNCTION__); - } -#ifdef HAX_ARCH_X86_32 - hax_unmap_gpfn(vcpu->vm, hva_base, gpa >> 12); -#else - hax_unmap_gpfn(hva_base); -#endif -#endif // CONFIG_HAX_EPT2 + offset += len; } diff --git a/include/hax.h b/include/hax.h index a95345f6..c82863b0 100644 --- a/include/hax.h +++ b/include/hax.h @@ -42,20 +42,6 @@ struct vcpu_t; #define HAX_CUR_VERSION 0x0004 #define HAX_COMPAT_VERSION 0x0001 -// EPT2 refers to the new memory virtualization engine, which implements lazy -// allocation, and therefore greatly speeds up ALLOC_RAM and SET_RAM VM ioctls -// as well as brings down HAXM driver's memory footprint. 
It is mostly written -// in new source files (including core/include/memory.h, core/include/ept2.h, -// include/hax_host_mem.h and their respective .c/.cpp files), separate from -// the code for the legacy memory virtualization engine (which is scattered -// throughout core/memory.c, core/vm.c, core/ept.c, etc.). This makes it -// possible to select between the two engines at compile time, simply by -// defining (which selects the new engine) or undefining (which selects the old -// engine) the following macro. -// TODO: Completely remove the legacy engine and this macro when the new engine -// is considered stable. -#define CONFIG_HAX_EPT2 - /* TBD */ #define for_each_vcpu(vcpu, vm) @@ -129,8 +115,6 @@ int hax_clear_vcpumem(struct hax_vcpu_mem *mem); int hax_setup_vcpumem(struct hax_vcpu_mem *vcpumem, uint64_t uva, uint32_t size, int flags); -uint64_t get_hpfn_from_pmem(struct hax_vcpu_mem *pmem, uint64_t va); - #define HAX_VCPUMEM_VALIDVA 0x1 enum hax_notify_event { @@ -160,27 +144,11 @@ void hax_slab_free(phax_slab_t *type, void* cache); * requirement */ hax_pa_t hax_pa(void *va); -/* - * Map the physical address into kernel address space - * XXX please don't use this function for long-time map. - * in Mac side, we utilize the IOMemoryDescriptor class to map this, and the - * object have to be kept in a list till the vunmap. And when we do the vunmap, - * we need search the list again, thus it will cost memory/performance issue - */ -void *hax_vmap(hax_pa_t pa, uint32_t size); -static inline void * hax_vmap_pfn(hax_pfn_t pfn) -{ - return hax_vmap(pfn << HAX_PAGE_SHIFT, HAX_PAGE_SIZE); -} /* * unmap the memory mapped above */ void hax_vunmap(void *va, uint32_t size); -static inline void hax_vunmap_pfn(void *va) -{ - hax_vunmap((void*)((mword)va & ~HAX_PAGE_MASK), HAX_PAGE_SIZE); -} struct hax_page; typedef struct hax_page * phax_page; diff --git a/platforms/darwin/com_intel_hax_mem.cpp b/platforms/darwin/com_intel_hax_mem.cpp index 9011c198..2786b674 100644 --- a/platforms/darwin/com_intel_hax_mem.cpp +++ b/platforms/darwin/com_intel_hax_mem.cpp @@ -147,30 +147,8 @@ extern "C" int hax_clear_vcpumem(struct hax_vcpu_mem *mem) return 0; } -extern "C" uint64_t get_hpfn_from_pmem(struct hax_vcpu_mem *pmem, uint64_t va) -{ - uint64_t phys; - uint64_t length; - struct darwin_vcpu_mem *hinfo; - - if (!pmem || !pmem->hinfo) - return 0; - if (!in_pmem_range(pmem, va)) - return 0; - - hinfo = (struct darwin_vcpu_mem *)pmem->hinfo; - phys = hinfo->md->getPhysicalSegment((va - pmem->uva), - (IOByteCount *)&length, - kIOMemoryMapperNone); - return phys >> page_shift; -} - /* In darwin, we depend on boot code to set the limit */ extern "C" uint64_t hax_get_memory_threshold(void) { -#ifdef CONFIG_HAX_EPT2 // Since there is no memory cap, just return a sufficiently large value return 1ULL << 48; // PHYSADDR_MAX + 1 -#else // !CONFIG_HAX_EPT2 - return 0; -#endif // CONFIG_HAX_EPT2 } diff --git a/platforms/darwin/com_intel_hax_ui.c b/platforms/darwin/com_intel_hax_ui.c index dcce75e1..22fb15be 100644 --- a/platforms/darwin/com_intel_hax_ui.c +++ b/platforms/darwin/com_intel_hax_ui.c @@ -411,7 +411,6 @@ static int hax_vm_ioctl(dev_t dev, ulong cmd, caddr_t data, int flag, ret = hax_vm_set_ram(cvm, info); break; } -#ifdef CONFIG_HAX_EPT2 case HAX_VM_IOCTL_SET_RAM2: { struct hax_set_ram_info2 *info; info = (struct hax_set_ram_info2 *)data; @@ -437,7 +436,6 @@ static int hax_vm_ioctl(dev_t dev, ulong cmd, caddr_t data, int flag, ret = hax_vm_protect_ram(cvm, info); break; } -#endif case 
HAX_VM_IOCTL_NOTIFY_QEMU_VERSION: { int pid; char task_name[TASK_NAME_LEN]; diff --git a/platforms/darwin/hax_mem_alloc.cpp b/platforms/darwin/hax_mem_alloc.cpp index 25580190..ae9c7bb6 100644 --- a/platforms/darwin/hax_mem_alloc.cpp +++ b/platforms/darwin/hax_mem_alloc.cpp @@ -132,43 +132,6 @@ struct _hax_vmap_entry { uint32_t size; }; -extern "C" void * hax_vmap(hax_pa_t pa, uint32_t size) -{ - IOMemoryDescriptor *md; - IOMemoryMap *mm; - struct _hax_vmap_entry *entry; - - entry = (struct _hax_vmap_entry *)hax_vmalloc( - sizeof(struct _hax_vmap_entry), 0); - if (entry == NULL) { - printf("Error to vmalloc the hax vmap entry\n"); - return NULL; - } - entry->size = size; - - md = IOMemoryDescriptor::withPhysicalAddress(pa, size, kIODirectionOutIn); - if (md == NULL) { - hax_vfree(entry, 0); - return NULL; - } - entry->md = md; - - mm = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, 0, size); - if (mm == NULL) { - hax_vfree(entry, 0); - md->release(); - return NULL; - } - entry->mm = mm; - entry->va = (void *)(mm->getVirtualAddress()); - - hax_spin_lock(vmap_lock); - hax_list_add(&entry->list, &_vmap_list); - hax_spin_unlock(vmap_lock); - - return entry->va; -} - extern "C" void hax_vunmap(void *addr, uint32_t size) { unsigned long va = (unsigned long)addr; diff --git a/platforms/linux/components.c b/platforms/linux/components.c index 9a0ef1bd..0f4140b9 100644 --- a/platforms/linux/components.c +++ b/platforms/linux/components.c @@ -567,7 +567,6 @@ static long hax_vm_ioctl(struct file *filp, unsigned int cmd, ret = hax_vm_set_ram(cvm, &info); break; } -#ifdef CONFIG_HAX_EPT2 case HAX_VM_IOCTL_SET_RAM2: { struct hax_set_ram_info2 info; if (copy_from_user(&info, argp, sizeof(info))) { @@ -599,7 +598,6 @@ static long hax_vm_ioctl(struct file *filp, unsigned int cmd, ret = hax_vm_protect_ram(cvm, &info); break; } -#endif case HAX_VM_IOCTL_NOTIFY_QEMU_VERSION: { struct hax_qemu_version info; if (copy_from_user(&info, argp, sizeof(info))) { diff --git a/platforms/linux/hax_mem_alloc.c b/platforms/linux/hax_mem_alloc.c index f9e41076..187ba532 100644 --- a/platforms/linux/hax_mem_alloc.c +++ b/platforms/linux/hax_mem_alloc.c @@ -66,11 +66,6 @@ void hax_vfree_aligned(void *va, uint32_t size, uint32_t alignment, hax_vfree_flags(va, size, flags); } -void * hax_vmap(hax_pa_t pa, uint32_t size) -{ - return ioremap(pa, size); -} - void hax_vunmap(void *addr, uint32_t size) { return iounmap(addr); diff --git a/platforms/linux/hax_mm.c b/platforms/linux/hax_mm.c index 5af70280..3bcf7fdf 100644 --- a/platforms/linux/hax_mm.c +++ b/platforms/linux/hax_mm.c @@ -125,10 +125,6 @@ int hax_setup_vcpumem(struct hax_vcpu_mem *mem, uint64_t uva, uint32_t size, uint64_t hax_get_memory_threshold(void) { -#ifdef CONFIG_HAX_EPT2 // Since there is no memory cap, just return a sufficiently large value return 1ULL << 48; // PHYSADDR_MAX + 1 -#else // !CONFIG_HAX_EPT2 - return 0; -#endif // CONFIG_HAX_EPT2 } diff --git a/platforms/netbsd/hax_entry_vm.c b/platforms/netbsd/hax_entry_vm.c index 3e112c94..9931ee64 100644 --- a/platforms/netbsd/hax_entry_vm.c +++ b/platforms/netbsd/hax_entry_vm.c @@ -182,7 +182,6 @@ int hax_vm_ioctl(dev_t self __unused, u_long cmd, void *data, int flag, ret = hax_vm_set_ram(cvm, info); break; } -#ifdef CONFIG_HAX_EPT2 case HAX_VM_IOCTL_SET_RAM2: { struct hax_set_ram_info2 *info; info = (struct hax_set_ram_info2 *)data; @@ -208,7 +207,6 @@ int hax_vm_ioctl(dev_t self __unused, u_long cmd, void *data, int flag, ret = hax_vm_protect_ram(cvm, info); break; } -#endif case 
HAX_VM_IOCTL_NOTIFY_QEMU_VERSION: { struct hax_qemu_version *info; info = (struct hax_qemu_version *)data; diff --git a/platforms/netbsd/hax_mem_alloc.c b/platforms/netbsd/hax_mem_alloc.c index 0458104c..1c587ced 100644 --- a/platforms/netbsd/hax_mem_alloc.c +++ b/platforms/netbsd/hax_mem_alloc.c @@ -84,26 +84,6 @@ void hax_vfree_aligned(void *va, uint32_t size, uint32_t alignment, hax_vfree_flags(va, size, flags); } -void * hax_vmap(hax_pa_t pa, uint32_t size) -{ - vaddr_t kva; - vaddr_t va, end_va; - unsigned long offset; - - offset = pa & PAGE_MASK; - pa = trunc_page(pa); - size = round_page(size + offset); - - kva = uvm_km_alloc(kernel_map, size, PAGE_SIZE, UVM_KMF_VAONLY|UVM_KMF_WAITVA); - - for (va = kva, end_va = kva + size; va < end_va; va += PAGE_SIZE, pa += PAGE_SIZE) { - pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED); - } - pmap_update(pmap_kernel()); - - return (void *)(kva + offset); -} - void hax_vunmap(void *addr, uint32_t size) { unsigned long offset; diff --git a/platforms/netbsd/hax_mm.c b/platforms/netbsd/hax_mm.c index 78dd1987..0b32b48d 100644 --- a/platforms/netbsd/hax_mm.c +++ b/platforms/netbsd/hax_mm.c @@ -136,10 +136,6 @@ int hax_setup_vcpumem(struct hax_vcpu_mem *mem, uint64_t uva, uint32_t size, uint64_t hax_get_memory_threshold(void) { -#ifdef CONFIG_HAX_EPT2 // Since there is no memory cap, just return a sufficiently large value return 1ULL << 48; // PHYSADDR_MAX + 1 -#else // !CONFIG_HAX_EPT2 - return 0; -#endif // CONFIG_HAX_EPT2 } diff --git a/platforms/windows/hax_entry.c b/platforms/windows/hax_entry.c index 74a0b440..2d184b6b 100644 --- a/platforms/windows/hax_entry.c +++ b/platforms/windows/hax_entry.c @@ -552,7 +552,6 @@ NTSTATUS HaxVmControl(PDEVICE_OBJECT DeviceObject, struct hax_vm_windows *ext, } break; } -#ifdef CONFIG_HAX_EPT2 case HAX_VM_IOCTL_SET_RAM2: { struct hax_set_ram_info2 *info; int res; @@ -601,7 +600,6 @@ NTSTATUS HaxVmControl(PDEVICE_OBJECT DeviceObject, struct hax_vm_windows *ext, } break; } -#endif case HAX_VM_IOCTL_NOTIFY_QEMU_VERSION: { struct hax_qemu_version *info; diff --git a/platforms/windows/hax_mem_alloc.c b/platforms/windows/hax_mem_alloc.c index 05db2dba..b20b6541 100644 --- a/platforms/windows/hax_mem_alloc.c +++ b/platforms/windows/hax_mem_alloc.c @@ -78,18 +78,6 @@ void hax_vfree_aligned(void *va, uint32_t size, uint32_t alignment, hax_vfree_flags(va, size, flags); } -void * hax_vmap(hax_pa_t pa, uint32_t size) -{ - PHYSICAL_ADDRESS phys_addr; - phys_addr.QuadPart = pa; - - if ((pa & (PAGE_SIZE - 1)) + size > PAGE_SIZE) { - hax_log(HAX_LOGW, "hax_vmap can't handle cross-page case!\n"); - return NULL; - } - return MmMapIoSpace(phys_addr, size, MmCached); -} - void hax_vunmap(void *addr, uint32_t size) { MmUnmapIoSpace(addr, size); diff --git a/platforms/windows/hax_mm.c b/platforms/windows/hax_mm.c index 87e71656..718d946c 100644 --- a/platforms/windows/hax_mm.c +++ b/platforms/windows/hax_mm.c @@ -175,75 +175,8 @@ int hax_setup_vcpumem(struct hax_vcpu_mem *mem, uint64_t uva, uint32_t size, return -1; } -uint64_t get_hpfn_from_pmem(struct hax_vcpu_mem *pmem, uint64_t va) -{ - PHYSICAL_ADDRESS phys; - - if (!in_pmem_range(pmem, va)) - return 0; - - phys = MmGetPhysicalAddress((PVOID)va); - if (phys.QuadPart == 0) { - if (pmem->kva != 0) { - uint64_t kva; - PHYSICAL_ADDRESS kphys; - - kva = (uint64_t)pmem->kva + (va - pmem->uva); - kphys = MmGetPhysicalAddress((PVOID)kva); - if (kphys.QuadPart == 0) - hax_log(HAX_LOGE, "kva phys is 0\n"); - else - return kphys.QuadPart >> PAGE_SHIFT; - } else { - 
unsigned long long index = 0; - PMDL pmdl = NULL; - PPFN_NUMBER ppfnnum; - - pmdl = ((struct windows_vcpu_mem *)(pmem->hinfo))->pmdl; - ppfnnum = MmGetMdlPfnArray(pmdl); - index = (va - (pmem->uva)) / PAGE_SIZE; - return ppfnnum[index]; - } - } - - return phys.QuadPart >> PAGE_SHIFT; -} - uint64_t hax_get_memory_threshold(void) { -#ifdef CONFIG_HAX_EPT2 // Since there is no memory cap, just return a sufficiently large value return 1ULL << 48; // PHYSADDR_MAX + 1 -#else // !CONFIG_HAX_EPT2 - uint64_t result = 0; - NTSTATUS status; - ULONG relative_to; - UNICODE_STRING path; - RTL_QUERY_REGISTRY_TABLE query_table[2]; - ULONG memlimit_megs = 0; - - relative_to = RTL_REGISTRY_ABSOLUTE | RTL_REGISTRY_OPTIONAL; - - RtlInitUnicodeString(&path, L"\\Registry\\Machine\\SOFTWARE\\HAXM\\HAXM\\"); - - /* The registry is Mega byte count */ - RtlZeroMemory(query_table, sizeof(query_table)); - - query_table[0].Flags = RTL_QUERY_REGISTRY_DIRECT; - query_table[0].Name = L"MemLimit"; - query_table[0].EntryContext = &memlimit_megs; - query_table[0].DefaultType = REG_DWORD; - query_table[0].DefaultLength = sizeof(ULONG); - query_table[0].DefaultData = &memlimit_megs; - - status = RtlQueryRegistryValues(relative_to, path.Buffer, &query_table[0], - NULL, NULL); - - if (NT_SUCCESS(status)) { - result = (uint64_t)memlimit_megs << 20; - hax_log(HAX_LOGI, "%s: result = 0x%x\n", __func__, result); - } - - return result; -#endif // CONFIG_HAX_EPT2 }
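Review note: taken together, the patch completes the TODO in the removed include/hax.h comment: CONFIG_HAX_EPT2 disappears and the EPT2 engine (lazy allocation via gpa_space/ept_tree) becomes the only memory virtualization path. The per-platform tail is uniform as well; on Darwin, Linux, NetBSD, and Windows the legacy limit plumbing (get_hpfn_from_pmem(), hax_vmap(), the Windows MemLimit registry lookup) is deleted, and every OS keeps the same definition:

    /* The single post-EPT2 implementation retained on all platforms. */
    uint64_t hax_get_memory_threshold(void)
    {
        // Since there is no memory cap, just return a sufficiently large value
        return 1ULL << 48; // PHYSADDR_MAX + 1
    }

With lazy allocation there is no up-front quota to enforce, so a value above any reachable guest-physical address effectively disables the check.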