From 08405b23757e7055cafc1454a30787495c67d45e Mon Sep 17 00:00:00 2001 From: Ivan Gualandri Date: Sun, 24 Mar 2024 19:14:10 +0000 Subject: [PATCH 01/14] Fix build issues --- src/include/kernel/mem/bitmap.h | 10 ++-- src/include/kernel/mem/vmm.h | 2 + src/kernel/arch/x86_64/mem/vmm_mapping.c | 74 +++++++++++++----------- src/kernel/framebuffer/framebuffer.c | 10 ++-- src/kernel/main.c | 4 +- src/kernel/mem/mmap.c | 2 +- src/kernel/mem/vmm.c | 4 +- 7 files changed, 59 insertions(+), 47 deletions(-) diff --git a/src/include/kernel/mem/bitmap.h b/src/include/kernel/mem/bitmap.h index e5a1a5d7..241603e6 100644 --- a/src/include/kernel/mem/bitmap.h +++ b/src/include/kernel/mem/bitmap.h @@ -27,15 +27,15 @@ typedef enum { extern size_t memory_size_in_bytes; extern uint64_t memory_map_phys_addr; -void _initialize_bitmap(unsigned long); +void _initialize_bitmap(uint64_t end_of_reserved_area); void _bitmap_get_region(uint64_t* base_address, size_t* length_in_bytes, address_type_t type); int64_t _bitmap_request_frame(); int64_t _bitmap_request_frames(size_t number_of_frames); -void _bitmap_set_bit(uint64_t); -void _bitmap_free_bit(uint64_t); -bool _bitmap_test_bit(uint64_t); -void _bitmap_set_bit_from_address(uint64_t); +void _bitmap_set_bit(uint64_t location); +void _bitmap_free_bit(uint64_t location); +bool _bitmap_test_bit(uint64_t location); +void _bitmap_set_bit_from_address(uint64_t address); uint32_t _compute_kernel_entries(uint64_t); #endif diff --git a/src/include/kernel/mem/vmm.h b/src/include/kernel/mem/vmm.h index dee0b9b3..9818aa44 100644 --- a/src/include/kernel/mem/vmm.h +++ b/src/include/kernel/mem/vmm.h @@ -9,6 +9,8 @@ //#define WRITE_ENABLE 2 //#define USER_LEVEL 4 +#define PAGE_DIR_SIZE 0x1000 + #define PHYS_ADDRESS_NOT_MAPPED 0 // Address is not mapped #define PHYS_ADDRESS_MAPPED 0b1 #define PHYS_ADDRESS_MISMATCH 0b10 // This is returned when given a phys and virt address, the virt address does not contain the phys one diff --git a/src/kernel/arch/x86_64/mem/vmm_mapping.c b/src/kernel/arch/x86_64/mem/vmm_mapping.c index 197752ab..59fb822a 100644 --- a/src/kernel/arch/x86_64/mem/vmm_mapping.c +++ b/src/kernel/arch/x86_64/mem/vmm_mapping.c @@ -11,10 +11,15 @@ void *map_phys_to_virt_addr_hh(void* physical_address, void* address, size_t fla uint16_t pdpr_e = PDPR_ENTRY((uint64_t) address); uint16_t pd_e = PD_ENTRY((uint64_t) address); - uint64_t *pml4_table = NULL; + //uint64_t *pml4_table = NULL; uint64_t *pdpr_root = NULL; uint64_t *pd_root = NULL; +#if SMALL_PAGES == 1 + uint64_t *pt_table = NULL; + uint16_t pt_e = PT_ENTRY((uint64_t) address); +#endif + uint8_t user_mode_status = 0; if ( !is_address_higher_half((uint64_t) address) ) { @@ -28,7 +33,7 @@ void *map_phys_to_virt_addr_hh(void* physical_address, void* address, size_t fla } if (pml4_root != NULL) { - pml4_table = pml4_root; + //pml4_table = pml4_root; pretty_logf(Verbose, "Entries values pml4_e: 0x%d pdpr_e: 0x%d pd_e: 0x%d", pml4_e, pdpr_e, pd_e); pretty_logf(Verbose, "\taddress: 0x%x, phys_address: 0x%x", address, physical_address); pretty_logf(Verbose, "\tpdpr base_address: 0x%x", pml4_root[pml4_e] & VM_PAGE_TABLE_BASE_ADDRESS_MASK); @@ -54,43 +59,38 @@ void *map_phys_to_virt_addr_hh(void* physical_address, void* address, size_t fla pd_root = new_table_hhdm; } else { pretty_log(Verbose, "No need to allocate pdpr"); - pd_root = (uint64_t *) hhdm_get_variable((uintptr_t) pdpr_root[pdpr_e] & VM_PAGE_TABLE_BASE_ADDRESS_MASK); + pd_root = (uint64_t *) hhdm_get_variable((uintptr_t) pdpr_root[pdpr_e] & 
VM_PAGE_TABLE_BASE_ADDRESS_MASK); } if( !(pd_root[pd_e] & 0b1) ) { + #if SMALL_PAGES == 1 uint64_t *new_table = pmm_alloc_frame(); pd_root[pd_e] = (uint64_t) new_table | user_mode_status | WRITE_BIT | PRESENT_BIT; uint64_t *new_table_hhdm = hhdm_get_variable((uintptr_t) new_table); clean_new_table(new_table_hhdm); - pt_root = new_table_hhdm; + pt_table = new_table_hhdm; + } else { + pretty_log(Verbose, "No need to allocate pd"); + pt_table = (uint64_t *) hhdm_get_variable((uintptr_t) pd_root[pd_e] & VM_PAGE_TABLE_BASE_ADDRESS_MASK); + } + + // This case apply only for 4kb pages, if the pt_e entry is not present in the page table we need to allocate a new 4k page + // Every entry in the page table is a 4kb page of physical memory + if( !(pt_table[pt_e] & 0b1)) { + pt_table[pt_e] = (uint64_t) physical_address | flags; + return address; + } #elif SMALL_PAGES == 0 pd_root[pd_e] = (uint64_t) (physical_address) | HUGEPAGE_BIT | flags | user_mode_status; pretty_logf(Verbose, " PD Flags: 0x%x entry value pd_root[0x%x]: 0x%x", flags, pd_e, pd_root[pd_e]); -#endif + return address; } - -#if SMALL_PAGES == 1 - //uint64_t *pt_table = (uint64_t *) (SIGN_EXTENSION | ENTRIES_TO_ADDRESS(510l, (uint64_t) pml4_e, (uint64_t) pdpr_e, (uint64_t) pd_e)); - uint16_t pt_e = PT_ENTRY((uint64_t) address); -#endif - -#if SMALL_PAGES == 1 - - // This case apply only for 4kb pages, if the pt_e entry is not present in the page table we need to allocate a new 4k page - // Every entry in the page table is a 4kb page of physical memory - if( !(pt_root[pt_e] & 0b1)) { - pt_root[pt_e] = (uint64_t) physical_address | flags; - } #endif - - //pml4_table = pml4_root; - - return address; } + //pml4_table = pml4_root return NULL; - } @@ -104,7 +104,7 @@ void *map_phys_to_virt_addr_hh(void* physical_address, void* address, size_t fla * @param pml4_root the pointer to the pml4 table, It should be the hhdm address. if null it will use the kernel default pml4 table. * @return address the virtual address specified in input, or NULL in case of error. 
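 * @note With SMALL_PAGES == 0 the mapping is installed as a 2MB huge page directly in the page directory entry; with SMALL_PAGES == 1 a page table is allocated if needed and a 4KB entry is used instead.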
*/ -void *map_phys_to_virt_addr(void* physical_address, void* address, size_t flags){ +void *map_phys_to_virt_addr(void* physical_address, void* address, size_t flags) { uint16_t pml4_e = PML4_ENTRY((uint64_t) address); uint16_t pdpr_e = PDPR_ENTRY((uint64_t) address); uint16_t pd_e = PD_ENTRY((uint64_t) address); @@ -161,7 +161,6 @@ void *map_phys_to_virt_addr(void* physical_address, void* address, size_t flags) } #if SMALL_PAGES == 1 - // This case apply only for 4kb pages, if the pt_e entry is not present in the page table we need to allocate a new 4k page // Every entry in the page table is a 4kb page of physical memory if( !(pt_table[pt_e] & 0b1)) { @@ -200,12 +199,12 @@ int unmap_vaddress(void *address){ return -1; } - #if SMALL_PAGES == 0 +#if SMALL_PAGES == 0 pretty_log(Verbose, "Freeing page"); pd_table[pd_e] = 0x0l; invalidate_page_table(pd_table); - #elif SMALL_PAGES == 1 - uint64_t *pt_table = SIGN_EXTENSION | ENTRIES_TO_ADDRESS(510l, (uint64_t) pml4_e, (uint64_t) pdpr_e, (uint64_t) pd_e); +#elif SMALL_PAGES == 1 + uint64_t *pt_table = (uint64_t *) (SIGN_EXTENSION | ENTRIES_TO_ADDRESS(510l, (uint64_t) pml4_e, (uint64_t) pdpr_e, (uint64_t) pd_e)); uint16_t pt_e = PT_ENTRY((uint64_t) address); if(!(pt_table[pt_e] & 0b1)) { @@ -213,7 +212,7 @@ int unmap_vaddress(void *address){ } pt_table[pt_e] = 0x0l; invalidate_page_table(address); - #endif +#endif return 0; } @@ -238,19 +237,28 @@ int unmap_vaddress_hh(void *address, uint64_t *pml4_root) { uint16_t pd_e = PD_ENTRY((uint64_t) address); uint64_t *pd_table = (uint64_t *) hhdm_get_variable((uintptr_t) pdpr_table[pdpr_e] & VM_PAGE_TABLE_BASE_ADDRESS_MASK); pretty_logf(Verbose, " pd_table[%d] = 0x%x", pd_e, pd_table[pd_e]); - #if SMALL_PAGES == 0 + if (!(pd_table[pd_e] &0b01)) { return -1; } +#if SMALL_PAGES == 0 pretty_logf(Verbose, " Unmapping address: 0x%x, pd_entry: %d", address, pd_e); pd_table[pd_e] = 0x0l; invalidate_page_table(address); return 0; - #endif +#elif SMALL_PAGES == 1 + uint64_t *pt_table = (uint64_t *) hhdm_get_variable((uintptr_t) pd_table[pd_e] & VM_PAGE_TABLE_BASE_ADDRESS_MASK); + uint16_t pt_e = PT_ENTRY((uint64_t) address); + pretty_logf(Verbose, " Unmapping address: 0x%x, pd_entry: %d", address, pt_e); + if ( !(pt_table[pt_e] & 0b01) ) { + return -1; + } - pretty_log(Verbose, "Either address or pml4_root are null, returning error"); - return -1; + pt_table[pt_e] = 0x0l; + invalidate_page_table(address); + return 0; +#endif } //TODO This function is no longer used, it may be removed in the future diff --git a/src/kernel/framebuffer/framebuffer.c b/src/kernel/framebuffer/framebuffer.c index 75bacef5..597ca745 100644 --- a/src/kernel/framebuffer/framebuffer.c +++ b/src/kernel/framebuffer/framebuffer.c @@ -1,15 +1,15 @@ #include -#include -#include -#include -#include +#include +#include +#include +#include #include #include #include //#ifdef DEBUG - This will be uncommented when the framebuffer library will be completed #include //#endif - +#include #include #include diff --git a/src/kernel/main.c b/src/kernel/main.c index 11d6fa10..6491be9a 100644 --- a/src/kernel/main.c +++ b/src/kernel/main.c @@ -76,7 +76,7 @@ void _init_basic_system(unsigned long addr){ tagmmap = (struct multiboot_tag_mmap *) (multiboot_mmap_data + _HIGHER_HALF_KERNEL_MEM_START); tagfb = (struct multiboot_tag_framebuffer *) (multiboot_framebuffer_data + _HIGHER_HALF_KERNEL_MEM_START); //Print basic mem Info data - pretty_logf(Info, "Available memory: lower (in kb): %d - upper (in kb): %d", tagmem->mem_lower, tagmem->mem_upper); + 
pretty_logf(Info, "Available memory: lower (in kb): %d - upper (in kb): %d - mbi_size: 0x%x", tagmem->mem_lower, tagmem->mem_upper, mbi_size); memory_size_in_bytes = (tagmem->mem_upper + 1024) * 1024; //Print mmap_info pretty_logf(Verbose, "Memory map Size: 0x%x, Entry size: 0x%x, EntryVersion: 0x%x", tagmmap->size, tagmmap->entry_size, tagmmap->entry_version); @@ -153,6 +153,8 @@ void kernel_start(unsigned long addr, unsigned long magic){ pretty_logf(Verbose, "\tNumber of glyphs: [0x%x] - Bytes per glyphs: [0x%x]", font->numglyph, font->bytesperglyph); pretty_logf(Verbose, "\tWidth: [0x%x] - Height: [0x%x]", font->width, font->height); } + higherHalfDirectMapBase = ((uint64_t) HIGHER_HALF_ADDRESS_OFFSET + VM_KERNEL_MEMORY_PADDING); + pretty_logf(Verbose, "HigherHalf Initial entries: pml4: %d, pdpr: %d, pd: %d", PML4_ENTRY((uint64_t) higherHalfDirectMapBase), PDPR_ENTRY((uint64_t) higherHalfDirectMapBase), PD_ENTRY((uint64_t) higherHalfDirectMapBase)); _init_basic_system(addr); pretty_logf(Verbose, "Kernel End: 0x%x - Physical: %x", (unsigned long)&_kernel_end, (unsigned long)&_kernel_physical_end); diff --git a/src/kernel/mem/mmap.c b/src/kernel/mem/mmap.c index 2dcdb490..a0a77894 100644 --- a/src/kernel/mem/mmap.c +++ b/src/kernel/mem/mmap.c @@ -74,7 +74,7 @@ uintptr_t _mmap_determine_bitmap_region(uint64_t lower_limit, size_t bytes_neede if (actual_available_space >= bytes_needed) { - pretty_logf(Verbose, "Found space for bitmap at address: 0x%x", current_entry->addr + entry_offset); + pretty_logf(Verbose, "Found space for bitmap at address: 0x%x requested size: 0x%x", current_entry->addr + entry_offset, bytes_needed); return current_entry->addr + entry_offset; } } diff --git a/src/kernel/mem/vmm.c b/src/kernel/mem/vmm.c index 17c2d69b..dd7f4554 100644 --- a/src/kernel/mem/vmm.c +++ b/src/kernel/mem/vmm.c @@ -248,8 +248,8 @@ uint8_t is_phyisical_address_mapped(uintptr_t physical_address, uintptr_t virtua #if SMALL_PAGES == 1 uint16_t pt_e = PT_ENTRY((uint64_t) virtual_address); - uint64_t *pt_table = (uint64_t) (SIGN_EXTENSION | ENTRIES_TO_ADDRESS(510l, (uint64_t) pml4_e, (uint64_t) pdpr_e, (uint64_t) pd_e)); - if ( !pt_table[pt_e] & PRESENT_BIT ) { + uint64_t *pt_table = (uint64_t *) (SIGN_EXTENSION | ENTRIES_TO_ADDRESS(510l, (uint64_t) pml4_e, (uint64_t) pdpr_e, (uint64_t) pd_e)); + if ( !(pt_table[pt_e] & PRESENT_BIT )) { return PHYS_ADDRESS_NOT_MAPPED; } else { if (ALIGN_PHYSADDRESS(pt_table[pt_e]) == ALIGN_PHYSADDRESS(physical_address)) { From 648d8b2818eb01380f16dc5e7897e5c0bb4f0dd9 Mon Sep 17 00:00:00 2001 From: Ivan Gualandri Date: Mon, 1 Apr 2024 12:44:41 +0100 Subject: [PATCH 02/14] Try to fix build, and move the hhdm preparation before everything else in the pmm initialization --- src/include/kernel/mem/hh_direct_map.h | 6 +++ src/include/kernel/mem/mmap.h | 1 + src/include/kernel/mem/pmm.h | 6 ++- src/kernel/arch/x86_64/mem/vmm_mapping.c | 14 +++---- src/kernel/framebuffer/framebuffer.c | 12 ++++-- src/kernel/main.c | 4 +- src/kernel/mem/bitmap.c | 30 ++++++++------ src/kernel/mem/hh_direct_map.c | 9 ++-- src/kernel/mem/mmap.c | 13 ++++++ src/kernel/mem/pmm.c | 53 +++++++++++++++++++++++- 10 files changed, 116 insertions(+), 32 deletions(-) diff --git a/src/include/kernel/mem/hh_direct_map.h b/src/include/kernel/mem/hh_direct_map.h index c2c60569..d2420f35 100644 --- a/src/include/kernel/mem/hh_direct_map.h +++ b/src/include/kernel/mem/hh_direct_map.h @@ -4,6 +4,12 @@ #include #include +// Same as kernel KERNEL_MEMORY_PADDING, that is in kheap.h, they will be merged once the 
memory initialization is fixed +#define HH_MEMORY_PADDING 0x1000 + +// This function is temporary +void early_map_physical_memory(uint64_t end_of_reserved_area); + void *hhdm_get_variable ( uintptr_t phys_address ); void hhdm_map_physical_memory(); diff --git a/src/include/kernel/mem/mmap.h b/src/include/kernel/mem/mmap.h index 98d5d86b..8f651b4e 100644 --- a/src/include/kernel/mem/mmap.h +++ b/src/include/kernel/mem/mmap.h @@ -17,4 +17,5 @@ extern const char *mmap_types[]; void _mmap_parse(struct multiboot_tag_mmap*); void _mmap_setup(); uintptr_t _mmap_determine_bitmap_region(uint64_t lower_limit, size_t size); +bool _mmap_is_address_in_available_space(uint64_t address, uint64_t upper_limit); #endif diff --git a/src/include/kernel/mem/pmm.h b/src/include/kernel/mem/pmm.h index b855858f..2c3724de 100644 --- a/src/include/kernel/mem/pmm.h +++ b/src/include/kernel/mem/pmm.h @@ -5,8 +5,11 @@ #include #include -void pmm_setup(unsigned long addr, uint32_t size); +extern bool pmm_initialized; + +void pmm_setup(uint64_t addr, uint32_t size); void _map_pmm(); +void *pmm_prepare_new_pagetable(); void *pmm_alloc_frame(); void *pmm_alloc_area(size_t size); void pmm_free_frame(void *address); @@ -14,4 +17,5 @@ bool pmm_check_frame_availability(); void pmm_reserve_area(uint64_t starting_address, size_t size); void pmm_free_area(uint64_t starting_address, size_t size); + #endif diff --git a/src/kernel/arch/x86_64/mem/vmm_mapping.c b/src/kernel/arch/x86_64/mem/vmm_mapping.c index 59fb822a..dcdac280 100644 --- a/src/kernel/arch/x86_64/mem/vmm_mapping.c +++ b/src/kernel/arch/x86_64/mem/vmm_mapping.c @@ -40,7 +40,7 @@ void *map_phys_to_virt_addr_hh(void* physical_address, void* address, size_t fla if ( !(pml4_root[pml4_e] & 0b1) ) { pretty_logf(Verbose, " We should allocate a new table at pml4_e: %d", pml4_e); - uint64_t *new_table = pmm_alloc_frame(); + uint64_t *new_table = pmm_prepare_new_pagetable(); pml4_root[pml4_e] = (uint64_t) new_table | user_mode_status | WRITE_BIT | PRESENT_BIT; uint64_t *new_table_hhdm = hhdm_get_variable((uintptr_t) new_table); clean_new_table(new_table_hhdm); @@ -52,7 +52,7 @@ void *map_phys_to_virt_addr_hh(void* physical_address, void* address, size_t fla if ( !(pdpr_root[pdpr_e] & 0b1) ) { pretty_logf(Verbose, " We should allocate a new table at pdpr_e: %d", pdpr_e); - uint64_t *new_table = pmm_alloc_frame(); + uint64_t *new_table = pmm_prepare_new_pagetable(); pdpr_root[pdpr_e] = (uint64_t) new_table | user_mode_status | WRITE_BIT | PRESENT_BIT; uint64_t *new_table_hhdm = hhdm_get_variable((uintptr_t) new_table); clean_new_table(new_table_hhdm); @@ -65,7 +65,7 @@ void *map_phys_to_virt_addr_hh(void* physical_address, void* address, size_t fla if( !(pd_root[pd_e] & 0b1) ) { #if SMALL_PAGES == 1 - uint64_t *new_table = pmm_alloc_frame(); + uint64_t *new_table = pmm_prepare_new_pagetable(); pd_root[pd_e] = (uint64_t) new_table | user_mode_status | WRITE_BIT | PRESENT_BIT; uint64_t *new_table_hhdm = hhdm_get_variable((uintptr_t) new_table); clean_new_table(new_table_hhdm); @@ -132,7 +132,7 @@ void *map_phys_to_virt_addr(void* physical_address, void* address, size_t flags) // If the pml4_e item in the pml4 table is not present, we need to create a new one. 
// Every entry in pml4 table points to a pdpr table if( !(pml4_table[pml4_e] & 0b1) ) { - uint64_t *new_table = pmm_alloc_frame(); + uint64_t *new_table = pmm_prepare_new_pagetable();; pml4_table[pml4_e] = (uint64_t) new_table | user_mode_status | WRITE_BIT | PRESENT_BIT; // pretty_logf(Verbose, " need to allocate pml4 for address: 0x%x - Entry value: 0x%x - phys_address: 0x%x", (uint64_t) address, pml4_table[pml4_e], new_table); clean_new_table(pdpr_table); @@ -141,7 +141,7 @@ void *map_phys_to_virt_addr(void* physical_address, void* address, size_t flags) // If the pdpr_e item in the pdpr table is not present, we need to create a new one. // Every entry in pdpr table points to a pdpr table if( !(pdpr_table[pdpr_e] & 0b1) ) { - uint64_t *new_table = pmm_alloc_frame(); + uint64_t *new_table = pmm_prepare_new_pagetable(); pdpr_table[pdpr_e] = (uint64_t) new_table | user_mode_status | WRITE_BIT | PRESENT_BIT; //pretty_logf(Verbose, " PDPR entry value: 0x%x", pdpr_table[pdpr_e]); clean_new_table(pd_table); @@ -151,7 +151,7 @@ void *map_phys_to_virt_addr(void* physical_address, void* address, size_t flags) // Every entry in pdpr table points to a page table if using 4k pages, or to a 2mb memory area if using 2mb pages if( !(pd_table[pd_e] & 0b01) ) { #if SMALL_PAGES == 1 - uint64_t *new_table = pmm_alloc_frame(); + uint64_t *new_table = pmm_prepare_new_pagetable(); pd_table[pd_e] = (uint64_t) new_table | user_mode_status | WRITE_BIT | PRESENT_BIT; clean_new_table(pt_table); #elif SMALL_PAGES == 0 @@ -172,7 +172,7 @@ void *map_phys_to_virt_addr(void* physical_address, void* address, size_t flags) void *map_vaddress(void *virtual_address, size_t flags, uint64_t *pml4_root){ pretty_logf(Verbose, "address: 0x%x", virtual_address); - void *new_addr = pmm_alloc_frame(); + void *new_addr = pmm_prepare_new_pagetable(); return map_phys_to_virt_addr_hh(new_addr, virtual_address, flags, pml4_root); } diff --git a/src/kernel/framebuffer/framebuffer.c b/src/kernel/framebuffer/framebuffer.c index 597ca745..7ce7a369 100644 --- a/src/kernel/framebuffer/framebuffer.c +++ b/src/kernel/framebuffer/framebuffer.c @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -47,7 +48,6 @@ void map_framebuffer(struct framebuffer_info fbdata) { uint32_t pt = PT_ENTRY(_FRAMEBUFFER_MEM_START); #endif - if(p4_table[pml4] == 0x00l || p3_table_hh[pdpr] == 0x00l){ pretty_log(Verbose, "PANIC - PML4 or PDPR Empty - not supported for now\n"); asm("hlt"); @@ -59,10 +59,11 @@ void map_framebuffer(struct framebuffer_info fbdata) { bool newly_allocated = false; // Probably should be safer to rely on the direct map if possible? 
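// For the table allocated just below this is already the case: the new frame returned by pmm_prepare_new_pagetable() is cleaned through its hhdm_get_variable() alias rather than through its physical address, and p2_table[pd] only stores the physical address.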
if(p2_table[pd] == 0x00){ - uint64_t *new_table = pmm_alloc_frame(); + uint64_t *new_table = pmm_prepare_new_pagetable(); p2_table[pd] = (uint64_t)new_table | (PRESENT_BIT | WRITE_BIT); - current_page_table = new_table; - clean_new_table((uint64_t *)new_table); + uint64_t *new_table_hhdm = hhdm_get_variable((uintptr_t)new_table); + current_page_table = new_table_hhdm; + clean_new_table((uint64_t *)new_table_hhdm); newly_allocated = true; } for(int j=0; j < VM_PAGES_PER_TABLE && fb_entries > 0; j++){ @@ -98,6 +99,7 @@ void set_fb_data(struct multiboot_tag_framebuffer *fbtag){ //FRAMEBUFFER_MEM = (void*)(uint64_t)fbtag->common.framebuffer_addr; #if USE_FRAMEBUFFER == 1 framebuffer_data.address = (void*)(uint64_t)_FRAMEBUFFER_MEM_START; + //framebuffer_data.address = hhdm_get_variable((uintptr_t) (fbtag->common.framebuffer_addr)); framebuffer_data.pitch = fbtag->common.framebuffer_pitch; framebuffer_data.bpp = fbtag->common.framebuffer_bpp; framebuffer_data.memory_size = fbtag->common.framebuffer_pitch * fbtag->common.framebuffer_height; @@ -229,11 +231,13 @@ void _fb_put_pixel(uint32_t x, uint32_t y, uint32_t color) { } void draw_logo(uint32_t start_x, uint32_t start_y) { + pretty_logf(Verbose, "Header_data: 0x%x", header_data); char *logo_data = header_data; char pixel[4]; for (uint32_t i = 0; i < height; i++) { for(uint32_t j = 0; j < width; j++) { HEADER_PIXEL(logo_data, pixel); + //pretty_logf(Verbose, "(%d)[%d]: plotting pixel: 0x%x", i, j, pixel); pixel[3] = 0; uint32_t num = (uint32_t) pixel[0] << 24 | (uint32_t)pixel[1] << 16 | diff --git a/src/kernel/main.c b/src/kernel/main.c index 6491be9a..1bfd3df7 100644 --- a/src/kernel/main.c +++ b/src/kernel/main.c @@ -70,6 +70,8 @@ uintptr_t higherHalfDirectMapBase; void _init_basic_system(unsigned long addr){ struct multiboot_tag* tag; uint32_t mbi_size = *(uint32_t *) (addr + _HIGHER_HALF_KERNEL_MEM_START); + uint64_t end_of_mapped_physical_memory = end_of_mapped_memory - _HIGHER_HALF_KERNEL_MEM_START; + pretty_logf(Verbose, " Addr: 0x%x - Size: 0x%x end_of_mapped_memory: 0x%x - physical: 0x%x", addr, mbi_size, end_of_mapped_memory, end_of_mapped_physical_memory); pretty_log(Info, "Initialize base system"); //These data structure are initialized during the boot process. 
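//They hold the physical addresses of the multiboot tags, so _HIGHER_HALF_KERNEL_MEM_START is added to turn them into pointers that are valid in the kernel's higher half mapping.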
tagmem = (struct multiboot_tag_basic_meminfo *)(multiboot_basic_meminfo + _HIGHER_HALF_KERNEL_MEM_START); @@ -156,8 +158,8 @@ void kernel_start(unsigned long addr, unsigned long magic){ higherHalfDirectMapBase = ((uint64_t) HIGHER_HALF_ADDRESS_OFFSET + VM_KERNEL_MEMORY_PADDING); pretty_logf(Verbose, "HigherHalf Initial entries: pml4: %d, pdpr: %d, pd: %d", PML4_ENTRY((uint64_t) higherHalfDirectMapBase), PDPR_ENTRY((uint64_t) higherHalfDirectMapBase), PD_ENTRY((uint64_t) higherHalfDirectMapBase)); + pretty_logf(Verbose, "Kernel End: 0x%x - Physical: 0x%x", (unsigned long)&_kernel_end, (unsigned long)&_kernel_physical_end); _init_basic_system(addr); - pretty_logf(Verbose, "Kernel End: 0x%x - Physical: %x", (unsigned long)&_kernel_end, (unsigned long)&_kernel_physical_end); // Reminder here: The first 8 bytes have a fixed structure in the multiboot info: // They are: 0-4: size of the boot information in bytes // 4-8: Reserved (0) diff --git a/src/kernel/mem/bitmap.c b/src/kernel/mem/bitmap.c index 143d43a9..55354e0c 100644 --- a/src/kernel/mem/bitmap.c +++ b/src/kernel/mem/bitmap.c @@ -1,16 +1,17 @@ #include +#include #include #include #include #include #include #ifndef _TEST_ -#include +#include #include #include #include #include -#include +#include #endif #ifdef _TEST_ @@ -31,9 +32,11 @@ uint32_t used_frames; uint64_t memory_map_phys_addr; -void _initialize_bitmap ( unsigned long end_of_reserved_area ) { +void _initialize_bitmap ( uint64_t end_of_reserved_area ) { + pretty_logf(Verbose, "\tend_of_reserved_area: 0x%x", end_of_reserved_area); uint64_t memory_size = (tagmem->mem_upper + 1024) * 1024; bitmap_size = memory_size / PAGE_SIZE_IN_BYTES + 1; + pretty_logf(Verbose, " bitmap_size: 0x%x", bitmap_size); used_frames = 0; number_of_entries = bitmap_size / 64 + 1; uint64_t memory_map_phys_addr; @@ -41,22 +44,23 @@ void _initialize_bitmap ( unsigned long end_of_reserved_area ) { memory_map = malloc(bitmap_size / 8 + 1); #else memory_map_phys_addr = _mmap_determine_bitmap_region(end_of_reserved_area, bitmap_size / 8 + 1); + //pretty_logf(Verbose, "memory_map_phys_addr: 0x%x", memory_map_phys_addr); + //uint64_t *test_var = hhdm_get_variable(memory_map_phys_addr); + //test_var[0] = 5; + //pretty_logf(Verbose, "0x%x: test_var: 0x%x", test_var, test_var[0]); //memory_map = memory_map_phys_addr; - uint64_t end_of_mapped_physical_memory = end_of_mapped_memory - _HIGHER_HALF_KERNEL_MEM_START; - if(memory_map_phys_addr > end_of_mapped_physical_memory) { - pretty_logf(Verbose, "The address 0x%x is above the initially mapped memory: 0x%x", memory_map_phys_addr, end_of_mapped_physical_memory); - //TODO: This need to be fixed map_phys_to_virt_addr can't be used here since it relies on the bitmap, and it is not initialized yet. 
- map_phys_to_virt_addr((void*)ALIGN_PHYSADDRESS(memory_map_phys_addr), (void*)(memory_map_phys_addr + _HIGHER_HALF_KERNEL_MEM_START), VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); - } else { - pretty_logf(Verbose, "The address 0x%x is not above the initially mapped memory: 0x%x", memory_map_phys_addr, end_of_mapped_physical_memory); - } - memory_map = (uint64_t *) (memory_map_phys_addr + _HIGHER_HALF_KERNEL_MEM_START); + memory_map = (uint64_t *) hhdm_get_variable(memory_map_phys_addr); + pretty_logf(Verbose, "memory_map: 0x%x - memory_map_phys_addr: 0x%x", memory_map,memory_map_phys_addr); + //uint64_t end_of_mapped_physical_memory = end_of_mapped_memory - _HIGHER_HALF_KERNEL_MEM_START; + //memory_map = (uint64_t *) (memory_map_phys_addr + _HIGHER_HALF_KERNEL_MEM_START); #endif + pretty_logf(Verbose, "Number of entries: 0x%d", number_of_entries); for (uint32_t i=0; iaddr + current_entry->len < address + upper_limit) { + if(current_entry->type == _MMAP_AVAILABLE) { + //pretty_logf(Verbose, "Entry 0x%x is in an available space (with size: 0x%x", address, upper_limit ); + return true; + } + } + } + return false; +} + uintptr_t _mmap_determine_bitmap_region(uint64_t lower_limit, size_t bytes_needed){ //NOTE: lower_limit can be used to place the bitmap after the kernel, or after anything if need be. for (size_t i = 0; i < mmap_number_of_entries; i++){ diff --git a/src/kernel/mem/pmm.c b/src/kernel/mem/pmm.c index 5c8f0fca..a8eb35a9 100644 --- a/src/kernel/mem/pmm.c +++ b/src/kernel/mem/pmm.c @@ -1,3 +1,5 @@ +#include +#include #include #include #include @@ -20,8 +22,23 @@ extern size_t memory_size_in_bytes; spinlock_t memory_spinlock; -void pmm_setup(unsigned long addr, uint32_t size){ - _initialize_bitmap(addr + size); +bool pmm_initialized = false; +uint64_t anon_memory_loc; +uint64_t anon_physical_memory_loc; + +void pmm_setup(uint64_t addr, uint32_t size){ + // addr = address of multiboot structre + // size = size of the structure + pretty_logf(Verbose, "addr: 0x%x, size: 0x%x", addr,size); + anon_memory_loc = (uint64_t) (&_kernel_end + PAGE_SIZE_IN_BYTES); + anon_physical_memory_loc = (uint64_t) (&_kernel_physical_end + PAGE_SIZE_IN_BYTES) ; + + pretty_logf(Verbose, "anon_memory_loc: 0x%x, anon_physical_memory_loc: 0x%x", anon_memory_loc, anon_physical_memory_loc); + + hhdm_map_physical_memory(); + pretty_log(Verbose, "HHDM setup finished"); + + _initialize_bitmap(anon_physical_memory_loc + PAGE_SIZE_IN_BYTES); uint64_t bitmap_start_addr; size_t bitmap_size; _bitmap_get_region(&bitmap_start_addr, &bitmap_size, ADDRESS_TYPE_PHYSICAL); @@ -34,6 +51,7 @@ void pmm_setup(unsigned long addr, uint32_t size){ #endif //_map_pmm(); + pmm_initialized = true; } /** @@ -58,6 +76,37 @@ void *pmm_alloc_frame(){ return NULL; } + +void *pmm_prepare_new_pagetable() { + pretty_logf(Verbose, "pmm_initialized: %d", pmm_initialized ); + if ( !pmm_initialized) { + if( _mmap_is_address_in_available_space(anon_physical_memory_loc, PAGE_DIR_SIZE) ) { + // This space should be potentially safe + //pretty_log(Verbose, " Current_address is mapped and in the available memory area" ); + if(anon_physical_memory_loc >= 0x18c000) { + pretty_log(Verbose, "Overwriting the module"); + } + anon_memory_loc += PAGE_DIR_SIZE; + anon_physical_memory_loc += PAGE_DIR_SIZE; + return (void *) (anon_physical_memory_loc - PAGE_DIR_SIZE); + } else { + // mmm... what should i do now? 
+ // i suppose this shouldn't happen + pretty_log(Fatal, " New location is not in available area, this most likely shouldn't happen"); + } + /*} /*else { + // This is the tricky part, i need to map new memory. still in the anon area + // I need to check that it is not any of the reserved memory locations (i.e. faramebuffer) + // If yes it should probably panic + + }*/ + // Get the first available address and check if is in mapped area? + //return NULL; + } + //pretty_log(Verbose, "The pmm is initialized, using pmm_alloc_frame"); + return (void *) pmm_alloc_frame(); +} + void *pmm_alloc_area(size_t size) { size_t requested_frames = get_number_of_pages_from_size(size); From 30af96077a32597c335563d6f91cfffe9b04a553 Mon Sep 17 00:00:00 2001 From: Ivan Gualandri Date: Sat, 20 Apr 2024 15:37:57 +0100 Subject: [PATCH 03/14] Add functions to check if an address is reserved during early init --- src/include/main.h | 3 +++ src/include/utils/utils.h | 1 + src/kernel/main.c | 38 ++++++++++++++++++++++++++++++++------ src/kernel/mem/bitmap.c | 12 +----------- src/kernel/mem/mmap.c | 8 ++++++++ src/kernel/mem/pmm.c | 20 +++++++++++--------- src/utils/utils.c | 26 ++++++++++++++++++++++++++ 7 files changed, 82 insertions(+), 26 deletions(-) diff --git a/src/include/main.h b/src/include/main.h index 53571cca..0934c4a3 100644 --- a/src/include/main.h +++ b/src/include/main.h @@ -2,6 +2,9 @@ #define __MAIN_H_ #include +#include + +extern struct multiboot_tag *tag_start; void _init_basic_system(unsigned long addr); void kernel_start(unsigned long addr, unsigned long magic); diff --git a/src/include/utils/utils.h b/src/include/utils/utils.h index 521d136f..bac7d24d 100644 --- a/src/include/utils/utils.h +++ b/src/include/utils/utils.h @@ -4,5 +4,6 @@ #include bool load_module_hh(struct multiboot_tag_module *loaded_module); +bool _is_address_in_multiboot(uint64_t address); #endif diff --git a/src/kernel/main.c b/src/kernel/main.c index 1bfd3df7..562f4c49 100644 --- a/src/kernel/main.c +++ b/src/kernel/main.c @@ -62,11 +62,38 @@ struct multiboot_tag_new_acpi *tagnew_acpi = NULL; struct multiboot_tag_mmap *tagmmap = NULL; struct multiboot_tag *tagacpi = NULL; struct multiboot_tag_module *loaded_module = NULL; +struct multiboot_tag *tag_start = NULL; uint64_t elf_module_start_hh = 0; uintptr_t higherHalfDirectMapBase; +const char *multiboot_names[] = { + "Multiboot End", + "Boot command line", + "Boot loader name", + "Modules", + "Basic Memory Information", + "Bios Boot Device", + "Memory Map", + "VBE Info", + "Framebuffer Info", + "EFI amd64 entry address tag of Multiboot2 header", + "APM Table", + " ", + " ", + " ", + "ACPI Old RSDP", + "ACPI New RSDP", + " ", + " ", + " ", + " ", + " ", + "Image load base physical address", + " " +}; + void _init_basic_system(unsigned long addr){ struct multiboot_tag* tag; uint32_t mbi_size = *(uint32_t *) (addr + _HIGHER_HALF_KERNEL_MEM_START); @@ -82,6 +109,7 @@ void _init_basic_system(unsigned long addr){ memory_size_in_bytes = (tagmem->mem_upper + 1024) * 1024; //Print mmap_info pretty_logf(Verbose, "Memory map Size: 0x%x, Entry size: 0x%x, EntryVersion: 0x%x", tagmmap->size, tagmmap->entry_size, tagmmap->entry_version); + tag_start = (struct multiboot_tag *) (addr + _HIGHER_HALF_KERNEL_MEM_START + 8); _mmap_parse(tagmmap); pmm_setup(addr, mbi_size); @@ -113,17 +141,18 @@ void _init_basic_system(unsigned long addr){ parse_SDT((uint64_t) descriptor, MULTIBOOT_TAG_TYPE_ACPI_NEW); validate_SDT((char *) descriptor, sizeof(RSDPDescriptor20)); } - + tag_start = (struct 
multiboot_tag *) (addr + _HIGHER_HALF_KERNEL_MEM_START + 8); + pretty_logf(Verbose, " Tag start: 0x%x", tag_start); for (tag=(struct multiboot_tag *) (addr + _HIGHER_HALF_KERNEL_MEM_START + 8); tag->type != MULTIBOOT_TAG_TYPE_END; tag = (struct multiboot_tag *) ((multiboot_uint8_t *) tag + ((tag->size + 7) & ~7))){ switch(tag->type){ case MULTIBOOT_TAG_TYPE_MODULE: loaded_module = (struct multiboot_tag_module *) tag; - pretty_logf(Verbose, " \t[Tag 0x%x] Size: 0x%x - mod_start: 0x%x : mod_end: 0x%x" , loaded_module->type, loaded_module->size, loaded_module->mod_start, loaded_module->mod_end); + pretty_logf(Verbose, " \t[Tag 0x%x] (%s): Size: 0x%x - mod_start: 0x%x : mod_end: 0x%x" , loaded_module->type, multiboot_names[loaded_module->type], loaded_module->size, loaded_module->mod_start, loaded_module->mod_end); break; default: - pretty_logf(Verbose, "\t[Tag 0x%x] Size: 0x%x", tag->type, tag->size); + pretty_logf(Verbose, "\t[Tag 0x%x] (%s): Size: 0x%x", tag->type, multiboot_names[tag->type], tag->size); break; } } @@ -181,10 +210,7 @@ void kernel_start(unsigned long addr, unsigned long magic){ _syscalls_init(); //_sc_putc('c', 0); //asm("int $0x80"); - //higherHalfDirectMapBase is where we will the Direct Mapping of physical memory will start. - higherHalfDirectMapBase = ((uint64_t) HIGHER_HALF_ADDRESS_OFFSET + VM_KERNEL_MEMORY_PADDING); _mmap_setup(); - hhdm_map_physical_memory(); kernel_settings.kernel_uptime = 0; kernel_settings.paging.page_root_address = p4_table; uint64_t p4_table_phys_address = (uint64_t) p4_table - _HIGHER_HALF_KERNEL_MEM_START; diff --git a/src/kernel/mem/bitmap.c b/src/kernel/mem/bitmap.c index 55354e0c..8c35c7e0 100644 --- a/src/kernel/mem/bitmap.c +++ b/src/kernel/mem/bitmap.c @@ -44,18 +44,8 @@ void _initialize_bitmap ( uint64_t end_of_reserved_area ) { memory_map = malloc(bitmap_size / 8 + 1); #else memory_map_phys_addr = _mmap_determine_bitmap_region(end_of_reserved_area, bitmap_size / 8 + 1); - //pretty_logf(Verbose, "memory_map_phys_addr: 0x%x", memory_map_phys_addr); - //uint64_t *test_var = hhdm_get_variable(memory_map_phys_addr); - //test_var[0] = 5; - //pretty_logf(Verbose, "0x%x: test_var: 0x%x", test_var, test_var[0]); - //memory_map = memory_map_phys_addr; memory_map = (uint64_t *) hhdm_get_variable(memory_map_phys_addr); - pretty_logf(Verbose, "memory_map: 0x%x - memory_map_phys_addr: 0x%x", memory_map,memory_map_phys_addr); - //uint64_t end_of_mapped_physical_memory = end_of_mapped_memory - _HIGHER_HALF_KERNEL_MEM_START; - //memory_map = (uint64_t *) (memory_map_phys_addr + _HIGHER_HALF_KERNEL_MEM_START); - #endif - pretty_logf(Verbose, "Number of entries: 0x%d", number_of_entries); for (uint32_t i=0; i #include #include +#include #include extern uint32_t used_frames; @@ -39,6 +40,7 @@ void _mmap_parse(struct multiboot_tag_mmap *mmap_root){ } void _mmap_setup(){ + //TODO: see issue: https://github.com/dreamos82/Dreamos64/issues/209 count_physical_reserved=0; if(used_frames > 0){ uint32_t counter = 0; @@ -61,6 +63,12 @@ bool _mmap_is_address_in_available_space(uint64_t address, uint64_t upper_limit) if(current_entry->addr + current_entry->len < address + upper_limit) { if(current_entry->type == _MMAP_AVAILABLE) { //pretty_logf(Verbose, "Entry 0x%x is in an available space (with size: 0x%x", address, upper_limit ); + // The address is in an available area, but we need to check if it is not overwriting something important. 
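// At the moment the only additional check is against the multiboot data: _is_address_in_multiboot() walks the tag list and returns true when the address falls inside a loaded module.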
+ uint64_t multiboot_address =_is_address_in_multiboot(address); + if(multiboot_address != 0) { + pretty_log(Verbose, " This address is reserved by multiboot"); + return false; + } return true; } } diff --git a/src/kernel/mem/pmm.c b/src/kernel/mem/pmm.c index a8eb35a9..b452f9e7 100644 --- a/src/kernel/mem/pmm.c +++ b/src/kernel/mem/pmm.c @@ -78,23 +78,25 @@ void *pmm_alloc_frame(){ void *pmm_prepare_new_pagetable() { - pretty_logf(Verbose, "pmm_initialized: %d", pmm_initialized ); if ( !pmm_initialized) { - if( _mmap_is_address_in_available_space(anon_physical_memory_loc, PAGE_DIR_SIZE) ) { + while((!_mmap_is_address_in_available_space(anon_physical_memory_loc, PAGE_DIR_SIZE)) && anon_physical_memory_loc < memory_size_in_bytes) { + pretty_logf(Verbose, " Current address: 0x%x not available trying next", anon_memory_loc); + anon_memory_loc += PAGE_DIR_SIZE; + anon_physical_memory_loc += PAGE_DIR_SIZE; + } + anon_memory_loc += PAGE_DIR_SIZE; + anon_physical_memory_loc += PAGE_DIR_SIZE; + return (void *) (anon_physical_memory_loc - PAGE_DIR_SIZE); + /*if( _mmap_is_address_in_available_space(anon_physical_memory_loc, PAGE_DIR_SIZE) ) { // This space should be potentially safe - //pretty_log(Verbose, " Current_address is mapped and in the available memory area" ); - if(anon_physical_memory_loc >= 0x18c000) { - pretty_log(Verbose, "Overwriting the module"); - } anon_memory_loc += PAGE_DIR_SIZE; anon_physical_memory_loc += PAGE_DIR_SIZE; return (void *) (anon_physical_memory_loc - PAGE_DIR_SIZE); } else { - // mmm... what should i do now? // i suppose this shouldn't happen pretty_log(Fatal, " New location is not in available area, this most likely shouldn't happen"); - } - /*} /*else { + }*/ + /*} else { // This is the tricky part, i need to map new memory. still in the anon area // I need to check that it is not any of the reserved memory locations (i.e. 
faramebuffer) // If yes it should probably panic diff --git a/src/utils/utils.c b/src/utils/utils.c index 960894a7..497d554a 100644 --- a/src/utils/utils.c +++ b/src/utils/utils.c @@ -1,6 +1,7 @@ #include #include #include +#include #include #include @@ -27,3 +28,28 @@ bool load_module_hh (struct multiboot_tag_module *loaded_module) { //pretty_logf(Verbose, " loaded_module_address: 0x%x", &loaded_module); return _is_elf; } + +/** + * T + * + * + * @param address address to check + * @return 0 if the address is not in multiboot or the first available address; + */ +bool _is_address_in_multiboot(uint64_t address) { + struct multiboot_tag *tag = tag_start; + for (tag = tag_start; + tag->type != MULTIBOOT_TAG_TYPE_END; + tag = (struct multiboot_tag *) ((multiboot_uint8_t *) tag + ((tag->size + 7) & ~7))){ + if (tag->type == MULTIBOOT_TAG_TYPE_MODULE) { + struct multiboot_tag_module *loaded_module = (struct multiboot_tag_module *) tag; + if (address >= loaded_module->mod_start && address <= loaded_module->mod_end) { + pretty_logf(Verbose, "This address: 0x%x is reserved by a multiboot module", address ); + return true; + } + } + //pretty_log(Verbose, " entry not corresponding" ); + } +// + return false; +} From 6762e0dded71c567624082bfadf319bf5da65a5f Mon Sep 17 00:00:00 2001 From: Ivan Gualandri Date: Mon, 22 Apr 2024 19:10:38 +0100 Subject: [PATCH 04/14] Move is address mapped in vmm_mapping.c --- .../kernel/arch/common/mem/vmm_mapping.h | 2 + src/kernel/arch/x86_64/mem/vmm_mapping.c | 45 ++++++++++++++++++ src/kernel/mem/vmm.c | 46 ------------------- 3 files changed, 47 insertions(+), 46 deletions(-) diff --git a/src/include/kernel/arch/common/mem/vmm_mapping.h b/src/include/kernel/arch/common/mem/vmm_mapping.h index 20ee8cb9..38e67e27 100644 --- a/src/include/kernel/arch/common/mem/vmm_mapping.h +++ b/src/include/kernel/arch/common/mem/vmm_mapping.h @@ -13,4 +13,6 @@ void *map_vaddress(void *address, size_t flags, uint64_t *pml4_root); int unmap_vaddress(void *address); int unmap_vaddress_hh(void *address, uint64_t *pml4_root); +uint8_t is_phyisical_address_mapped(uintptr_t physical_address, uintptr_t virtual_address); + #endif diff --git a/src/kernel/arch/x86_64/mem/vmm_mapping.c b/src/kernel/arch/x86_64/mem/vmm_mapping.c index dcdac280..18708fcb 100644 --- a/src/kernel/arch/x86_64/mem/vmm_mapping.c +++ b/src/kernel/arch/x86_64/mem/vmm_mapping.c @@ -172,6 +172,7 @@ void *map_phys_to_virt_addr(void* physical_address, void* address, size_t flags) void *map_vaddress(void *virtual_address, size_t flags, uint64_t *pml4_root){ pretty_logf(Verbose, "address: 0x%x", virtual_address); + //TODO need to check if i can just use the phys alloc here void *new_addr = pmm_prepare_new_pagetable(); return map_phys_to_virt_addr_hh(new_addr, virtual_address, flags, pml4_root); } @@ -265,3 +266,47 @@ int unmap_vaddress_hh(void *address, uint64_t *pml4_root) { void identity_map_phys_address(void *physical_address, size_t flags) { map_phys_to_virt_addr(physical_address, physical_address, flags); } + +uint8_t is_phyisical_address_mapped(uintptr_t physical_address, uintptr_t virtual_address) { + uint16_t pml4_e = PML4_ENTRY((uint64_t) virtual_address); + uint64_t *pml4_table = (uint64_t *) (SIGN_EXTENSION | ENTRIES_TO_ADDRESS(510l, 510l, 510l, 510l)); + if (!(pml4_table[pml4_e] & PRESENT_BIT)) { + return PHYS_ADDRESS_NOT_MAPPED; + } + + uint16_t pdpr_e = PDPR_ENTRY((uint64_t) virtual_address); + uint64_t *pdpr_table = (uint64_t *) (SIGN_EXTENSION | ENTRIES_TO_ADDRESS(510l, 510l, 510l, (uint64_t) pml4_e)); + if 
(!(pdpr_table[pdpr_e] & PRESENT_BIT)) { + return PHYS_ADDRESS_NOT_MAPPED; + } + + uint16_t pd_e = PD_ENTRY((uint64_t) virtual_address); + uint64_t *pd_table = (uint64_t*) (SIGN_EXTENSION | ENTRIES_TO_ADDRESS(510l, 510l, pml4_e, (uint64_t) pdpr_e)); + if (!(pd_table[pd_e] & PRESENT_BIT)) { + return PHYS_ADDRESS_NOT_MAPPED; + } +#if SMALL_PAGES == 0 + else { + if (ALIGN_PHYSADDRESS(pd_table[pd_e]) == ALIGN_PHYSADDRESS(physical_address)) { + return PHYS_ADDRESS_MAPPED; + } else { + return PHYS_ADDRESS_MISMATCH; + } + } +#endif + +#if SMALL_PAGES == 1 + uint16_t pt_e = PT_ENTRY((uint64_t) virtual_address); + uint64_t *pt_table = (uint64_t *) (SIGN_EXTENSION | ENTRIES_TO_ADDRESS(510l, (uint64_t) pml4_e, (uint64_t) pdpr_e, (uint64_t) pd_e)); + if ( !(pt_table[pt_e] & PRESENT_BIT )) { + return PHYS_ADDRESS_NOT_MAPPED; + } else { + if (ALIGN_PHYSADDRESS(pt_table[pt_e]) == ALIGN_PHYSADDRESS(physical_address)) { + return PHYS_ADDRESS_MAPPED; + } else { + return PHYS_ADDRESS_MISMATCH; + } + } +#endif + return 0; +} diff --git a/src/kernel/mem/vmm.c b/src/kernel/mem/vmm.c index dd7f4554..fa60fe9b 100644 --- a/src/kernel/mem/vmm.c +++ b/src/kernel/mem/vmm.c @@ -216,52 +216,6 @@ void vmm_free(void *address) { return; } - -// TODO: maybe thsi should be moved in the arch/mapping part and use the hhdm? -uint8_t is_phyisical_address_mapped(uintptr_t physical_address, uintptr_t virtual_address) { - uint16_t pml4_e = PML4_ENTRY((uint64_t) virtual_address); - uint64_t *pml4_table = (uint64_t *) (SIGN_EXTENSION | ENTRIES_TO_ADDRESS(510l, 510l, 510l, 510l)); - if (!(pml4_table[pml4_e] & PRESENT_BIT)) { - return PHYS_ADDRESS_NOT_MAPPED; - } - - uint16_t pdpr_e = PDPR_ENTRY((uint64_t) virtual_address); - uint64_t *pdpr_table = (uint64_t *) (SIGN_EXTENSION | ENTRIES_TO_ADDRESS(510l, 510l, 510l, (uint64_t) pml4_e)); - if (!(pdpr_table[pdpr_e] & PRESENT_BIT)) { - return PHYS_ADDRESS_NOT_MAPPED; - } - - uint16_t pd_e = PD_ENTRY((uint64_t) virtual_address); - uint64_t *pd_table = (uint64_t*) (SIGN_EXTENSION | ENTRIES_TO_ADDRESS(510l, 510l, pml4_e, (uint64_t) pdpr_e)); - if (!(pd_table[pd_e] & PRESENT_BIT)) { - return PHYS_ADDRESS_NOT_MAPPED; - } -#if SMALL_PAGES == 0 - else { - if (ALIGN_PHYSADDRESS(pd_table[pd_e]) == ALIGN_PHYSADDRESS(physical_address)) { - return PHYS_ADDRESS_MAPPED; - } else { - return PHYS_ADDRESS_MISMATCH; - } - } -#endif - -#if SMALL_PAGES == 1 - uint16_t pt_e = PT_ENTRY((uint64_t) virtual_address); - uint64_t *pt_table = (uint64_t *) (SIGN_EXTENSION | ENTRIES_TO_ADDRESS(510l, (uint64_t) pml4_e, (uint64_t) pdpr_e, (uint64_t) pd_e)); - if ( !(pt_table[pt_e] & PRESENT_BIT )) { - return PHYS_ADDRESS_NOT_MAPPED; - } else { - if (ALIGN_PHYSADDRESS(pt_table[pt_e]) == ALIGN_PHYSADDRESS(physical_address)) { - return PHYS_ADDRESS_MAPPED; - } else { - return PHYS_ADDRESS_MISMATCH; - } - } -#endif - return 0; -} - //TODO implement this function or remove it uint8_t check_virt_address_status(uint64_t virtual_address) { (void)virtual_address; From 0b25cf060a1bc36c2845bf3394bcc0eab3e8ab98 Mon Sep 17 00:00:00 2001 From: Ivan Gualandri Date: Tue, 23 Apr 2024 19:18:56 +0100 Subject: [PATCH 05/14] Fix expand heap function --- src/kernel/mem/kheap.c | 22 +++++++--------------- src/kernel/mem/vmm.c | 4 ++++ 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/src/kernel/mem/kheap.c b/src/kernel/mem/kheap.c index b3156512..b4d74329 100644 --- a/src/kernel/mem/kheap.c +++ b/src/kernel/mem/kheap.c @@ -31,7 +31,7 @@ void initialize_kheap(){ kernel_heap_current_pos = kernel_heap_start; kernel_heap_end = 
kernel_heap_start; //TODO: Should we use PAGE_SIZE for the initial heap size? - kernel_heap_current_pos->size = 0x1000; + kernel_heap_current_pos->size = PAGE_SIZE_IN_BYTES; kernel_heap_current_pos->is_free = true; kernel_heap_current_pos->next = NULL; kernel_heap_current_pos->prev = NULL; @@ -94,22 +94,16 @@ void *kmalloc(size_t size) { } void expand_heap(size_t required_size) { - // This function expand the heap in case more space is needed. - pretty_logf(Verbose, "called size: %d current_end: 0x%x - end_of_mapped_memory: 0x%x", required_size, kernel_heap_end, end_of_mapped_memory); // The first thing to do is compute how many page we need for this expansion size_t number_of_pages = get_number_of_pages_from_size(required_size); + // This function expand the heap in case more space is needed. + pretty_logf(Verbose, "called size: 0x%x number of pages: 0x%x current_end: 0x%x - end_of_mapped_memory: 0x%x", required_size, number_of_pages, kernel_heap_end, end_of_mapped_memory); // Then check where the heap ends uint64_t heap_end = compute_kheap_end(); - if ( heap_end > end_of_mapped_memory ) { - //end_of_mapped memory marks the end of the memory mapped by the kernel loader. - //if the new heap address is above that, we need to map a new one, otherwise we can just mark it as used. - //That part temporary, it needs to be reviewed when the memory mapping will be reviewed. - // This function no longer need to use end_of_mapped_memory, since now the heap reside somewhere else. - map_vaddress_range((uint64_t *) heap_end, VMM_FLAGS_WRITE_ENABLE | VMM_FLAGS_PRESENT, number_of_pages, NULL); - } - // We need to update the tail first - // It starts at the end of the current heap - KHeapMemoryNode *new_tail = (KHeapMemoryNode *) heap_end; + pretty_logf(Verbose, "heap_end: 0x%x - end_of_mapped_memory: 0x%x", heap_end, end_of_mapped_memory); + //To expand the heap we can just rely on the VMM, since this is the kernel HEAP we pass null as vmm_info data. + KHeapMemoryNode *new_tail = (KHeapMemoryNode *) vmm_alloc(required_size, VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE, NULL); + //TODO check that the list navigation is still working new_tail->next = NULL; new_tail->prev = kernel_heap_end; new_tail->size = KERNEL_PAGE_SIZE * number_of_pages; @@ -121,7 +115,6 @@ void expand_heap(size_t required_size) { if ( available_merges & MERGE_LEFT) { merge_memory_nodes(new_tail->prev, new_tail); } - } uint64_t compute_kheap_end() { @@ -192,7 +185,6 @@ uint8_t can_merge(KHeapMemoryNode *cur_node) { } } - return available_merges; } diff --git a/src/kernel/mem/vmm.c b/src/kernel/mem/vmm.c index fa60fe9b..99bd059b 100644 --- a/src/kernel/mem/vmm.c +++ b/src/kernel/mem/vmm.c @@ -128,6 +128,9 @@ void *vmm_alloc_at(uint64_t base_address, size_t size, size_t flags, VmmInfo *vm uintptr_t address_to_return = vmm_info->status.next_available_address; if (base_address != 0 && base_address > address_to_return) { + // I have specified a base_address, so i want an allocationat that given address + // This design is problematic, it will be reimplemented in the future + // For now i rely in the fact that the address pace on a 64bit architecture is very big. And i don't worry about holes, or overlapping. 
if ( !is_address_aligned(base_address, PAGE_SIZE_IN_BYTES) ) { pretty_logf(Fatal, " Error: base_address 0x%x is not aligned with: 0x%x", base_address, PAGE_SIZE_IN_BYTES); } @@ -196,6 +199,7 @@ bool is_address_stack(size_t flags) { } void vmm_free(void *address) { + //TODO: not finished yet pretty_logf(Verbose, "To Be implemented address provided is: 0x%x", address); VmmContainer *selected_container = vmm_container_root; From 9c353728d74c85713f5b237ef5783f4beb55437f Mon Sep 17 00:00:00 2001 From: Ivan Gualandri Date: Thu, 2 May 2024 23:48:48 +0100 Subject: [PATCH 06/14] Add variable names in vm.h --- src/include/kernel/arch/x86_64/vm.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/include/kernel/arch/x86_64/vm.h b/src/include/kernel/arch/x86_64/vm.h index faffe160..85d6033e 100644 --- a/src/include/kernel/arch/x86_64/vm.h +++ b/src/include/kernel/arch/x86_64/vm.h @@ -40,17 +40,17 @@ #define PAGE_ENTRY_FLAGS PRESENT_BIT | WRITE_BIT #endif -void page_fault_handler(uint64_t); +void page_fault_handler( uint64_t error_code ); void initialize_vm(); -void clean_new_table(uint64_t *); +void clean_new_table( uint64_t *table_to_clean ); -void invalidate_page_table(uint64_t *); +void invalidate_page_table( uint64_t *table_address ); -void load_cr3(void*); +void load_cr3( void* cr3_value ); -uint64_t ensure_address_in_higher_half( uint64_t ); +uint64_t ensure_address_in_higher_half( uint64_t address); -bool is_address_higher_half(uint64_t); +bool is_address_higher_half(uint64_t address); #endif From 4ff6e47b072ff7fca3526049fe905d6e7cd2beff Mon Sep 17 00:00:00 2001 From: Ivan Gualandri Date: Sun, 5 May 2024 19:37:08 +0100 Subject: [PATCH 07/14] Add new parameter to esnure_address_in_higher_half To support the VM_TYPE_MMIO area that has been reserved for all MMIO and acpi stuff. 
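With this change the higher half is split in two: MMIO and ACPI mappings go into a reserved window at MMIO_HIGHER_HALF_ADDRESS_OFFSET, while the regular direct map of physical memory now starts MMIO_RESERVED_SPACE_SIZE bytes higher, at HIGHER_HALF_ADDRESS_OFFSET. A rough usage sketch of the new parameter (lapic_phys and frame_phys are placeholder variables, not part of this patch):

    // MMIO registers and ACPI tables: use the reserved MMIO window
    uint64_t lapic_virt = ensure_address_in_higher_half(lapic_phys, VM_TYPE_MMIO);
    // Ordinary physical memory: use the direct map that follows it
    uint64_t frame_virt = ensure_address_in_higher_half(frame_phys, VM_TYPE_MEMORY);
    // Any other type value makes the function return 0

Note that the new version also adds VM_KERNEL_MEMORY_PADDING to the returned address.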
--- src/include/kernel/arch/x86_64/vm.h | 9 +++++-- src/include/kernel/mem/vmm.h | 7 +++--- src/kernel/arch/x86_64/cpu/acpi.c | 37 ++++++++++++++++++----------- src/kernel/arch/x86_64/cpu/ioapic.c | 10 ++++---- src/kernel/arch/x86_64/cpu/lapic.c | 2 +- src/kernel/arch/x86_64/cpu/madt.c | 4 ++-- src/kernel/arch/x86_64/system/vm.c | 27 +++++++++++++++------ src/kernel/main.c | 14 ++++++----- src/kernel/mem/vmm.c | 2 +- 9 files changed, 71 insertions(+), 41 deletions(-) diff --git a/src/include/kernel/arch/x86_64/vm.h b/src/include/kernel/arch/x86_64/vm.h index 85d6033e..182b79b8 100644 --- a/src/include/kernel/arch/x86_64/vm.h +++ b/src/include/kernel/arch/x86_64/vm.h @@ -16,7 +16,9 @@ #define ALIGN_PHYSADDRESS(address)(address & (~(PAGE_ALIGNMENT_MASK))) -#define HIGHER_HALF_ADDRESS_OFFSET 0xFFFF800000000000 +#define MMIO_HIGHER_HALF_ADDRESS_OFFSET 0xFFFF800000000000 +#define MMIO_RESERVED_SPACE_SIZE 0x280000000 +#define HIGHER_HALF_ADDRESS_OFFSET (MMIO_HIGHER_HALF_ADDRESS_OFFSET + MMIO_RESERVED_SPACE_SIZE) #define PRESENT_BIT 1 #define WRITE_BIT 0b10 @@ -40,6 +42,9 @@ #define PAGE_ENTRY_FLAGS PRESENT_BIT | WRITE_BIT #endif +#define VM_TYPE_MEMORY 0 +#define VM_TYPE_MMIO 1 + void page_fault_handler( uint64_t error_code ); void initialize_vm(); @@ -50,7 +55,7 @@ void invalidate_page_table( uint64_t *table_address ); void load_cr3( void* cr3_value ); -uint64_t ensure_address_in_higher_half( uint64_t address); +uint64_t ensure_address_in_higher_half( uint64_t address, uint8_t type); bool is_address_higher_half(uint64_t address); #endif diff --git a/src/include/kernel/mem/vmm.h b/src/include/kernel/mem/vmm.h index 9818aa44..a09baf42 100644 --- a/src/include/kernel/mem/vmm.h +++ b/src/include/kernel/mem/vmm.h @@ -20,7 +20,8 @@ #define VM_KERNEL_MEMORY_PADDING PAGE_SIZE_IN_BYTES -#define VMM_RESERVED_SPACE_SIZE 0x14480000000 +#define VMM_RESERVED_SPACE_SIZE 0x14200000000 +// #define VMM_RESERVED_SPACE_SIZE 0x14480000000 #define VPTR(x) (void*)((uint64_t)(x)) @@ -30,7 +31,7 @@ typedef enum { VMM_FLAGS_WRITE_ENABLE = (1 << 1), VMM_FLAGS_USER_LEVEL = (1 << 2), VMM_FLAGS_ADDRESS_ONLY = (1 << 7), - VMM_FLAGS_STACK = (1 << 8) + VMM_FLAGS_STACK = (1 << 8), } paging_flags_t; typedef enum { @@ -58,7 +59,7 @@ typedef struct VmmInfo { size_t start_of_vmm_space; /**< The starting addres ofthe vmm space */ - uintptr_t root_table_hhdm; /** the root page table loaded from the direct map */ + uintptr_t root_table_hhdm; /**< the root page table loaded from the direct map */ struct VmmStatus { size_t vmm_items_per_page; /**< Number of page items contained in one page */ diff --git a/src/kernel/arch/x86_64/cpu/acpi.c b/src/kernel/arch/x86_64/cpu/acpi.c index ee590c86..8065e30a 100644 --- a/src/kernel/arch/x86_64/cpu/acpi.c +++ b/src/kernel/arch/x86_64/cpu/acpi.c @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -30,39 +31,47 @@ void parse_SDT(uint64_t address, uint8_t type) { void parse_RSDT(RSDPDescriptor *descriptor){ pretty_logf(Verbose, "- descriptor Address: 0x%x", descriptor->RsdtAddress); - map_phys_to_virt_addr((void *) ALIGN_PHYSADDRESS(descriptor->RsdtAddress), (void *) ensure_address_in_higher_half(descriptor->RsdtAddress), VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); _bitmap_set_bit_from_address(ALIGN_PHYSADDRESS(descriptor->RsdtAddress)); - rsdt_root = (RSDT *) ensure_address_in_higher_half((uint64_t) descriptor->RsdtAddress); + rsdt_root = (RSDT *) ensure_address_in_higher_half((uint64_t) descriptor->RsdtAddress, VM_TYPE_MMIO); + + //rsdt_root = (RSDT_*) 
vmm_alloc(KERNEL_PAGE_SIZE, VMM_FLAGS_MMIO | VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); + //rsdt_root = (RSDT *) vmm_alloc(header.Length, VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE | VMM_FLAGS_MMIO, NULL); //ensure_address_in_higher_half((uint64_t) descriptor->RsdtAddress); + map_phys_to_virt_addr((void *) ALIGN_PHYSADDRESS(descriptor->RsdtAddress), rsdt_root, VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); + //rsdt_root = (RSDT *) hhdm_get_variable((uintptr_t) descriptor->RsdtAddress); ACPISDTHeader header = rsdt_root->header; - pretty_logf(Verbose, "- RSDT_Signature: %.4s - Length: %d", header.Signature, header.Length); + pretty_logf(Verbose, "- RSDT_Signature: %.4s - Length: %d - HH addr: 0x%x", header.Signature, header.Length, rsdt_root); + //while(1) sdt_version = RSDT_V1; // Ok here we are, and we have mapped the "head of rsdt", it will stay most likely in one page, but there is no way // to know the length of the whole table before mapping its header. So now we are able to check if we need to map extra pages size_t required_extra_pages = (header.Length / KERNEL_PAGE_SIZE) + 1; if (required_extra_pages > 1) { + //uintptr_t rsdt_extra = (uintptr_t) vmm_alloc(header.Length, VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE | VMM_FLAGS_MMIO); //pretty_logf(Verbose, "- RSDT_PAGES_NEEDED: %d", required_extra_pages); for (size_t j = 1; j < required_extra_pages; j++) { uint64_t new_physical_address = descriptor->RsdtAddress + (j * KERNEL_PAGE_SIZE); - map_phys_to_virt_addr((void *) ALIGN_PHYSADDRESS(new_physical_address), (void *) ensure_address_in_higher_half(new_physical_address), VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); + map_phys_to_virt_addr((void *) ALIGN_PHYSADDRESS(new_physical_address), (void *) ensure_address_in_higher_half(new_physical_address, VM_TYPE_MMIO), VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); + // map_phys_to_virt_addr((void *) ALIGN_PHYSADDRESS(new_physical_address), (void *) rsdt_extra, VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); _bitmap_set_bit_from_address(ALIGN_PHYSADDRESS(new_physical_address)); + //rsdt_extra += KERNEL_PAGE_SIZE); } } rsdtTablesTotal = (header.Length - sizeof(ACPISDTHeader)) / sizeof(uint32_t); pretty_logf(Verbose, "- Total rsdt Tables: %d", rsdtTablesTotal); for(uint32_t i=0; i < rsdtTablesTotal; i++) { - map_phys_to_virt_addr((void *) ALIGN_PHYSADDRESS(rsdt_root->tables[i]), (void *) ensure_address_in_higher_half(rsdt_root->tables[i]), VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); - ACPISDTHeader *tableHeader = (ACPISDTHeader *) ensure_address_in_higher_half(rsdt_root->tables[i]); - pretty_logf(Verbose, "\t%d): Signature: %.4s", i, tableHeader->Signature); + map_phys_to_virt_addr((void *) ALIGN_PHYSADDRESS(rsdt_root->tables[i]), (void *) ensure_address_in_higher_half(rsdt_root->tables[i], VM_TYPE_MMIO), VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); + ACPISDTHeader *tableHeader = (ACPISDTHeader *) ensure_address_in_higher_half(rsdt_root->tables[i], VM_TYPE_MMIO); + pretty_logf(Verbose, "\t%d): Signature: %.4s Length: %d phys_addr: 0x%x", i, tableHeader->Signature, tableHeader->Length, rsdt_root->tables[i]); } } void parse_RSDTv2(RSDPDescriptor20 *descriptor){ pretty_logf(Verbose, "- Descriptor physical address: 0x%x", ALIGN_PHYSADDRESS(descriptor->XsdtAddress)); - map_phys_to_virt_addr((void *) ALIGN_PHYSADDRESS(descriptor->XsdtAddress), (void *) ensure_address_in_higher_half(descriptor->XsdtAddress), VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); + map_phys_to_virt_addr((void *) ALIGN_PHYSADDRESS(descriptor->XsdtAddress), (void *) 
ensure_address_in_higher_half(descriptor->XsdtAddress, VM_TYPE_MMIO), VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); _bitmap_set_bit_from_address(ALIGN_PHYSADDRESS(descriptor->XsdtAddress)); - xsdt_root = (XSDT *) ensure_address_in_higher_half((uint64_t) descriptor->XsdtAddress); + xsdt_root = (XSDT *) ensure_address_in_higher_half((uint64_t) descriptor->XsdtAddress, VM_TYPE_MMIO); pretty_logf(Verbose, "- XSDT_Length: 0x%x", descriptor->Length); ACPISDTHeader header = xsdt_root->header; pretty_logf(Verbose, "- XSDT_Signature: %.4s", header.Signature); @@ -73,7 +82,7 @@ void parse_RSDTv2(RSDPDescriptor20 *descriptor){ if (required_extra_pages > 1) { for (size_t j = 1; j < required_extra_pages; j++) { uint64_t new_physical_address = descriptor->XsdtAddress + (j * KERNEL_PAGE_SIZE); - map_phys_to_virt_addr((uint64_t *) new_physical_address, (uint64_t *) ensure_address_in_higher_half(new_physical_address), VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); + map_phys_to_virt_addr((uint64_t *) new_physical_address, (uint64_t *) ensure_address_in_higher_half(new_physical_address, VM_TYPE_MMIO), VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); _bitmap_set_bit_from_address(ALIGN_PHYSADDRESS(new_physical_address)); } } @@ -82,9 +91,9 @@ void parse_RSDTv2(RSDPDescriptor20 *descriptor){ pretty_logf(Verbose, "- Total xsdt Tables: %d", rsdtTablesTotal); for(uint32_t i=0; i < rsdtTablesTotal; i++) { - map_phys_to_virt_addr((uint64_t *) ALIGN_PHYSADDRESS(xsdt_root->tables[i]), (uint64_t *) ensure_address_in_higher_half(xsdt_root->tables[i]), VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); + map_phys_to_virt_addr((uint64_t *) ALIGN_PHYSADDRESS(xsdt_root->tables[i]), (uint64_t *) ensure_address_in_higher_half(xsdt_root->tables[i], VM_TYPE_MMIO), VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); _bitmap_set_bit_from_address(ALIGN_PHYSADDRESS(xsdt_root->tables[i])); - ACPISDTHeader *tableHeader = (ACPISDTHeader *) ensure_address_in_higher_half(xsdt_root->tables[i]); + ACPISDTHeader *tableHeader = (ACPISDTHeader *) ensure_address_in_higher_half(xsdt_root->tables[i], VM_TYPE_MMIO); pretty_logf(Verbose, "\t%d): Signature: %.4s", i, tableHeader->Signature); } @@ -99,10 +108,10 @@ ACPISDTHeader* get_SDT_item(char* table_name) { ACPISDTHeader *tableItem; switch(sdt_version) { case RSDT_V1: - tableItem = (ACPISDTHeader *) ensure_address_in_higher_half(rsdt_root->tables[i]); + tableItem = (ACPISDTHeader *) ensure_address_in_higher_half(rsdt_root->tables[i], VM_TYPE_MMIO); break; case RSDT_V2: - tableItem = (ACPISDTHeader *) ensure_address_in_higher_half(xsdt_root->tables[i]); + tableItem = (ACPISDTHeader *) ensure_address_in_higher_half(xsdt_root->tables[i], VM_TYPE_MMIO); break; default: pretty_log(Fatal, "That should not happen, PANIC"); diff --git a/src/kernel/arch/x86_64/cpu/ioapic.c b/src/kernel/arch/x86_64/cpu/ioapic.c index a1e54100..92abe482 100644 --- a/src/kernel/arch/x86_64/cpu/ioapic.c +++ b/src/kernel/arch/x86_64/cpu/ioapic.c @@ -18,15 +18,15 @@ void init_ioapic(MADT *madt_table){ io_apic_source_override_array_size = 0; if(item != NULL) { pretty_logf(Verbose, "IOAPIC Item address: 0x%x Type: 0x%x - length: 0x%x", item, item->type, item->length); - IO_APIC_Item *ioapic_item = (IO_APIC_Item *) ( ensure_address_in_higher_half((uint64_t) item + sizeof(MADT_Item))); - if (is_phyisical_address_mapped(ALIGN_PHYSADDRESS((uint64_t) item), ensure_address_in_higher_half((uint64_t) item)) == PHYS_ADDRESS_NOT_MAPPED) { - map_phys_to_virt_addr((void *) ALIGN_PHYSADDRESS((uint64_t) item), (void *)ensure_address_in_higher_half((uint64_t) 
item),VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); + IO_APIC_Item *ioapic_item = (IO_APIC_Item *) ( ensure_address_in_higher_half((uint64_t) item + sizeof(MADT_Item), VM_TYPE_MMIO)); + if (is_phyisical_address_mapped(ALIGN_PHYSADDRESS((uint64_t) item), ensure_address_in_higher_half((uint64_t) item, VM_TYPE_MMIO)) == PHYS_ADDRESS_NOT_MAPPED) { + map_phys_to_virt_addr((void *) ALIGN_PHYSADDRESS((uint64_t) item), (void *)ensure_address_in_higher_half((uint64_t) item, VM_TYPE_MMIO),VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); } pretty_logf(Verbose, "IOAPIC_ID: 0x%x, Address: 0x%x", ioapic_item->ioapic_id, ioapic_item->address ); pretty_logf(Verbose, "Global_System_Interrupt_Base: 0x%x", ioapic_item->global_system_interrupt_base); - pretty_logf(Verbose, "Higher Half Address: 0x%x", ensure_address_in_higher_half(ioapic_item->address)); + pretty_logf(Verbose, "Higher Half Address: 0x%x", ensure_address_in_higher_half(ioapic_item->address, VM_TYPE_MMIO)); io_apic_base_address = ioapic_item->address; - io_apic_hh_base_address = ensure_address_in_higher_half(ioapic_item->address); + io_apic_hh_base_address = ensure_address_in_higher_half(ioapic_item->address, VM_TYPE_MMIO); // This one should be mapped in the higher half ?? //map_phys_to_virt_addr(VPTR(io_apic_base_address), VPTR(io_apic_base_address), 0); map_phys_to_virt_addr(VPTR(io_apic_base_address), (void *) io_apic_hh_base_address, VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); diff --git a/src/kernel/arch/x86_64/cpu/lapic.c b/src/kernel/arch/x86_64/cpu/lapic.c index 609eb63c..f9f9f65f 100644 --- a/src/kernel/arch/x86_64/cpu/lapic.c +++ b/src/kernel/arch/x86_64/cpu/lapic.c @@ -28,7 +28,7 @@ void init_apic() { pretty_logf(Verbose, "APIC MSR Return value: 0x%X", msr_output); pretty_logf(Verbose, "APIC MSR Base Address: 0x%X", (msr_output&APIC_BASE_ADDRESS_MASK)); apic_base_address = (msr_output&APIC_BASE_ADDRESS_MASK); - apic_hh_base_address = ensure_address_in_higher_half(apic_base_address); + apic_hh_base_address = ensure_address_in_higher_half(apic_base_address, VM_TYPE_MMIO); pretty_logf(Verbose, "(%s): apic_hh_base_address: 0x%x", __FUNCTION__, apic_hh_base_address); if(apic_base_address == 0) { pretty_log(Error, "ERROR: cannot determine apic base address"); diff --git a/src/kernel/arch/x86_64/cpu/madt.c b/src/kernel/arch/x86_64/cpu/madt.c index ddf0da2f..e27b31c1 100644 --- a/src/kernel/arch/x86_64/cpu/madt.c +++ b/src/kernel/arch/x86_64/cpu/madt.c @@ -19,10 +19,10 @@ void map_madt(MADT* table){ } uint64_t madt_address = ((uint64_t) table + sizeof(MADT)); - map_phys_to_virt_addr((void *) ALIGN_PHYSADDRESS(madt_address), (void *) ensure_address_in_higher_half(madt_address), VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); + map_phys_to_virt_addr((void *) ALIGN_PHYSADDRESS(madt_address), (void *) ensure_address_in_higher_half(madt_address, VM_TYPE_MMIO), VMM_FLAGS_PRESENT | VMM_FLAGS_WRITE_ENABLE); _bitmap_set_bit_from_address(ALIGN_PHYSADDRESS(madt_address)); pretty_logf(Verbose, "Sizeof MADT struct: 0x%x", sizeof(MADT)); - madt_base = (MADT_Item *) ensure_address_in_higher_half((uint64_t)madt_address); + madt_base = (MADT_Item *) ensure_address_in_higher_half((uint64_t)madt_address, VM_TYPE_MMIO); is_madt_mapped = true; } diff --git a/src/kernel/arch/x86_64/system/vm.c b/src/kernel/arch/x86_64/system/vm.c index 7bb21dc0..751d3154 100644 --- a/src/kernel/arch/x86_64/system/vm.c +++ b/src/kernel/arch/x86_64/system/vm.c @@ -1,8 +1,9 @@ -#include -#include #include -#include #include +#include +#include +#include +#include extern uint32_t 
FRAMEBUFFER_MEMORY_SIZE; @@ -63,13 +64,25 @@ void invalidate_page_table( uint64_t *table_address ) { * * * @param address the physical address we want to map + * @param type the accepted values are: #VM_TYPE_MEMORY #VM_TYPE_MMIO * @return virtuial address in the higher half */ -uint64_t ensure_address_in_higher_half( uint64_t address ) { - if ( address > HIGHER_HALF_ADDRESS_OFFSET ) { - return address; +uint64_t ensure_address_in_higher_half( uint64_t address, uint8_t type) { + uint64_t offset = 0; + + if ( type == VM_TYPE_MEMORY ) { + offset = HIGHER_HALF_ADDRESS_OFFSET; + } else if ( type == VM_TYPE_MMIO ){ + offset = MMIO_HIGHER_HALF_ADDRESS_OFFSET; + } else { + return 0; + } + + if ( address > offset + VM_KERNEL_MEMORY_PADDING) { + return address; + } else { + return address + offset + VM_KERNEL_MEMORY_PADDING; } - return address + HIGHER_HALF_ADDRESS_OFFSET; } bool is_address_higher_half(uint64_t address) { diff --git a/src/kernel/main.c b/src/kernel/main.c index 562f4c49..78330ebf 100644 --- a/src/kernel/main.c +++ b/src/kernel/main.c @@ -112,6 +112,12 @@ void _init_basic_system(unsigned long addr){ tag_start = (struct multiboot_tag *) (addr + _HIGHER_HALF_KERNEL_MEM_START + 8); _mmap_parse(tagmmap); pmm_setup(addr, mbi_size); + kernel_settings.kernel_uptime = 0; + kernel_settings.paging.page_root_address = p4_table; + uint64_t p4_table_phys_address = (uint64_t) p4_table - _HIGHER_HALF_KERNEL_MEM_START; + kernel_settings.paging.hhdm_page_root_address = (uint64_t*) hhdm_get_variable( (uintptr_t) p4_table_phys_address); + //pretty_logf(Verbose, "p4_table[510]: %x - ADDRESS: %x", p4_table[510], kernel_settings.paging.hhdm_page_root_address[510]); + vmm_init(VMM_LEVEL_SUPERVISOR, NULL); //Print framebuffer info pretty_logf(Verbose, "Framebuffer info: (type: 0x%x) Address: 0x%x", tagfb->common.framebuffer_type, tagfb->common.framebuffer_addr); @@ -186,6 +192,7 @@ void kernel_start(unsigned long addr, unsigned long magic){ } higherHalfDirectMapBase = ((uint64_t) HIGHER_HALF_ADDRESS_OFFSET + VM_KERNEL_MEMORY_PADDING); pretty_logf(Verbose, "HigherHalf Initial entries: pml4: %d, pdpr: %d, pd: %d", PML4_ENTRY((uint64_t) higherHalfDirectMapBase), PDPR_ENTRY((uint64_t) higherHalfDirectMapBase), PD_ENTRY((uint64_t) higherHalfDirectMapBase)); + pretty_logf(Verbose, "Using page size: 0x%x" , PAGE_SIZE_IN_BYTES); pretty_logf(Verbose, "Kernel End: 0x%x - Physical: 0x%x", (unsigned long)&_kernel_end, (unsigned long)&_kernel_physical_end); _init_basic_system(addr); @@ -211,12 +218,7 @@ void kernel_start(unsigned long addr, unsigned long magic){ //_sc_putc('c', 0); //asm("int $0x80"); _mmap_setup(); - kernel_settings.kernel_uptime = 0; - kernel_settings.paging.page_root_address = p4_table; - uint64_t p4_table_phys_address = (uint64_t) p4_table - _HIGHER_HALF_KERNEL_MEM_START; - kernel_settings.paging.hhdm_page_root_address = (uint64_t*) hhdm_get_variable( (uintptr_t) p4_table_phys_address); - //pretty_logf(Verbose, "p4_table[510]: %x - ADDRESS: %x", p4_table[510], kernel_settings.paging.hhdm_page_root_address[510]); - vmm_init(VMM_LEVEL_SUPERVISOR, NULL); + initialize_kheap(); kernel_settings.paging.page_generation = 0; diff --git a/src/kernel/mem/vmm.c b/src/kernel/mem/vmm.c index 99bd059b..1acd8ff4 100644 --- a/src/kernel/mem/vmm.c +++ b/src/kernel/mem/vmm.c @@ -150,7 +150,7 @@ void *vmm_alloc_at(uint64_t base_address, size_t size, size_t flags, VmmInfo *vm pretty_logf(Verbose, "Flags PRESENT(%d) - WRITE(%d) - USER(%d)", flags & VMM_FLAGS_PRESENT, flags & VMM_FLAGS_WRITE_ENABLE, flags & 
VMM_FLAGS_USER_LEVEL); - if (!is_address_only(flags) ) { + if ( !is_address_only(flags) ) { size_t required_pages = get_number_of_pages_from_size(size); size_t arch_flags = vm_parse_flags(flags); From 7238d35c2eebd718f9246953543262db1f38f68f Mon Sep 17 00:00:00 2001 From: Ivan Gualandri Date: Mon, 6 May 2024 11:46:09 +0100 Subject: [PATCH 08/14] Finish multiboot types name translation list --- src/kernel/main.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/kernel/main.c b/src/kernel/main.c index 78330ebf..1be3f86e 100644 --- a/src/kernel/main.c +++ b/src/kernel/main.c @@ -80,18 +80,18 @@ const char *multiboot_names[] = { "Framebuffer Info", "EFI amd64 entry address tag of Multiboot2 header", "APM Table", - " ", - " ", - " ", + "EFI 32-bit system table pointer", + "EFI 64-bit system table pointer", + "SMBIOS tables", "ACPI Old RSDP", "ACPI New RSDP", - " ", - " ", - " ", - " ", - " ", + "Networking information", + "EFI memory map", + "EFI boot services not terminated", + "EFI 32-bit image handle pointer", + "EFI 64-bit image handle pointer", "Image load base physical address", - " " + "Image load base physical address" }; void _init_basic_system(unsigned long addr){ From 4847d1a5089961a3a9c909a16268795a75fc58da Mon Sep 17 00:00:00 2001 From: Ivan Gualandri Date: Mon, 6 May 2024 19:17:12 +0100 Subject: [PATCH 09/14] Align the initialization of anon memory address --- src/kernel/framebuffer/framebuffer.c | 10 +++++----- src/kernel/mem/pmm.c | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/kernel/framebuffer/framebuffer.c b/src/kernel/framebuffer/framebuffer.c index 7ce7a369..87b233e4 100644 --- a/src/kernel/framebuffer/framebuffer.c +++ b/src/kernel/framebuffer/framebuffer.c @@ -36,7 +36,6 @@ uint32_t number_of_lines; void map_framebuffer(struct framebuffer_info fbdata) { uint32_t fb_entries = fbdata.memory_size / PAGE_SIZE_IN_BYTES; - uint32_t fb_entries_mod = fbdata.memory_size % PAGE_SIZE_IN_BYTES; uint64_t phys_address = (uint64_t) fbdata.phys_address; @@ -45,7 +44,7 @@ void map_framebuffer(struct framebuffer_info fbdata) { uint32_t pml4 = PML4_ENTRY(_FRAMEBUFFER_MEM_START); #if SMALL_PAGES == 1 uint32_t fb_pd_entries = fb_entries / VM_PAGES_PER_TABLE; - uint32_t pt = PT_ENTRY(_FRAMEBUFFER_MEM_START); + //uint32_t pt = PT_ENTRY(_FRAMEBUFFER_MEM_START); #endif if(p4_table[pml4] == 0x00l || p3_table_hh[pdpr] == 0x00l){ @@ -78,14 +77,15 @@ void map_framebuffer(struct framebuffer_info fbdata) { } #elif SMALL_PAGES == 0 + uint32_t fb_entries_mod = fbdata.memory_size % PAGE_SIZE_IN_BYTES; if(fb_entries_mod != 0){ fb_entries++; } for(int j=0; fb_entries > 0; j++){ fb_entries--; - if((p2_table[pd+j] < phys_address - || p2_table[pd+j] > (phys_address + fbdata.memory_size)) - || p2_table[pd+j] == 0x00l){ + if( (p2_table[pd+j] < phys_address + || p2_table[pd+j] > (phys_address + fbdata.memory_size) ) + || p2_table[pd+j] == 0x00l ) { p2_table[pd+j] = (phys_address + (j * PAGE_SIZE_IN_BYTES)) | PAGE_ENTRY_FLAGS; } } diff --git a/src/kernel/mem/pmm.c b/src/kernel/mem/pmm.c index b452f9e7..cfb5528d 100644 --- a/src/kernel/mem/pmm.c +++ b/src/kernel/mem/pmm.c @@ -30,8 +30,8 @@ void pmm_setup(uint64_t addr, uint32_t size){ // addr = address of multiboot structre // size = size of the structure pretty_logf(Verbose, "addr: 0x%x, size: 0x%x", addr,size); - anon_memory_loc = (uint64_t) (&_kernel_end + PAGE_SIZE_IN_BYTES); - anon_physical_memory_loc = (uint64_t) (&_kernel_physical_end + PAGE_SIZE_IN_BYTES) ; + anon_memory_loc = (uint64_t) 
align_up( (size_t) (&_kernel_end + PAGE_SIZE_IN_BYTES), PAGE_SIZE_IN_BYTES); + anon_physical_memory_loc = (uint64_t) align_up( (size_t) (&_kernel_physical_end + PAGE_SIZE_IN_BYTES), PAGE_SIZE_IN_BYTES ); pretty_logf(Verbose, "anon_memory_loc: 0x%x, anon_physical_memory_loc: 0x%x", anon_memory_loc, anon_physical_memory_loc); From 78b56f92f2e1527db4d85dfd711a1bf4512b5222 Mon Sep 17 00:00:00 2001 From: Ivan Gualandri Date: Mon, 6 May 2024 19:26:01 +0100 Subject: [PATCH 10/14] Fix tests --- build/Common.mk | 1 + tests/test_common.c | 11 +++++++++-- tests/test_kheap.c | 1 + tests/test_mem.c | 17 +++++++++-------- tests/test_vm.c | 7 ++++++- 5 files changed, 26 insertions(+), 11 deletions(-) diff --git a/build/Common.mk b/build/Common.mk index caa13d8b..b85e0021 100644 --- a/build/Common.mk +++ b/build/Common.mk @@ -40,6 +40,7 @@ TESTFLAGS := -std=gnu99 \ -I src/include/kernel/arch/x86_64 \ -I src/include/kernel/arch/common/mem \ -I src/include/sys \ + -I src/include/utils \ -DSMALL_PAGES=$(SMALL_PAGES) \ -D_TEST_=1 diff --git a/tests/test_common.c b/tests/test_common.c index 8d8c3286..a718147f 100644 --- a/tests/test_common.c +++ b/tests/test_common.c @@ -21,9 +21,9 @@ uint32_t _compute_kernel_entries(uint64_t end_of_kernel_area){ printf("kernel_mod_entries: 0x%X\n", kernel_mod_entries); if ( kernel_mod_entries != 0){ return kernel_entries + 2; - } + } return kernel_entries + 1; - + } void *map_vaddress(void *address, unsigned int flags){ @@ -60,6 +60,13 @@ void spinlock_release(spinlock_t *lock) { return; } +void hhdm_map_physical_memory() { +} + +bool _is_address_in_multiboot(uint64_t address) { + return false; +} + void spinlock_free(spinlock_t* spinlock) { return; } diff --git a/tests/test_kheap.c b/tests/test_kheap.c index a680fe0b..91a019db 100644 --- a/tests/test_kheap.c +++ b/tests/test_kheap.c @@ -14,6 +14,7 @@ struct multiboot_tag_basic_meminfo *tagmem; struct multiboot_tag_mmap *mmap_root; uint64_t _kernel_end = 0x1190AC; +uint64_t _kernel_physical_end = 0x1190AC; uint64_t kheap_size = 8 * PAGE_SIZE; int main(){ diff --git a/tests/test_mem.c b/tests/test_mem.c index 2e5cf317..99aecd01 100644 --- a/tests/test_mem.c +++ b/tests/test_mem.c @@ -1,5 +1,8 @@ #include +#include #include +#include +#include #include #include #include @@ -8,16 +11,8 @@ #include #include #include -#include -#include #include -struct multiboot_tag_basic_meminfo *tagmem; -struct multiboot_tag_mmap *mmap_root; -//unsigned long _kernel_physical_end __attribute__((section(".mySection"))) = 0x9ABCDEF0; -unsigned long _kernel_physical_end = 0x1190AC; -uint64_t _kernel_end = 0x1190AC; - extern uint64_t *memory_map; extern uint32_t number_of_entries; extern uint32_t bitmap_size; @@ -25,6 +20,12 @@ extern uint32_t used_frames; extern uint32_t mmap_number_of_entries; extern multiboot_memory_map_t *mmap_entries; +struct multiboot_tag_basic_meminfo *tagmem; +struct multiboot_tag_mmap *mmap_root; +//unsigned long _kernel_physical_end __attribute__((section(".mySection"))) = 0x9ABCDEF0; +uint64_t _kernel_end = 0x1190AC; +uint64_t _kernel_physical_end = 0x1190AC; + int main() { test_pmm_initialize(); test_pmm(); diff --git a/tests/test_vm.c b/tests/test_vm.c index d14787a0..a3eec7dc 100644 --- a/tests/test_vm.c +++ b/tests/test_vm.c @@ -20,9 +20,14 @@ typedef enum { int main() { test_is_address_higher_half(); + test_ensure_address_in_higher_half(); test_vm_parse_flags(); } - + +void test_ensure_address_in_higher_half() { + +} + void test_is_address_higher_half() { printf("Testing is address higher_half\n"); From 
cb50344cd3d5759695cd8a0d49b72d1c1c0224cd Mon Sep 17 00:00:00 2001
From: Ivan Gualandri
Date: Mon, 6 May 2024 19:27:56 +0100
Subject: [PATCH 11/14] Add changes to kheap file

---
 src/kernel/mem/kheap.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/kernel/mem/kheap.c b/src/kernel/mem/kheap.c
index b4d74329..aa002432 100644
--- a/src/kernel/mem/kheap.c
+++ b/src/kernel/mem/kheap.c
@@ -94,6 +94,7 @@ void *kmalloc(size_t size) {
 }
 
 void expand_heap(size_t required_size) {
+#ifndef _TEST_
     // The first thing to do is compute how many page we need for this expansion
     size_t number_of_pages = get_number_of_pages_from_size(required_size);
     // This function expand the heap in case more space is needed.
@@ -115,6 +116,7 @@ void expand_heap(size_t required_size) {
     if ( available_merges & MERGE_LEFT) {
         merge_memory_nodes(new_tail->prev, new_tail);
     }
+#endif
 }
 
 uint64_t compute_kheap_end() {

From b24a807d9fac300c2eb13f831e8c996efa7ce929 Mon Sep 17 00:00:00 2001
From: Ivan G <59960116+dreamos82@users.noreply.github.com>
Date: Mon, 6 May 2024 20:27:34 +0100
Subject: [PATCH 12/14] Update FUNDING.yml

---
 .github/FUNDING.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
index cfa9900a..e10ac4a9 100644
--- a/.github/FUNDING.yml
+++ b/.github/FUNDING.yml
@@ -3,7 +3,7 @@
 github: dreamos82
 patreon: inuyasha82
 open_collective: # Replace with a single Open Collective username
-ko_fi: # Replace with a single Ko-fi username
+ko_fi: dreamos82
 tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
 community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
 liberapay: # Replace with a single Liberapay username

From a048ea1d607d6629ab4e362e4d01ac7530de64d1 Mon Sep 17 00:00:00 2001
From: Ivan Gualandri
Date: Tue, 7 May 2024 00:02:44 +0100
Subject: [PATCH 13/14] minor updates to documentation

---
 docs/kernel/Initialization.md   |  5 ++-
 docs/kernel/MemoryManagement.md | 54 +++++++++++++++++++++++++++++++++
 2 files changed, 56 insertions(+), 3 deletions(-)
 create mode 100644 docs/kernel/MemoryManagement.md

diff --git a/docs/kernel/Initialization.md b/docs/kernel/Initialization.md
index adafcbb3..aa7bc6de 100644
--- a/docs/kernel/Initialization.md
+++ b/docs/kernel/Initialization.md
@@ -37,12 +37,11 @@ The sequence of component that are intialized (refer to `src/main.c`):
 * Load the PSF font from memory
 * Basic System Initialization:
     - Parse the multiboot information received from the bootloader.
-    - Parse the mmap and initialize the physical memory manager, marking the pmm areas as busy.
+    - Parse the mmap.
+    - Initialize the physical memory manager, marking the pmm areas as busy, and set up the HHDM.
     - Initialize the physical memory manager, marking the area in the mmap as already taken.
     - Validate and parse the SDT tables
-    - This section needs to be reviewed and check if all the steps are required
 * Finish mapping the Framebuffer (there is a potential bug here, need to chek what i do while mapping it)
-* Set the Higher Half direct map
 * Initialize the kernel VMM
 * Initialize the kernel heap
 * Initialize the apic

diff --git a/docs/kernel/MemoryManagement.md b/docs/kernel/MemoryManagement.md
new file mode 100644
index 00000000..a40d7155
--- /dev/null
+++ b/docs/kernel/MemoryManagement.md
@@ -0,0 +1,54 @@
+# Memory Management
+
+The memory management will be rewritten in the future.
+
+Currently Dreamos64 uses virtual memory, providing every process with its own address space.
+
+The memory is divided into a lower half for the _user_ level and a higher half for the _supervisor_ level.
+
+The kernel is loaded at -2G.
+
+Current `PAGE_SIZE` is 2M.
+
+
+## Initialization workflow
+
+* `mmap_parse` -> Initialize the global variables used to read the mmap, and print its content.
+* `pmm_setup` initializes the physical memory manager layer
+    - It first calls `initialize_bitmap`
+    - Then
+
+## Physical memory
+
+The physical memory level is managed using a simple bitmap algorithm. The memory allocated is returned in chunks of `PAGE_SIZE`.
+
+There are two levels in the physical memory manager:
+
+* the bitmap level, which contains the functions to set/clear bits in the bitmap; these functions should be used only by the pmm.
+* the pmm level, which contains the functions to allocate and free pages of physical memory.
+
+### Memory map
+
+The memory map is based on the one obtained from the multiboot information during initialization.
+
+## Paging
+
+Paging uses a fixed page size (only one page size is supported at a time). The size can be configured between 4k and 2M pages, although lately only 2M pages have been tested.
+
+It relies on the `x86_64` paging mechanism.
+
+### Higher Half Direct Map (HHDM)
+
+An HHDM is provided to the kernel as a convenience.
+
+## Virtual Memory Manager
+
+It sucks, but for now it does its job (partially!)
+
+Currently only the allocation of virtual memory is implemented. There is no `vmm_free` implemented yet.
+
+## KHeap
+
+This is the kernel heap; it is used by the kernel when it needs to allocate resources.
+
+

From 44b212a751144d4c552855a02e1283375a98a5eb Mon Sep 17 00:00:00 2001
From: Ivan Gualandri
Date: Tue, 7 May 2024 00:28:35 +0100
Subject: [PATCH 14/14] Update tests

---
 tests/test_vm.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/tests/test_vm.c b/tests/test_vm.c
index a3eec7dc..90576f6a 100644
--- a/tests/test_vm.c
+++ b/tests/test_vm.c
@@ -7,6 +7,7 @@
 
 #include 
 
+void test_ensure_address_in_higher_half();
 void test_is_address_higher_half();
 void test_vm_parse_flags();
 
@@ -25,6 +26,20 @@ int main() {
 }
 
 void test_ensure_address_in_higher_half() {
+    printf("Testing ensure_address_in_higher_half\n");
+    uint64_t test_address = 0x100000;
+    printf("\t [test_vm](%s): Should return (0) for type not recognized and address: 0x%lx\n", __FUNCTION__, test_address);
+    test_address = ensure_address_in_higher_half(test_address, 3);
+    assert(test_address == 0);
+    test_address = ensure_address_in_higher_half(0x100000, VM_TYPE_MMIO);
+    printf("\t [test_vm](%s): Should return (0x%lx) for type MMIO and address=0x%x \n", __FUNCTION__, test_address, 0x100000);
+    assert(test_address == 0xffff800000300000);
+    test_address = ensure_address_in_higher_half(0x100000, VM_TYPE_MEMORY);
+    printf("\t [test_vm](%s): Should return (0x%lx) for type MEMORY and address=0x%x\n", __FUNCTION__, test_address, 0x100000);
+    assert(test_address == 0xffff800280300000);
+    test_address = ensure_address_in_higher_half(0xffff800280300000, VM_TYPE_MEMORY);
+    printf("\t [test_vm](%s): Should return (0x%lx) for type MEMORY and address=0x%lx\n", __FUNCTION__, test_address, 0xffff800280300000);
+    assert(test_address == 0xffff800280300000);
 
 }
 
@@ -39,7 +54,7 @@ void test_is_address_higher_half() {
     printf("\t [test_vm](is_address_higher_half): Should return false (0) for 0xffff100000 - %d\n", is_hh);
    assert(is_hh == false);
    is_hh = is_address_higher_half(0xFFFF800000000000);
-    printf("\t [test_vm](is_address_higher_half): Should return false (0) 
0xFFFF800000000000 - %d\n", is_hh); + printf("\t [test_vm](is_address_higher_half): Should return true (1) 0xFFFF800000000000 - %d\n", is_hh); assert(is_hh == true); }
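
A note on the higher-half translation exercised by the updated tests: the new `ensure_address_in_higher_half(address, type)` picks a different direct-map offset for regular memory and for MMIO, and tests/test_vm.c pins down the expected results. The sketch below reproduces that logic as a stand-alone program; the offset and padding constants are assumptions chosen only so the outputs line up with the assertions in tests/test_vm.c, not the kernel's actual header values. It should build with any C99 compiler.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VM_TYPE_MEMORY 0
#define VM_TYPE_MMIO   1

#define HIGHER_HALF_ADDRESS_OFFSET      0xffff800280000000ull /* assumed value */
#define MMIO_HIGHER_HALF_ADDRESS_OFFSET 0xffff800000000000ull /* assumed value */
#define VM_KERNEL_MEMORY_PADDING        0x200000ull           /* assumed value */

uint64_t ensure_address_in_higher_half(uint64_t address, uint8_t type) {
    uint64_t offset;

    // Pick the direct-map window that matches the requested mapping type.
    if (type == VM_TYPE_MEMORY) {
        offset = HIGHER_HALF_ADDRESS_OFFSET;
    } else if (type == VM_TYPE_MMIO) {
        offset = MMIO_HIGHER_HALF_ADDRESS_OFFSET;
    } else {
        return 0; // unknown type: the caller gets an invalid address
    }

    // Addresses already above the window are returned untouched;
    // everything else is shifted into the higher half.
    if (address > offset + VM_KERNEL_MEMORY_PADDING) {
        return address;
    }
    return address + offset + VM_KERNEL_MEMORY_PADDING;
}

int main(void) {
    assert(ensure_address_in_higher_half(0x100000, 3) == 0);
    assert(ensure_address_in_higher_half(0x100000, VM_TYPE_MMIO) == 0xffff800000300000ull);
    assert(ensure_address_in_higher_half(0x100000, VM_TYPE_MEMORY) == 0xffff800280300000ull);
    assert(ensure_address_in_higher_half(0xffff800280300000ull, VM_TYPE_MEMORY) == 0xffff800280300000ull);
    puts("ensure_address_in_higher_half: all checks passed");
    return 0;
}
```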
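
PATCH 09 rounds `anon_memory_loc` and `anon_physical_memory_loc` up to a page boundary with `align_up` instead of using the raw `_kernel_end` addresses. A typical power-of-two `align_up` looks like the sketch below; the project's own helper may differ in name or signature, so treat this as an illustration rather than the actual implementation.

```c
#include <assert.h>
#include <stddef.h>

// Round `value` up to the next multiple of `alignment`.
// Assumes `alignment` is a power of two (e.g. a 4K or 2M page size).
static inline size_t align_up(size_t value, size_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
}

int main(void) {
    assert(align_up(0x1190AC, 0x200000) == 0x200000); // 2M pages
    assert(align_up(0x200000, 0x200000) == 0x200000); // already aligned
    assert(align_up(0x1190AC, 0x1000)   == 0x11A000); // 4K pages
    return 0;
}
```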
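
The new docs/kernel/MemoryManagement.md describes the physical memory manager as a bitmap with one bit per page frame, split into a low-level bit-manipulation layer and a pmm allocation layer built on top of it. The sketch below shows only the bit-manipulation layer, with invented names and a fixed-size map purely for illustration; it is not the kernel's actual code.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define BITS_PER_ROW 64

// One bit per page frame: set = frame in use, clear = frame free.
static uint64_t memory_map[512]; // room for 512 * 64 frames, for the example

static void bitmap_set_bit(uint64_t location) {
    memory_map[location / BITS_PER_ROW] |= 1ull << (location % BITS_PER_ROW);
}

static void bitmap_free_bit(uint64_t location) {
    memory_map[location / BITS_PER_ROW] &= ~(1ull << (location % BITS_PER_ROW));
}

static bool bitmap_test_bit(uint64_t location) {
    return memory_map[location / BITS_PER_ROW] & (1ull << (location % BITS_PER_ROW));
}

int main(void) {
    bitmap_set_bit(70);            // mark frame 70 as used
    assert(bitmap_test_bit(70));
    bitmap_free_bit(70);           // release it again
    assert(!bitmap_test_bit(70));
    return 0;
}
```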