Skip to content

Commit 1195a6c

Browse files
committed
move memory discovery to the Architecture trait
Memory discovery (finding how much we have, where etc...) shouldn't be hardcoded in `mm/mod.rs`. Instead it should be in the Architecture trait so that each platform can do its thing (riscv/arm => device tree, x86 => multiboot info or e820). Originally I just wanted a `memory_regions -> impl Iterator<Item=T>` method in the Architecture trait. But oh boy no... Currently a method that returns an impl Trait is not possible (rustc 1.59.0-nightly). In the future, this should be possible, see: rust-lang/rust#63066 Meanwhile, this is what I came up with. Since I can't return an impl Trait but I can pass one as parameter, I have a `for_all_memory_regions` method that creates the iterator and then calls the closure with that parameter. Code that uses that is a bit more ugly and less clear than if I could have returned an impl Trait, but it should do for now. This MUST be fixed when the above issue is resolved. Also the type signatures are a bit *complex*, this is due to my vicious fight with the rust borrow-checking-and-mighty-type-checking gods of Rust. Last but not least, a quote from the great: Tout est simple, tout est complexe, à vous de trouver la solution, une solution peut en cacher une autre -- Garnier
1 parent 434bfa4 commit 1195a6c

File tree

5 files changed

+119
-88
lines changed

5 files changed

+119
-88
lines changed

src/arch/mod.rs

+9-3
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,21 @@
1+
12
#[cfg(target_arch = "arm")]
23
mod arm32;
34
#[cfg(target_arch = "riscv64")]
45
mod riscv64;
56

7+
68
use cfg_if::cfg_if;
79

810
use crate::mm;
911

1012
#[cfg(target_arch = "riscv64")]
1113
pub type MemoryImpl = riscv64::sv39::PageTable;
1214

13-
pub fn new_arch() -> impl Architecture {
15+
pub fn new_arch(info: usize) -> impl Architecture {
1416
cfg_if! {
1517
if #[cfg(target_arch = "riscv64")] {
16-
riscv64::Riscv64::new()
18+
riscv64::Riscv64::new(info)
1719
} else if #[cfg(target_arch = "arm")] {
1820
arm32::Arm32::new()
1921
} else {
@@ -24,8 +26,12 @@ pub fn new_arch() -> impl Architecture {
2426

2527
pub trait Architecture {
2628
unsafe extern "C" fn _start() -> !;
27-
2829
fn init_interrupts(&mut self);
30+
31+
fn new(info: usize) -> Self;
32+
33+
fn for_all_memory_regions<F: FnMut(&mut dyn Iterator<Item=(usize, usize)>)>(&self, f: F);
34+
fn for_all_reserved_memory_regions<F: FnMut(&mut dyn Iterator<Item=(usize, usize)>)>(&self, f: F);
2935
}
3036

3137
pub trait ArchitectureMemory {

src/arch/riscv64/mod.rs

+33-5
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ use super::Architecture;
22
use crate::drivers::plic::plic_handler;
33
use core::arch::asm;
44

5+
56
pub mod sv39;
67

78
#[no_mangle]
@@ -20,13 +21,11 @@ static mut INTERRUPT_VECTOR: &[extern "C" fn()] = &[
2021
plic_handler,
2122
];
2223

23-
pub struct Riscv64 {}
24+
pub struct Riscv64 {
25+
device_tree: fdt::Fdt<'static>,
26+
}
2427

2528
impl Riscv64 {
26-
pub fn new() -> Self {
27-
Self {}
28-
}
29-
3029
fn set_sstatus_sie(&self) {
3130
unsafe {
3231
asm!("csrrs zero, sstatus, {}", in(reg)1 << 1);
@@ -58,13 +57,42 @@ impl Riscv64 {
5857
}
5958
}
6059

60+
6161
impl Architecture for Riscv64 {
6262
#[naked]
6363
#[no_mangle]
6464
unsafe extern "C" fn _start() -> ! {
6565
asm!("la sp, STACK_START", "call k_main", options(noreturn));
6666
}
6767

68+
fn new(info: usize) -> Self {
69+
let device_tree = unsafe { fdt::Fdt::from_ptr(info as *const u8).unwrap() };
70+
71+
Self { device_tree }
72+
}
73+
74+
fn for_all_memory_regions<F: FnMut(&mut dyn Iterator<Item=(usize, usize)>)>(&self, mut f: F) {
75+
let memory = self.device_tree.memory();
76+
let mut regions = memory
77+
.regions()
78+
.map(|region| {
79+
(region.starting_address as usize, region.size.unwrap_or(0))
80+
});
81+
82+
f(&mut regions);
83+
}
84+
85+
fn for_all_reserved_memory_regions<F: FnMut(&mut dyn Iterator<Item=(usize, usize)>)>(&self, mut f: F) {
86+
let reserved_memory = self.device_tree.find_node("/reserved-memory").unwrap();
87+
88+
let mut regions = reserved_memory
89+
.children()
90+
.flat_map(|child| child.reg().unwrap())
91+
.map(|region| (region.starting_address as usize, region.size.unwrap_or(0)));
92+
93+
f(&mut regions);
94+
}
95+
6896
fn init_interrupts(&mut self) {
6997
self.set_sstatus_sie();
7098
self.set_sie_ssie();

src/main.rs

+4-4
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33
#![feature(fn_align)]
44
#![feature(naked_functions)]
55
#![feature(custom_test_frameworks)]
6+
#![feature(associated_type_defaults)]
7+
#![feature(type_alias_impl_trait)]
68
#![test_runner(crate::kernel_tests::runner)]
79
#![reexport_test_harness_main = "ktests_launch"]
810

@@ -27,7 +29,7 @@ extern "C" fn k_main(_core_id: usize, device_tree_ptr: usize) -> ! {
2729
#[cfg(test)]
2830
ktests_launch();
2931

30-
let mut arch = arch::new_arch();
32+
let mut arch = arch::new_arch(device_tree_ptr);
3133

3234
kprintln!("GoOSe is booting");
3335

@@ -44,9 +46,7 @@ extern "C" fn k_main(_core_id: usize, device_tree_ptr: usize) -> ! {
4446
}
4547
plic.set_threshold(0);
4648

47-
let device_tree = unsafe { fdt::Fdt::from_ptr(device_tree_ptr as *const u8).unwrap() };
48-
49-
let mut memory = mm::MemoryManager::<arch::MemoryImpl>::new(&device_tree);
49+
let mut memory = mm::MemoryManager::<arch::MemoryImpl>::new(&arch);
5050
memory.map_address_space();
5151

5252
kprintln!("Virtual memory enabled!");

src/mm/mod.rs

+17-14
Original file line numberDiff line numberDiff line change
@@ -33,16 +33,19 @@ pub fn is_kernel_page(base: usize) -> bool {
3333
}
3434

3535
// TODO: shall this be moved to arch/riscv (in the MemoryImpl) ?
36-
pub fn is_reserved_page(base: usize, device_tree: &fdt::Fdt) -> bool {
37-
let reserved_memory = device_tree.find_node("/reserved-memory").unwrap();
38-
let mut reserved_pages = reserved_memory
39-
.children()
40-
.flat_map(|child| child.reg().unwrap())
41-
.map(|region| (region.starting_address as usize, region.size.unwrap_or(0)));
42-
43-
reserved_pages.any(|(region_start, region_size)| {
44-
base >= region_start && base <= (region_start + region_size)
45-
})
36+
pub fn is_reserved_page(base: usize, arch: &impl arch::Architecture) -> bool {
37+
let mut is_res = false;
38+
39+
40+
arch.for_all_reserved_memory_regions(|regions| {
41+
is_res = regions
42+
.map(|(start, size)| (start, size)) // this is a weird hack to fix a type error.
43+
.any(|(region_start, region_size)| {
44+
base >= region_start && base <= (region_start + region_size)
45+
})
46+
});
47+
48+
return is_res;
4649
}
4750

4851
pub struct MemoryManager<'alloc, T: arch::ArchitectureMemory> {
@@ -51,12 +54,12 @@ pub struct MemoryManager<'alloc, T: arch::ArchitectureMemory> {
5154
}
5255

5356
impl<'alloc, T: arch::ArchitectureMemory> MemoryManager<'alloc, T> {
54-
pub fn new(device_tree: &fdt::Fdt) -> Self {
57+
pub fn new(arch: &impl arch::Architecture) -> Self {
5558
let mut page_manager =
56-
page_manager::PageManager::from_device_tree(&device_tree, T::get_page_size());
57-
let arch = T::new(&mut page_manager);
59+
page_manager::PageManager::from_arch_info(arch, T::get_page_size());
60+
let arch_mem = T::new(&mut page_manager);
5861

59-
Self { page_manager, arch }
62+
Self { page_manager, arch: arch_mem }
6063
}
6164

6265
fn map(&mut self, to: usize, from: usize, perms: Permissions) {

src/mm/page_manager.rs

+56-62
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ use super::page_alloc::{AllocatorError, PageAllocator};
22
use crate::kprintln;
33
use crate::mm;
44
use core::mem;
5+
use crate::Architecture;
56

67
#[derive(Debug, PartialEq)]
78
pub enum PageKind {
@@ -55,18 +56,19 @@ pub struct PageManager<'a> {
5556

5657
impl<'a> PageManager<'a> {
5758
fn count_pages(
58-
regions: impl Iterator<Item = fdt::standard_nodes::MemoryRegion>,
59+
arch: &impl Architecture,
5960
page_size: usize,
6061
) -> usize {
61-
regions
62-
.map(|region| {
63-
(region.starting_address, unsafe {
64-
region.starting_address.add(region.size.unwrap_or(0))
65-
})
66-
})
67-
.map(|(start, end)| (start as usize, end as usize))
68-
.flat_map(|(start, end)| (start..end).step_by(page_size))
69-
.count()
62+
let mut count = 0;
63+
64+
arch.for_all_memory_regions(|regions| {
65+
count = regions
66+
.map(|(start, end)| (start as usize, end as usize))
67+
.flat_map(|(start, end)| (start..end).step_by(page_size))
68+
.count();
69+
});
70+
71+
count
7072
}
7173

7274
fn align_up(addr: usize, alignment: usize) -> usize {
@@ -75,12 +77,12 @@ impl<'a> PageManager<'a> {
7577

7678
fn phys_addr_to_physical_page(
7779
phys_addr: usize,
78-
device_tree: &fdt::Fdt,
80+
arch: &impl Architecture,
7981
page_size: usize,
8082
) -> PhysicalPage {
8183
let kind = if mm::is_kernel_page(phys_addr) {
8284
PageKind::Kernel
83-
} else if mm::is_reserved_page(phys_addr, device_tree) {
85+
} else if mm::is_reserved_page(phys_addr, arch) {
8486
PageKind::Reserved
8587
} else {
8688
PageKind::Unused
@@ -96,44 +98,42 @@ impl<'a> PageManager<'a> {
9698
/// Look for `pages_needed` contiguous unused pages, beware of pages that are in use by the
9799
/// kernel.
98100
fn find_contiguous_unused_pages(
99-
device_tree: &fdt::Fdt,
101+
arch: &impl Architecture,
100102
pages_needed: usize,
101103
page_size: usize,
102104
) -> Option<usize> {
103-
let memory = device_tree.memory();
104-
105-
let physical_pages = memory
106-
.regions()
107-
.map(|region| {
108-
(region.starting_address, unsafe {
109-
region.starting_address.add(region.size.unwrap_or(0))
110-
})
111-
})
112-
.map(|(start, end)| (start as usize, end as usize))
113-
.flat_map(|(start, end)| (start..end).step_by(page_size))
114-
.map(|base| Self::phys_addr_to_physical_page(base, device_tree, page_size));
115-
116-
let mut first_page_addr: usize = 0;
117-
let mut consecutive_pages: usize = 0;
105+
let mut found = None;
118106

119-
for page in physical_pages {
120-
if consecutive_pages == 0 {
121-
first_page_addr = page.base;
122-
}
107+
arch.for_all_memory_regions(|regions| {
108+
let physical_pages = regions
109+
.flat_map(|(start, end)| (start..end).step_by(page_size))
110+
.map(|base| Self::phys_addr_to_physical_page(base, arch, page_size));
123111

124-
if page.is_used() {
125-
consecutive_pages = 0;
126-
continue;
127-
}
112+
let mut first_page_addr: usize = 0;
113+
let mut consecutive_pages: usize = 0;
128114

129-
consecutive_pages += 1;
115+
for page in physical_pages {
116+
if consecutive_pages == 0 {
117+
first_page_addr = page.base;
118+
}
119+
120+
if page.is_used() {
121+
consecutive_pages = 0;
122+
continue;
123+
}
130124

131-
if consecutive_pages == pages_needed {
132-
return Some(first_page_addr);
125+
consecutive_pages += 1;
126+
127+
if consecutive_pages == pages_needed {
128+
found = Some(first_page_addr);
129+
return
130+
}
133131
}
134-
}
135132

136-
None
133+
return;
134+
});
135+
136+
found
137137
}
138138

139139
/// TLDR: Initialize a [`PageAllocator`] from the device tree.
@@ -143,42 +143,36 @@ impl<'a> PageManager<'a> {
143143
/// - Second (in [`Self::find_contiguous_unused_pages`]), look through our pages for a contiguous
144144
/// space large enough to hold all our metadata.
145145
/// - Lastly store our metadata there, and mark pages as unused or kernel.
146-
pub fn from_device_tree(device_tree: &fdt::Fdt, page_size: usize) -> Self {
147-
let memory_node = device_tree.memory();
148-
149-
let page_count = Self::count_pages(memory_node.regions(), page_size);
146+
pub fn from_arch_info(arch: &impl Architecture, page_size: usize) -> Self {
147+
let page_count = Self::count_pages(arch, page_size);
150148
let metadata_size = page_count * mem::size_of::<PhysicalPage>();
151149
let pages_needed = Self::align_up(metadata_size, page_size) / page_size;
152150
kprintln!("total of {:?} pages", page_count);
153151
kprintln!("metadata_size: {:?}", metadata_size);
154152
kprintln!("pages_needed: {:?}", pages_needed);
155153

156154
let metadata_addr =
157-
Self::find_contiguous_unused_pages(device_tree, pages_needed, page_size).unwrap();
155+
Self::find_contiguous_unused_pages(arch, pages_needed, page_size).unwrap();
158156
kprintln!("metadata_addr: {:X}", metadata_addr);
159157

160158
let metadata: &mut [PhysicalPage] =
161159
unsafe { core::slice::from_raw_parts_mut(metadata_addr as *mut _, page_count) };
162160

163-
let physical_pages = memory_node
164-
.regions()
165-
.map(|region| {
166-
(region.starting_address, unsafe {
167-
region.starting_address.add(region.size.unwrap_or(0))
168-
})
169-
})
170-
.map(|(start, end)| (start as usize, end as usize))
171-
.flat_map(|(start, end)| (start..end).step_by(page_size))
172-
.map(|base| Self::phys_addr_to_physical_page(base, device_tree, page_size));
173-
174-
for (i, page) in physical_pages.enumerate() {
175-
metadata[i] = page;
176-
}
161+
arch.for_all_memory_regions(|regions| {
162+
let physical_pages = regions
163+
.flat_map(|(start, end)| (start..end).step_by(page_size))
164+
.map(|base| Self::phys_addr_to_physical_page(base, arch, page_size));
177165

178-
return Self {
166+
for (i, page) in physical_pages.enumerate() {
167+
metadata[i] = page;
168+
}
169+
});
170+
171+
172+
Self {
179173
metadata,
180174
page_size,
181-
};
175+
}
182176
}
183177

184178
pub fn page_size(&self) -> usize {

0 commit comments

Comments
 (0)