Commit

Support freeing pages (#120)

nuta authored Dec 15, 2021
1 parent e101d81 commit 3fb1b3a
Showing 6 changed files with 220 additions and 10 deletions.
41 changes: 41 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 2 additions & 0 deletions libs/kerla_utils/Cargo.toml
@@ -14,5 +14,7 @@ path = "lib.rs"
no_std = []

[dependencies]
log = "0.4"
spin = "0.9.2"
crossbeam = { version = "0.8.1", default-features = false }
bitvec = { version = "0.22", default-features = false }
71 changes: 71 additions & 0 deletions libs/kerla_utils/bitmap_allocator.rs
@@ -0,0 +1,71 @@
use bitvec::prelude::*;

use crate::alignment::align_up;

const PAGE_SIZE: usize = 4096;

pub struct BitMapAllocator {
bitmap: spin::Mutex<&'static mut BitSlice<Lsb0, u8>>,
base: usize,
end: usize,
}

impl BitMapAllocator {
/// # Safety
///
/// The caller must ensure that the memory passed to this function is
/// aligned to a page boundary.
pub unsafe fn new(base: *mut u8, base_paddr: usize, len: usize) -> BitMapAllocator {
let num_pages = align_up(len, PAGE_SIZE) / PAGE_SIZE;
let bitmap_reserved_len = align_up(num_pages / 8, PAGE_SIZE);
let bitmap_actual_len = (num_pages / 8) - (bitmap_reserved_len / PAGE_SIZE);
let bitmap =
BitSlice::from_slice_mut(core::slice::from_raw_parts_mut(base, bitmap_actual_len))
.expect("you have too much memory");

debug_assert!(bitmap_reserved_len >= bitmap_actual_len);
bitmap.set_all(false);

BitMapAllocator {
bitmap: spin::Mutex::new(bitmap),
base: base_paddr + bitmap_reserved_len,
end: base_paddr + len - bitmap_reserved_len,
}
}

pub fn includes(&mut self, ptr: usize) -> bool {
self.base <= ptr && ptr < self.end
}

pub fn alloc_pages(&mut self, order: usize) -> Option<usize> {
let num_pages = 1 << order;
let mut bitmap = self.bitmap.lock();
let mut off = 0;
while let Some(first_zero) = bitmap[off..].first_zero() {
let start = off + first_zero;
let end = off + first_zero + num_pages;
if end > bitmap.len() {
break;
}

if bitmap[start..end].not_any() {
bitmap[start..end].set_all(true);
return Some(self.base + start * PAGE_SIZE);
}

off += first_zero + 1;
}

None
}

pub fn free_pages(&mut self, ptr: usize, order: usize) {
let num_pages = 1 << order;
let off = (ptr - self.base) / PAGE_SIZE;

let mut bitmap = self.bitmap.lock();

debug_assert!(bitmap[off..(off + num_pages)].all(), "double free");
bitmap[off..(off + num_pages)].set_all(false);
}
}
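
For readers skimming the diff, here is a minimal, hypothetical sketch of how the new bitmap allocator could be driven. The backing buffer, its size, and the identity mapping of `base_paddr` to the buffer's own address are invented for illustration; in the kernel the region comes from the boot info's RAM areas instead.

use kerla_utils::bitmap_allocator::BitMapAllocator;

const PAGE_SIZE: usize = 4096;

fn demo() {
    // Hypothetical backing region; over-allocate by one page so the start can
    // be rounded up to a page boundary, as `BitMapAllocator::new` requires.
    let len = 64 * PAGE_SIZE;
    let buf: &'static mut [u8] = vec![0u8; len + PAGE_SIZE].leak();
    let base = ((buf.as_mut_ptr() as usize + PAGE_SIZE - 1) & !(PAGE_SIZE - 1)) as *mut u8;

    // SAFETY: `base` is page-aligned and exclusively owned by this function.
    // Here the "physical" base is simply the buffer's own address.
    let mut zone = unsafe { BitMapAllocator::new(base, base as usize, len) };

    // Order n allocates 2^n contiguous pages and marks their bits as used.
    let one_page = zone.alloc_pages(0).expect("out of pages");
    let four_pages = zone.alloc_pages(2).expect("out of pages");
    assert!(zone.includes(one_page) && zone.includes(four_pages));

    // free_pages clears the bits again (asserting against double frees in
    // debug builds), so the range can be handed out once more.
    zone.free_pages(four_pages, 2);
    zone.free_pages(one_page, 0);
}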
16 changes: 15 additions & 1 deletion libs/kerla_utils/bump_allocator.rs
@@ -1,18 +1,28 @@
const PAGE_SIZE: usize = 4096;

pub struct BumpAllocator {
base: usize,
current: usize,
end: usize,
}

impl BumpAllocator {
pub fn new(_base: *mut u8, base_paddr: usize, len: usize) -> BumpAllocator {
/// # Safety
///
/// The caller must ensure that the memory passed to this function is
/// aligned to a page boundary.
pub unsafe fn new(_base: *mut u8, base_paddr: usize, len: usize) -> BumpAllocator {
BumpAllocator {
base: base_paddr,
current: base_paddr,
end: base_paddr + len,
}
}

pub fn includes(&mut self, ptr: usize) -> bool {
self.base <= ptr && ptr < self.end
}

pub fn alloc_pages(&mut self, order: usize) -> Option<usize> {
let len = PAGE_SIZE * (1 << order);
if self.current + len >= self.end {
@@ -23,4 +33,8 @@ impl BumpAllocator {
self.current += len;
Some(ptr)
}

pub fn free_pages(&mut self, _ptr: usize, _order: usize) {
// Not supported.
}
}
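
The bump allocator keeps only a single `current` cursor and no per-page bookkeeping, so it has nothing to undo when a page is returned; that is why its `free_pages` is a no-op and only the bitmap (or buddy) allocator can actually reclaim memory. A hypothetical illustration of that behavior follows (addresses and sizes are invented; the virtual pointer argument is ignored by `BumpAllocator`):

use kerla_utils::bump_allocator::BumpAllocator;

const PAGE_SIZE: usize = 4096;

fn demo() {
    // Hypothetical physical range starting at 0x10_0000 spanning 16 pages.
    // SAFETY: the pointer argument is unused by BumpAllocator, so a null
    // (page-aligned) pointer is acceptable for this illustration.
    let mut bump =
        unsafe { BumpAllocator::new(core::ptr::null_mut(), 0x10_0000, 16 * PAGE_SIZE) };

    let p1 = bump.alloc_pages(0).unwrap();
    bump.free_pages(p1, 0); // no-op: there is no per-page state to clear
    let p2 = bump.alloc_pages(0).unwrap();

    // The "freed" page is never reused; `current` only ever moves forward.
    assert_ne!(p1, p2);
}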
4 changes: 4 additions & 0 deletions libs/kerla_utils/lib.rs
@@ -11,8 +11,12 @@ extern crate std;
#[macro_use]
extern crate alloc;

#[macro_use]
extern crate log;

pub mod alignment;
pub mod bitmap;
pub mod bitmap_allocator;
pub mod buddy_allocator;
pub mod bump_allocator;
pub mod byte_size;
96 changes: 87 additions & 9 deletions runtime/page_allocator.rs
@@ -1,11 +1,19 @@
use core::ops::Deref;

use crate::{address::PAddr, arch::PAGE_SIZE, bootinfo::RamArea, spinlock::SpinLock};
use arrayvec::ArrayVec;
use bitflags::bitflags;
use kerla_utils::bump_allocator::BumpAllocator as Allocator;
use kerla_utils::alignment::is_aligned;
use kerla_utils::byte_size::ByteSize;

use kerla_utils::bitmap_allocator::BitMapAllocator as Allocator;

// TODO: Fix bugs in the buddy allocator before switching to it.
// use kerla_utils::buddy_allocator::BuddyAllocator as Allocator;

// Uncomment the following line to use BumpAllocator instead.
// use kerla_utils::bump_allocator::BumpAllocator as Allocator;

static ZONES: SpinLock<ArrayVec<Allocator, 8>> = SpinLock::new(ArrayVec::new_const());

fn num_pages_to_order(num_pages: usize) -> usize {
@@ -37,25 +45,96 @@ bitflags! {
#[derive(Debug)]
pub struct PageAllocError;

pub struct OwnedPages {
paddr: PAddr,
num_pages: usize,
}

impl OwnedPages {
fn new(paddr: PAddr, num_pages: usize) -> OwnedPages {
OwnedPages { paddr, num_pages }
}
}

impl Deref for OwnedPages {
type Target = PAddr;

fn deref(&self) -> &Self::Target {
&self.paddr
}
}

impl Drop for OwnedPages {
fn drop(&mut self) {
free_pages(self.paddr, self.num_pages);
}
}

pub fn alloc_pages(num_pages: usize, flags: AllocPageFlags) -> Result<PAddr, PageAllocError> {
let order = num_pages_to_order(num_pages);
let mut zones = ZONES.lock();
for i in 0..zones.len() {
if let Some(paddr) = zones[i].alloc_pages(order).map(PAddr::new) {
for zone in zones.iter_mut() {
if let Some(paddr) = zone.alloc_pages(order).map(PAddr::new) {
if flags.contains(AllocPageFlags::ZEROED) {
unsafe {
paddr
.as_mut_ptr::<u8>()
.write_bytes(0, num_pages * PAGE_SIZE);
}
}

return Ok(paddr);
}
}

Err(PageAllocError)
}

pub fn alloc_pages_owned(
num_pages: usize,
flags: AllocPageFlags,
) -> Result<OwnedPages, PageAllocError> {
let order = num_pages_to_order(num_pages);
let mut zones = ZONES.lock();
for zone in zones.iter_mut() {
if let Some(paddr) = zone.alloc_pages(order).map(PAddr::new) {
if flags.contains(AllocPageFlags::ZEROED) {
unsafe {
paddr
.as_mut_ptr::<u8>()
.write_bytes(0, num_pages * PAGE_SIZE);
}
}

return Ok(OwnedPages::new(paddr, num_pages));
}
}

Err(PageAllocError)
}

/// The caller must ensure that the pages have not already been freed. Keep
/// holding the `OwnedPages` to have the pages freed on an RAII basis instead.
pub fn free_pages(paddr: PAddr, num_pages: usize) {
if cfg!(debug_assertions) {
// Poison the memory.
unsafe {
paddr
.as_mut_ptr::<u8>()
.write_bytes(0xa5, num_pages * PAGE_SIZE);
}
}

let order = num_pages_to_order(num_pages);
let mut zones = ZONES.lock();
for zone in zones.iter_mut() {
if zone.includes(paddr.value()) {
zone.free_pages(paddr.value(), order);
return;
}
}
}

pub fn init(areas: &[RamArea]) {
let mut zones = ZONES.lock();
for area in areas {
@@ -65,10 +144,9 @@ pub fn init(areas: &[RamArea]) {
ByteSize::new(area.len)
);

zones.push(Allocator::new(
area.base.as_mut_ptr(),
area.base.value(),
area.len,
));
debug_assert!(is_aligned(area.base.value(), PAGE_SIZE));
let allocator =
unsafe { Allocator::new(area.base.as_mut_ptr(), area.base.value(), area.len) };
zones.push(allocator);
}
}
