This repository has been archived by the owner on Apr 16, 2024. It is now read-only.

Re-write the allocator to use the linked-list-allocator crate #2

Merged (3 commits) on May 31, 2022
4 changes: 4 additions & 0 deletions .vscode/settings.json
@@ -2,6 +2,10 @@
"editor.formatOnPaste": true,

"rust-analyzer.checkOnSave.allTargets": false,

"rust-analyzer.cargo.target": "riscv32imc-unknown-none-elf",
"rust-analyzer.checkOnSave.target": "riscv32imc-unknown-none-elf",

// "rust-analyzer.cargo.target": "xtensa-esp32-none-elf",
// "rust-analyzer.checkOnSave.target": "xtensa-esp32-none-elf",
}
18 changes: 13 additions & 5 deletions Cargo.toml
@@ -4,12 +4,20 @@ version = "0.1.0"
edition = "2021"

[dependencies]
log = "0.4.17"
bare-metal = "1.0.0"

[target.'cfg(target_arch = "riscv32")'.dependencies]
critical-section = "0.2.7"
linked_list_allocator = { version = "0.9.1", default-features = false, features = ["const_mut_refs"] }
riscv = "0.8.0"

[target.xtensa-esp32-none-elf.dependencies]
critical-section = { version = "0.2.7", features = ["custom-impl"]}
xtensa-lx = { version = "0.7", features = ["esp32"] }
xtensa-lx-rt = { version = "0.11", features = ["esp32"] }
linked_list_allocator = "0.9.1"
xtensa-lx = { version = "0.7.0", features = ["esp32"] }

[target.xtensa-esp32s2-none-elf.dependencies]
linked_list_allocator = { version = "0.9.1", default-features = false, features = ["const_mut_refs"] }
xtensa-lx = { version = "0.7.0", features = ["esp32s2"] }

[target.xtensa-esp32s3-none-elf.dependencies]
linked_list_allocator = "0.9.1"
xtensa-lx = { version = "0.7.0", features = ["esp32s3"] }
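
The dependency changes line up with the rewrite shown in src/lib.rs below: `log` and the custom `critical-section` implementation are gone, `bare-metal` provides the `Mutex` that guards the heap, and every target now depends on `linked_list_allocator` (with `default-features = false` and `const_mut_refs` where the heap must be constructible in a `const` context). A minimal sketch of the resulting access pattern in isolation, assuming the RISC-V target and the same crate versions; the `HEAP` static and `heap_free_bytes` function are illustrative names, not part of this PR:

```rust
use core::cell::RefCell;

use bare_metal::Mutex;
use linked_list_allocator::Heap;
use riscv::interrupt;

// `Heap::empty()` is a `const fn` thanks to the `const_mut_refs` feature,
// so the heap can live in a `static` behind a Mutex<RefCell<...>>.
static HEAP: Mutex<RefCell<Heap>> = Mutex::new(RefCell::new(Heap::empty()));

fn heap_free_bytes() -> usize {
    // The Mutex can only be borrowed with a CriticalSection token, which
    // `interrupt::free` provides while interrupts are disabled.
    interrupt::free(|cs| HEAP.borrow(*cs).borrow_mut().free())
}
```

This is the same `Mutex<RefCell<Heap>>` plus `interrupt::free` pattern that `EspHeap` wraps in the diff below.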
51 changes: 0 additions & 51 deletions src/critical_section_xtensa_singlecore.rs

This file was deleted.

218 changes: 72 additions & 146 deletions src/lib.rs
@@ -1,164 +1,90 @@
#![no_std]
#![feature(alloc_error_handler)]
#![cfg_attr(target_arch = "xtensa", feature(asm_experimental_arch))]

use core::alloc::{GlobalAlloc, Layout};

use log::trace;

#[cfg(target_arch = "xtensa")]
mod critical_section_xtensa_singlecore;
use core::{
alloc::{GlobalAlloc, Layout},
cell::RefCell,
ptr::{self, NonNull},
};

use bare_metal::Mutex;
use linked_list_allocator::Heap;
#[cfg(target_arch = "riscv32")]
use riscv::interrupt;
#[cfg(target_arch = "xtensa")]
critical_section::custom_impl!(critical_section_xtensa_singlecore::XtensaSingleCoreCriticalSection);

/// A simple allocator just using the internal `malloc` implementation.
/// Please note: This currently doesn't honor a non-standard alignment and will
/// silently just use the default.
pub struct EspAllocator;

unsafe impl GlobalAlloc for EspAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// we don't care about the alignment here
malloc(layout.size() as u32) as *mut u8
}

unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
free(ptr as *mut u8);
}
}
use xtensa_lx::interrupt;

#[alloc_error_handler]
fn alloc_error(layout: Layout) -> ! {
panic!("Allocator error {:?}", layout);
pub struct EspHeap {
heap: Mutex<RefCell<Heap>>,
}

#[global_allocator]
static GLOBAL: EspAllocator = EspAllocator;

#[derive(Debug, Copy, Clone)]
struct Allocation {
address: *const u8,
size: usize,
free: bool,
}

static mut ALLOCATIONS: [Option<Allocation>; 128] = [None; 128];
static mut ALLOC_INDEX: isize = -1;

extern "C" {
static _heap_start: u8;
}

pub unsafe extern "C" fn malloc(size: u32) -> *const u8 {
trace!("malloc called {}", size);

let mut candidate_addr = &_heap_start as *const u8;

critical_section::with(|_critical_section| {
let aligned_size = size + if size % 8 != 0 { 8 - size % 8 } else { 0 };

// try to find a previously freed block
let mut reused = 0 as *const u8;
for allocation in ALLOCATIONS.iter_mut() {
memory_fence();
match allocation {
Some(ref mut allocation) => {
if allocation.free && aligned_size <= allocation.size as u32 {
allocation.free = false;
reused = allocation.address;
break;
}
}
None => {}
}
impl EspHeap {
/// Create a new UNINITIALIZED heap allocator
///
/// You must initialize this heap using the
/// [`init`](struct.EspHeap.html#method.init) method before using the
/// allocator.
pub const fn empty() -> EspHeap {
EspHeap {
heap: Mutex::new(RefCell::new(Heap::empty())),
}

if reused.is_null() {
// otherwise allocate after the highest allocated block
if ALLOC_INDEX != -1 {
candidate_addr = ALLOCATIONS[ALLOC_INDEX as usize]
.unwrap()
.address
.offset(ALLOCATIONS[ALLOC_INDEX as usize].unwrap().size as isize);
}

ALLOC_INDEX += 1;

ALLOCATIONS[ALLOC_INDEX as usize] = Some(Allocation {
address: candidate_addr,
size: aligned_size as usize,
free: false,
});
trace!("new allocation idx = {}", ALLOC_INDEX);
} else {
trace!("new allocation at reused block");
candidate_addr = reused;
}

trace!("malloc at {:p}", candidate_addr);
});

return candidate_addr;
}

pub unsafe extern "C" fn free(ptr: *const u8) {
trace!("free called {:p}", ptr);

if ptr.is_null() {
return;
}

critical_section::with(|_critical_section| {
memory_fence();

let alloced_idx = ALLOCATIONS.iter().enumerate().find(|(_, allocation)| {
memory_fence();
let addr = allocation.unwrap().address;
allocation.is_some() && addr == ptr
});

if alloced_idx.is_some() {
let alloced_idx = alloced_idx.unwrap().0;
trace!("free idx {}", alloced_idx);

if alloced_idx as isize == ALLOC_INDEX {
ALLOCATIONS[alloced_idx] = None;
ALLOC_INDEX -= 1;
} else {
ALLOCATIONS[alloced_idx] = ALLOCATIONS[alloced_idx as usize]
.take()
.and_then(|v| Some(Allocation { free: true, ..v }));
}
} else {
panic!("freeing a memory area we don't know of. {:?}", ALLOCATIONS);
}
});
}

#[no_mangle]
pub unsafe extern "C" fn calloc(number: u32, size: u32) -> *const u8 {
trace!("calloc {} {}", number, size);

let ptr = malloc(number * size);
/// Initializes the heap
///
/// This function must be called BEFORE you run any code that makes use of
/// the allocator.
///
/// `start_addr` is the address where the heap will be located.
///
/// `size` is the size of the heap in bytes.
///
/// Note that:
///
/// - The heap grows "upwards", towards larger addresses.
///
/// - The allocator uses the address range `[start_addr, start_addr + size)`;
///   the byte at `start_addr + size` is not touched.
///
/// # Safety
///
/// Obey these or Bad Stuff will happen.
///
/// - This function must be called exactly ONCE.
/// - `size > 0`
pub unsafe fn init(&self, start_addr: usize, size: usize) {
interrupt::free(|cs| self.heap.borrow(*cs).borrow_mut().init(start_addr, size));
}

let mut zp = ptr as *mut u8;
for _ in 0..(number * size) {
zp.write_volatile(0x00);
zp = zp.offset(1);
/// Returns an estimate of the amount of bytes in use.
pub fn used(&self) -> usize {
interrupt::free(|cs| self.heap.borrow(*cs).borrow_mut().used())
}

ptr as *const u8
/// Returns an estimate of the amount of bytes available.
pub fn free(&self) -> usize {
interrupt::free(|cs| self.heap.borrow(*cs).borrow_mut().free())
}
}

#[cfg(target_arch = "riscv32")]
pub fn memory_fence() {
// no-op
}
unsafe impl GlobalAlloc for EspHeap {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
interrupt::free(|cs| {
self.heap
.borrow(*cs)
.borrow_mut()
.allocate_first_fit(layout)
.ok()
.map_or(ptr::null_mut(), |allocation| allocation.as_ptr())
})
}

#[cfg(target_arch = "xtensa")]
pub fn memory_fence() {
unsafe {
core::arch::asm!("memw");
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
interrupt::free(|cs| {
self.heap
.borrow(*cs)
.borrow_mut()
.deallocate(NonNull::new_unchecked(ptr), layout)
});
}
}
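
For reference, a sketch of how a firmware crate would be expected to consume the rewritten allocator: construct an `EspHeap` in a `static`, register it as the global allocator, and call `init` exactly once before anything allocates. The crate path `esp_alloc`, the `_heap_start` linker symbol, and the 64 KiB heap size are assumptions for illustration only; this diff defines the `EspHeap` API but not how a binary wires it up.

```rust
// Fragment of a no_std firmware crate (panic handler and entry point omitted).
extern crate alloc;

use alloc::vec::Vec;
use esp_alloc::EspHeap; // crate path assumed for illustration

#[global_allocator]
static ALLOCATOR: EspHeap = EspHeap::empty();

fn init_heap() {
    // `_heap_start` is a linker-provided symbol; the old implementation
    // referenced one with the same name. 64 KiB is an arbitrary size.
    extern "C" {
        static _heap_start: u8;
    }

    unsafe {
        let heap_start = &_heap_start as *const u8 as usize;
        ALLOCATOR.init(heap_start, 64 * 1024);
    }
}

fn run() {
    init_heap();

    // Once initialized, the usual `alloc` types go through EspHeap.
    let mut xs: Vec<u32> = Vec::new();
    xs.push(42);
    assert!(ALLOCATOR.used() > 0);
}
```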