add enum to define the different stack types
- switch back to the default mm::allocate
stlankes committed Apr 16, 2020
1 parent df91821 commit 544c6e5
Showing 6 changed files with 127 additions and 110 deletions.
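
For orientation before the per-file diffs: the commit replaces the AllocationType bitflags argument of mm::allocate with a plain no_execution boolean (so a call like mm::allocate(KERNEL_STACK_SIZE, true) below requests a non-executable mapping), and turns TaskStacks into an enum over the boot stacks and a single mapped region for every other task. A minimal, self-contained sketch of the new shape, distilled from the diffs below rather than copied from the kernel source:

// Sketch only: the enum added in src/arch/x86_64/kernel/scheduler.rs,
// reduced to a standalone example.
pub struct BootStack {
    /// stack for kernel tasks
    pub stack: usize,
    /// stack to handle interrupts
    pub ist0: usize,
}

pub struct CommonStack {
    /// start address of the allocated virtual memory region
    pub virt_addr: usize,
    /// start address of the allocated physical memory region
    pub phys_addr: usize,
    /// total size of all stacks
    pub total_size: usize,
}

pub enum TaskStacks {
    Boot(BootStack),     // stacks inherited from the boot processor setup
    Common(CommonStack), // one region holding IST0, kernel stack and user stack
}
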
2 changes: 1 addition & 1 deletion src/arch/x86_64/kernel/apic.rs
@@ -485,7 +485,7 @@ pub fn init_x2apic() {
 pub fn init_next_processor_variables(core_id: CoreId) {
     // Allocate stack and PerCoreVariables structure for the CPU and pass the addresses.
     // Keep the stack executable to possibly support dynamically generated code on the stack (see https://security.stackexchange.com/a/47825).
-    let stack = mm::allocate(KERNEL_STACK_SIZE, mm::AllocationType::NORMAL);
+    let stack = mm::allocate(KERNEL_STACK_SIZE, true);
     let boxed_percore = Box::new(PerCoreVariables::new(core_id));
     unsafe {
         intrinsics::volatile_store(&mut (*BOOT_INFO).current_stack_address, stack as u64);
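
A note on the unchanged context above: intrinsics::volatile_store is the unstable intrinsic behind the stable core::ptr::write_volatile. A standalone sketch of the equivalent stable call, with a local variable standing in for (*BOOT_INFO).current_stack_address and a hypothetical stack address:

// Sketch only: the same volatile store written with the stable pointer API.
fn main() {
    let mut current_stack_address: u64 = 0; // stand-in for the BOOT_INFO field
    let stack: usize = 0x4000_0000; // hypothetical allocation address

    unsafe {
        core::ptr::write_volatile(&mut current_stack_address, stack as u64);
    }
    assert_eq!(current_stack_address, 0x4000_0000);
}
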
4 changes: 2 additions & 2 deletions src/arch/x86_64/kernel/gdt.rs
@@ -44,7 +44,7 @@ struct Gdt {
 pub fn init() {
     unsafe {
         // Dynamically allocate memory for the GDT.
-        GDT = ::mm::allocate(mem::size_of::<Gdt>(), ::mm::AllocationType::NORMAL) as *mut Gdt;
+        GDT = ::mm::allocate(mem::size_of::<Gdt>(), false) as *mut Gdt;

         // The NULL descriptor is always the first entry.
         (*GDT).entries[GDT_NULL as usize] = Descriptor::NULL;
@@ -95,7 +95,7 @@ pub fn add_current_core() {
     // Allocate all ISTs for this core.
     // Every task later gets its own IST1, so the IST1 allocated here is only used by the Idle task.
     for i in 0..IST_ENTRIES {
-        let ist = ::mm::allocate(KERNEL_STACK_SIZE, ::mm::AllocationType::EXECUTE_DISABLE);
+        let ist = ::mm::allocate(KERNEL_STACK_SIZE, true);
         boxed_tss.ist[i] = (ist + KERNEL_STACK_SIZE - 0x10) as u64;
     }
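
The expression ist + KERNEL_STACK_SIZE - 0x10 above is the top-of-stack convention used throughout this commit: the usable top sits 16 bytes below the end of the stack, which keeps the pointer 16-byte aligned for page-aligned allocations, and from_boot_stacks in scheduler.rs below inverts the same arithmetic to recover the stack base. A self-contained sketch, with an assumed stack size:

// Sketch only: the top-of-stack arithmetic used in the diff.
const KERNEL_STACK_SIZE: usize = 32 * 1024; // assumed value; the kernel takes it from config.rs

fn top_of_stack(base: usize) -> usize {
    base + KERNEL_STACK_SIZE - 0x10
}

fn base_of_stack(top: usize) -> usize {
    // the inverse, as used by TaskStacks::from_boot_stacks()
    top + 0x10 - KERNEL_STACK_SIZE
}

fn main() {
    let base = 0x4000_0000; // hypothetical page-aligned allocation
    let top = top_of_stack(base);
    assert_eq!(top % 16, 0); // the pointer stays 16-byte aligned
    assert_eq!(base_of_stack(top), base); // round-trips back to the base
}
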
173 changes: 113 additions & 60 deletions src/arch/x86_64/kernel/scheduler.rs
@@ -13,7 +13,7 @@ use arch::x86_64::kernel::idt;
 use arch::x86_64::kernel::irq;
 use arch::x86_64::kernel::percore::*;
 use arch::x86_64::kernel::processor;
-use arch::x86_64::mm::paging::{BasePageSize, PageSize};
+use arch::x86_64::mm::paging::{BasePageSize, PageSize, PageTableEntryFlags};
 use config::*;
 use core::{mem, ptr};
 use environment;
@@ -60,94 +60,150 @@ struct State {
     rip: usize,
 }

-#[derive(Default)]
-pub struct TaskStacks {
-    /// Whether this is a boot stack
-    is_boot_stack: bool,
-    stack_size: usize,
-    /// Stack of the task
+pub struct BootStack {
+    /// stack for kernel tasks
     stack: usize,
     /// stack to handle interrupts
     ist0: usize,
 }

+pub struct CommonStack {
+    /// start address of the allocated virtual memory region
+    virt_addr: usize,
+    /// start address of the allocated physical memory region
+    phys_addr: usize,
+    /// total size of all stacks
+    total_size: usize,
+}
+
+pub enum TaskStacks {
+    Boot(BootStack),
+    Common(CommonStack),
+}
+
 impl TaskStacks {
-    pub fn new(size: usize) -> Self {
-        let stack_size = if size < KERNEL_STACK_SIZE {
+    pub fn new(size: usize) -> TaskStacks {
+        let user_stack_size = if size < KERNEL_STACK_SIZE {
             KERNEL_STACK_SIZE
         } else {
             align_up!(size, BasePageSize::SIZE)
         };
+        let total_size = user_stack_size + DEFAULT_STACK_SIZE + KERNEL_STACK_SIZE;
+        let virt_addr =
+            ::arch::mm::virtualmem::allocate(total_size + 4 * BasePageSize::SIZE).unwrap();
+        let phys_addr = ::arch::mm::physicalmem::allocate(total_size).unwrap();

-        debug!("Create stack with a size of {} KB", stack_size >> 10);
-
-        // Allocate an executable stack to possibly support dynamically generated code on the stack (see https://security.stackexchange.com/a/47825).
-        let stack = ::mm::allocate(
-            stack_size,
-            ::mm::AllocationType::EXECUTE_DISABLE | ::mm::AllocationType::PAGE_GUARD,
-        );
-        debug!("Allocating stack {:#X}", stack);
-        let ist0 = ::mm::allocate(
-            KERNEL_STACK_SIZE,
-            ::mm::AllocationType::EXECUTE_DISABLE | ::mm::AllocationType::PAGE_GUARD,
-        );
-        debug!("Allocating ist0 {:#X}", ist0);
-
-        Self {
-            is_boot_stack: false,
-            stack_size: stack_size,
-            stack: stack,
-            ist0: ist0,
-        }
+        debug!(
+            "Create stacks at {:#X} with a size of {} KB",
+            virt_addr,
+            total_size >> 10
+        );
+
+        let mut flags = PageTableEntryFlags::empty();
+        flags.normal().writable().execute_disable();
+
+        // map IST0 into the address space
+        ::arch::mm::paging::map::<BasePageSize>(
+            virt_addr + BasePageSize::SIZE,
+            phys_addr,
+            KERNEL_STACK_SIZE / BasePageSize::SIZE,
+            flags,
+        );
+
+        // map kernel stack into the address space
+        ::arch::mm::paging::map::<BasePageSize>(
+            virt_addr + KERNEL_STACK_SIZE + 2 * BasePageSize::SIZE,
+            phys_addr + KERNEL_STACK_SIZE,
+            DEFAULT_STACK_SIZE / BasePageSize::SIZE,
+            flags,
+        );
+
+        // map user stack into the address space
+        ::arch::mm::paging::map::<BasePageSize>(
+            virt_addr + KERNEL_STACK_SIZE + DEFAULT_STACK_SIZE + 3 * BasePageSize::SIZE,
+            phys_addr + KERNEL_STACK_SIZE + DEFAULT_STACK_SIZE,
+            user_stack_size / BasePageSize::SIZE,
+            flags,
+        );
+
+        TaskStacks::Common(CommonStack {
+            virt_addr: virt_addr,
+            phys_addr: phys_addr,
+            total_size: total_size,
+        })
     }

-    pub fn from_boot_stacks() -> Self {
+    pub fn from_boot_stacks() -> TaskStacks {
         let tss = unsafe { &(*PERCORE.tss.get()) };
         let stack = tss.rsp[0] as usize + 0x10 - KERNEL_STACK_SIZE;
         debug!("Using boot stack {:#X}", stack);
         let ist0 = tss.ist[0] as usize + 0x10 - KERNEL_STACK_SIZE;
         debug!("IST0 is located at {:#X}", ist0);

-        Self {
-            is_boot_stack: true,
-            stack_size: KERNEL_STACK_SIZE,
+        TaskStacks::Boot(BootStack {
             stack: stack,
             ist0: ist0,
-        }
+        })
     }

     #[inline]
-    pub fn get_stack_size(&self) -> usize {
-        self.stack_size
+    pub fn get_user_stack_size(&self) -> usize {
+        match self {
+            TaskStacks::Boot(_) => 0,
+            TaskStacks::Common(stacks) => {
+                stacks.total_size - DEFAULT_STACK_SIZE - KERNEL_STACK_SIZE
+            }
+        }
     }

     #[inline]
     pub fn get_stack_address(&self) -> usize {
-        self.stack
+        match self {
+            TaskStacks::Boot(stacks) => stacks.stack,
+            TaskStacks::Common(stacks) => {
+                stacks.virt_addr + KERNEL_STACK_SIZE + 2 * BasePageSize::SIZE
+            }
+        }
     }

     #[inline]
     pub fn get_ist0(&self) -> usize {
-        self.ist0
+        match self {
+            TaskStacks::Boot(stacks) => stacks.ist0,
+            TaskStacks::Common(stacks) => stacks.virt_addr + BasePageSize::SIZE,
+        }
     }
 }

 impl Drop for TaskStacks {
     fn drop(&mut self) {
-        if !self.is_boot_stack {
-            debug!(
-                "Deallocating stack {:#X} and ist0 {:#X}",
-                self.stack, self.ist0
-            );
-            ::mm::deallocate(
-                self.stack,
-                self.stack_size,
-                ::mm::AllocationType::EXECUTE_DISABLE | ::mm::AllocationType::PAGE_GUARD,
-            );
-            ::mm::deallocate(
-                self.ist0,
-                KERNEL_STACK_SIZE,
-                ::mm::AllocationType::EXECUTE_DISABLE | ::mm::AllocationType::PAGE_GUARD,
-            );
+        // we should never deallocate a boot stack
+        match self {
+            TaskStacks::Boot(_) => {}
+            TaskStacks::Common(stacks) => {
+                debug!(
+                    "Deallocating stacks at {:#X} with a size of {} KB",
+                    stacks.virt_addr,
+                    stacks.total_size >> 10,
+                );
+
+                ::arch::mm::paging::unmap::<BasePageSize>(
+                    stacks.virt_addr,
+                    stacks.total_size / BasePageSize::SIZE + 4,
+                );
+                ::arch::mm::virtualmem::deallocate(
+                    stacks.virt_addr,
+                    stacks.total_size + 4 * BasePageSize::SIZE,
+                );
+                ::arch::mm::physicalmem::deallocate(stacks.phys_addr, stacks.total_size);
+            }
         }
     }
 }
+
+impl Clone for TaskStacks {
+    fn clone(&self) -> TaskStacks {
+        match self {
+            TaskStacks::Boot(_) => TaskStacks::new(0),
+            TaskStacks::Common(stacks) => {
+                TaskStacks::new(stacks.total_size - DEFAULT_STACK_SIZE - KERNEL_STACK_SIZE)
+            }
+        }
+    }
+}
@@ -168,7 +224,7 @@ impl TaskTLS {
         // We allocate in BasePageSize granularity, so we don't have to manually impose an
         // additional alignment for TLS variables.
         let memory_size = align_up!(tls_allocation_size, BasePageSize::SIZE);
-        let ptr = ::mm::allocate(memory_size, ::mm::AllocationType::EXECUTE_DISABLE);
+        let ptr = ::mm::allocate(memory_size, true);

         // The tls_pointer is the address to the end of the TLS area requested by the task.
         let tls_pointer = ptr + align_up!(tls_size, 32);
@@ -224,11 +280,7 @@ impl Drop for TaskTLS {
             "Deallocate TLS at 0x{:x} (size 0x{:x})",
             self.address, self.size
         );
-        mm::deallocate(
-            self.address,
-            self.size,
-            ::mm::AllocationType::EXECUTE_DISABLE,
-        );
+        mm::deallocate(self.address, self.size);
     }
 }

@@ -270,10 +322,11 @@ impl TaskFrame for Task {

         unsafe {
             // Mark the entire stack with 0xCD.
-            ptr::write_bytes(self.stacks.stack as *mut u8, 0xCD, DEFAULT_STACK_SIZE);
+            //ptr::write_bytes(self.stacks.get_stack_address() as *mut u8, 0xCD, DEFAULT_STACK_SIZE);

             // Set a marker for debugging at the very top.
-            let mut stack = (self.stacks.stack + DEFAULT_STACK_SIZE - 0x10) as *mut usize;
+            let mut stack =
+                (self.stacks.get_stack_address() + DEFAULT_STACK_SIZE - 0x10) as *mut usize;
             *stack = 0xDEAD_BEEFusize;

             // Put the leave_task function on the stack.
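
The three map calls in TaskStacks::new above place IST0, the kernel stack and the user stack in one physically contiguous allocation, while the virtual region is four pages larger than total_size: every stack is bracketed by an unmapped guard page, so an overflow faults instead of silently running into the neighbouring stack. A self-contained sketch of the offsets (the two stack-size constants are assumptions for illustration; the kernel takes them from config.rs):

// Sketch only: the virtual layout produced by TaskStacks::new():
// [guard][IST0][guard][kernel stack][guard][user stack][guard]
const PAGE: usize = 0x1000; // BasePageSize::SIZE
const KERNEL_STACK_SIZE: usize = 32 * 1024; // assumed value
const DEFAULT_STACK_SIZE: usize = 64 * 1024; // assumed value

// Offsets of the three stacks relative to virt_addr, plus the region end.
fn offsets(user_stack_size: usize) -> (usize, usize, usize, usize) {
    let ist0 = PAGE; // one guard page at offset 0
    let kernel = KERNEL_STACK_SIZE + 2 * PAGE; // guard page after IST0
    let user = KERNEL_STACK_SIZE + DEFAULT_STACK_SIZE + 3 * PAGE; // guard after kernel stack
    let end = user + user_stack_size + PAGE; // trailing guard page
    (ist0, kernel, user, end)
}

fn main() {
    let user_stack_size = KERNEL_STACK_SIZE; // the minimum, as in TaskStacks::new(0)
    let total_size = user_stack_size + DEFAULT_STACK_SIZE + KERNEL_STACK_SIZE;
    let (ist0, kernel, user, end) = offsets(user_stack_size);

    // The virtual region is exactly total_size plus the four guard pages.
    assert_eq!(end, total_size + 4 * PAGE);
    println!("IST0 @ {:#x}, kernel @ {:#x}, user @ {:#x}", ist0, kernel, user);
}
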
52 changes: 8 additions & 44 deletions src/mm/mod.rs
@@ -236,66 +236,30 @@ pub fn print_information() {
     virtual_address
 }*/

-bitflags! {
-    pub struct AllocationType: usize {
-        /// code execution shall be disabled for memory referenced
-        const EXECUTE_DISABLE = 1 << 0;
-        /// create a page guard before and after the memory reference
-        const PAGE_GUARD = 1 << 1;
-    }
-}
-
-impl AllocationType {
-    /// An empty set of flags for memory allocation.
-    pub const NORMAL: AllocationType = AllocationType { bits: 0 };
-}
-
-pub fn allocate(sz: usize, alloc: AllocationType) -> usize {
+pub fn allocate(sz: usize, no_execution: bool) -> usize {
     let size = align_up!(sz, BasePageSize::SIZE);

     let physical_address = arch::mm::physicalmem::allocate(size).unwrap();
-    let virtual_address = if alloc.contains(AllocationType::PAGE_GUARD) {
-        arch::mm::virtualmem::allocate(size + 2 * BasePageSize::SIZE).unwrap()
-    } else {
-        arch::mm::virtualmem::allocate(size).unwrap()
-    };

     let count = size / BasePageSize::SIZE;

     let mut flags = PageTableEntryFlags::empty();
     flags.normal().writable();
-    if alloc.contains(AllocationType::EXECUTE_DISABLE) {
+    if no_execution {
         flags.execute_disable();
     }

-    if alloc.contains(AllocationType::PAGE_GUARD) {
-        arch::mm::paging::map::<BasePageSize>(
-            virtual_address + BasePageSize::SIZE,
-            physical_address,
-            count,
-            flags,
-        );
-
-        virtual_address + BasePageSize::SIZE
-    } else {
-        arch::mm::paging::map::<BasePageSize>(virtual_address, physical_address, count, flags);
+    let virtual_address = arch::mm::virtualmem::allocate(size).unwrap();

-        virtual_address
-    }
+    arch::mm::paging::map::<BasePageSize>(virtual_address, physical_address, count, flags);
+
+    virtual_address
 }

-pub fn deallocate(virtual_address: usize, sz: usize, alloc: AllocationType) {
+pub fn deallocate(virtual_address: usize, sz: usize) {
     let size = align_up!(sz, BasePageSize::SIZE);

     if let Some(entry) = arch::mm::paging::get_page_table_entry::<BasePageSize>(virtual_address) {
         arch::mm::paging::unmap::<BasePageSize>(virtual_address, size / BasePageSize::SIZE);
-        if alloc.contains(AllocationType::PAGE_GUARD) {
-            arch::mm::virtualmem::deallocate(
-                virtual_address - BasePageSize::SIZE,
-                size + 2 * BasePageSize::SIZE,
-            );
-        } else {
-            arch::mm::virtualmem::deallocate(virtual_address, size);
-        }
+        arch::mm::virtualmem::deallocate(virtual_address, size);
         arch::mm::physicalmem::deallocate(entry.address(), size);
     } else {
         panic!(
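
allocate and deallocate both round the requested size to page granularity via align_up!, so with the PAGE_GUARD flag gone the two calls are symmetric again: deallocating with the same address and size you allocated always covers the same pages, with no guard-page offset to undo. A standalone sketch of that rounding (align_up! is a kernel macro; this is an equivalent plain function, valid for power-of-two alignments):

// Sketch only: page-size rounding equivalent to the kernel's align_up! macro.
const PAGE: usize = 0x1000; // BasePageSize::SIZE

fn align_up(value: usize, alignment: usize) -> usize {
    // valid for power-of-two alignments
    (value + alignment - 1) & !(alignment - 1)
}

fn main() {
    assert_eq!(align_up(1, PAGE), PAGE);            // one byte still occupies a page
    assert_eq!(align_up(PAGE, PAGE), PAGE);         // already aligned
    assert_eq!(align_up(PAGE + 1, PAGE), 2 * PAGE); // spills into the next page
}
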
4 changes: 2 additions & 2 deletions src/scheduler/mod.rs
@@ -292,10 +292,10 @@ impl PerCoreScheduler {
     #[cfg(target_arch = "x86_64")]
     pub fn set_current_kernel_stack(&self) {
         let current_task_borrowed = self.current_task.borrow();
-        let stack_size = current_task_borrowed.stacks.get_stack_size();
         let tss = unsafe { &mut (*PERCORE.tss.get()) };

-        tss.rsp[0] = (current_task_borrowed.stacks.get_stack_address() + stack_size - 0x10) as u64;
+        tss.rsp[0] =
+            (current_task_borrowed.stacks.get_stack_address() + DEFAULT_STACK_SIZE - 0x10) as u64;
         tss.ist[0] = (current_task_borrowed.stacks.get_ist0() + KERNEL_STACK_SIZE - 0x10) as u64;
     }

2 changes: 1 addition & 1 deletion src/scheduler/task.rs
@@ -451,7 +451,7 @@ impl Task {
             last_stack_pointer: 0,
             last_fpu_state: arch::processor::FPUState::new(),
             core_id: core_id,
-            stacks: TaskStacks::new(task.stacks.get_stack_size()),
+            stacks: task.stacks.clone(),
             next: None,
             prev: None,
             tls: task.tls.clone(),
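
The constructor above now clones the parent's stacks through the new Clone impl in scheduler.rs, which allocates a fresh region of the same user-stack size instead of rebuilding it from a stored stack_size. A reduced, standalone sketch of that semantics; an illustration, not the kernel types:

// Sketch only: Clone allocates new stacks; it never aliases the parent's.
#[derive(Debug)]
enum TaskStacks {
    Boot,                              // boot stacks exist once per core
    Common { user_stack_size: usize }, // stands in for a freshly mapped region
}

impl Clone for TaskStacks {
    fn clone(&self) -> TaskStacks {
        match self {
            // cloning a boot stack requests an ordinary stack of minimal size,
            // mirroring TaskStacks::new(0) in the diff
            TaskStacks::Boot => TaskStacks::Common { user_stack_size: 0 },
            TaskStacks::Common { user_stack_size } => TaskStacks::Common {
                user_stack_size: *user_stack_size,
            },
        }
    }
}

fn main() {
    let parent = TaskStacks::Common { user_stack_size: 64 * 1024 };
    let child = parent.clone(); // in the kernel this maps a brand-new region
    println!("{:?} -> {:?}", parent, child);
}
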
