Kernel: TLS allocation
Allocating a 0x200-byte Thread Local Storage region for every thread,
and storing a pointer to it in the ThreadStruct.
Orycterope committed Jun 17, 2019
1 parent 0f6c835 commit 6c489ef
Showing 6 changed files with 278 additions and 0 deletions.
1 change: 1 addition & 0 deletions Cargo.lock


31 changes: 31 additions & 0 deletions kernel/src/process.rs
@@ -20,9 +20,11 @@ use failure::Backtrace;
use crate::frame_allocator::PhysicalMemRegion;
use crate::sync::RwLock;

pub mod thread_local_storage;
mod capabilities;
pub use self::capabilities::ProcessCapabilities;
use crate::paging::{InactiveHierarchy, InactiveHierarchyTrait};
use self::thread_local_storage::TLSManager;

/// The struct representing a process. There's one for every process.
///
@@ -53,6 +55,9 @@ pub struct ProcessStruct
/// Permissions of this process.
pub capabilities: ProcessCapabilities,

/// Tracks the allocated Thread Local Storage regions of this process, both in use and free.
pub tls_manager: Mutex<TLSManager>,

/// An array of the created but not yet started threads.
///
/// When we create a thread, we return a handle to userspace containing a weak reference to the thread,
@@ -108,6 +113,9 @@ pub struct ThreadStruct
/// The currently running process is indirectly kept alive by the `CURRENT_THREAD` global in scheduler.
pub process: Arc<ProcessStruct>,

/// Pointer to the Thread Local Storage region of this thread.
pub tls: VirtualAddress,

/// Argument passed to the entrypoint on first schedule.
pub arg: usize
}
@@ -418,6 +426,7 @@ impl ProcessStruct
threads: SpinLockIRQ::new(Vec::new()),
phandles: SpinLockIRQ::new(HandleTable::default()),
killed: AtomicBool::new(false),
tls_manager: Mutex::new(TLSManager::default()),
thread_maternity: SpinLock::new(Vec::new()),
capabilities
}
@@ -465,6 +474,7 @@ impl ProcessStruct
phandles: SpinLockIRQ::new(HandleTable::default()),
killed: AtomicBool::new(false),
thread_maternity: SpinLock::new(Vec::new()),
tls_manager: Mutex::new(TLSManager::default()),
capabilities: ProcessCapabilities::default(),
}
)
@@ -517,6 +527,8 @@ impl ThreadStruct
/// The thread's only strong reference is stored in the process' maternity,
/// and we return only a weak reference to it, which can directly be put in a thread_handle.
pub fn new(belonging_process: &Arc<ProcessStruct>, ep: VirtualAddress, stack: VirtualAddress, arg: usize) -> Result<Weak<Self>, KernelError> {
// get its process memory
let mut pmemory = belonging_process.pmemory.lock();

// allocate its kernel stack
let kstack = KernelStack::allocate_stack()?;
@@ -527,12 +539,16 @@
// the state of the process, Stopped
let state = ThreadStateAtomic::new(ThreadState::Stopped);

// allocate its thread local storage region
let tls = belonging_process.tls_manager.lock().allocate_tls(&mut pmemory)?;

let t = Arc::new(
ThreadStruct {
state,
kstack,
hwcontext : empty_hwcontext,
process: Arc::clone(belonging_process),
tls,
arg
}
);
@@ -599,12 +615,20 @@ impl ThreadStruct
// the saved esp will be overwritten on schedule-out anyway
let hwcontext = SpinLockIRQ::new(ThreadHardwareContext::default());

// create our thread local storage region
let tls = {
let mut pmemory = process.pmemory.lock();
let mut tls_manager = process.tls_manager.lock();
tls_manager.allocate_tls(&mut pmemory).expect("Failed to allocate TLS for first thread")
};

let t = Arc::new(
ThreadStruct {
state,
kstack,
hwcontext,
process: Arc::clone(&process),
tls,
arg: 0
}
);
@@ -668,7 +692,14 @@ impl ThreadStruct
}

impl Drop for ThreadStruct {
/// Late thread death notifications:
///
/// * notifies our process that our TLS can be re-used.
fn drop(&mut self) {
unsafe {
// safe: we're being dropped, our TLS will not be reused by us.
self.process.tls_manager.lock().free_tls(self.tls);
}
// todo this should be a debug !
info!("💀 Dropped a thread : {}", self.process.name)
}
186 changes: 186 additions & 0 deletions kernel/src/process/thread_local_storage.rs
@@ -0,0 +1,186 @@
//! TLS manager
//!
//! # Abstract
//!
//! For each thread of a process, the kernel allocates a 0x200-byte "Thread Local Storage"
//! memory region in UserLand. In this region resides the 0x100-byte IPC command buffer,
//! which is used by the user for passing IPC arguments, and a pointer to the user-controlled
//! "thread context", which will likely be used for holding userspace thread local variables.
//!
//! Each thread in a process has its own private TLS, and from userspace its address can be found
//! at any time by reading an architecture-specific register (aarch64 uses `tpidrro_el0`, x86 uses the
//! `gs` segment selector).
//!
//! # Location
//!
//! The TLS content is defined by the [TLS] structure. It is a 0x200-byte memory area that lives
//! in UserLand so it can be accessed and modified by the user.
//! The user is allowed to access and modify the TLSs of the other threads of its process if it
//! manages to find their location, but this is not advised, as it serves little purpose.
//!
//! Kernel-side, each thread holds a raw pointer to its TLS (`*mut TLS`) in its [ThreadStruct].
//! This pointer is used by the kernel to get the thread's `ipc_command_buffer` address,
//! and is restored as part of hardware context on every context-switch.
//!
//! # Allocation
//!
//! Each process holds a [TLSManager] in its ProcessStruct, which manages the TLSs for this process,
//! keeps track of which ones are in use and which ones are free, and tries to re-use free TLSs when
//! spawning a thread.
//!
//! When a thread is being created, it asks its process's `TLSManager` for a TLS via [allocate_tls],
//! and saves the returned pointer in its `ThreadStruct`.
//!
//! When a thread dies, it notifies its process's `TLSManager` via [free_tls], so its TLS can be re-used.
//!
//! TLSs are only 0x200 bytes, so the `TLSManager` groups them together to fit inside a page,
//! and allocates a new page whenever all existing ones are full and cannot satisfy a TLS allocation.
//!
//! [TLS]: sunrise_libkern::TLS
//! [TLSManager]: thread_local_storage::TLSManager
//! [allocate_tls]: thread_local_storage::TLSManager::allocate_tls
//! [free_tls]: thread_local_storage::TLSManager::free_tls
use crate::VirtualAddress;
use crate::PAGE_SIZE;
use crate::paging::process_memory::ProcessMemory;
use crate::paging::MappingAccessRights;
use crate::error::KernelError;
use sunrise_libutils::bit_array_first_zero;
use sunrise_libkern::{MemoryType, TLS};
use core::mem::size_of;
use bit_field::BitArray;
use alloc::vec::Vec;

/// Manages a page containing 8 TLS
///
/// A TLS being only 0x200 bytes, the kernel aggregates the TLSs of the same process in groups of 8
/// so that they fit in one page.
///
/// # Memory leak
///
/// Dropping this struct will leak the page, until the process is killed and all its memory is freed.
/// See [TLSManager] for more on this topic.
#[derive(Debug)]
struct TLSPage {
/// Address of the page, in UserLand.
page_address: VirtualAddress,
/// Bitmap indicating if the TLS is in use (`1`) or free (`0`).
usage: [u8; PAGE_SIZE / size_of::<TLS>() / 8]
}

impl TLSPage {

/// Allocates a new page holding 8 TLSs.
///
/// The page is user read-write, and its memory type is `ThreadLocal`.
///
/// # Error
///
/// Fails if the allocation fails.
fn new(pmemory: &mut ProcessMemory) -> Result<Self, KernelError> {
let addr = pmemory.find_available_space(PAGE_SIZE)?;
pmemory.create_regular_mapping(addr, PAGE_SIZE, MemoryType::ThreadLocal, MappingAccessRights::u_rw())?;
Ok(TLSPage {
page_address: addr,
usage: [0u8; PAGE_SIZE / size_of::<TLS>() / 8]
})
}

/// Finds an available slot in the TLSPage, marks it allocated, and gives back a pointer to it.
///
/// If no slot was available, this function returns `None`.
///
/// The returned TLS still has to be bzeroed, as it may contain the data of a previous thread.
fn allocate_tls(&mut self) -> Option<VirtualAddress> {
let index = bit_array_first_zero(&self.usage)?;
self.usage.set_bit(index, true);
Some(self.page_address + index * size_of::<TLS>())
}

/// Marks a TLS in this TLSPage as free so it can be used by the next spawned thread.
///
/// # Panics
///
/// Panics if `address` does not fall in this TLSPage, is not a valid offset, or is already marked free.
fn free_tls(&mut self, address: VirtualAddress) {
debug_assert!(address.floor() == self.page_address, "Freed TLS ptr is outside of TLSPage.");
debug_assert!(address.addr() % size_of::<TLS>() == 0, "Freed TLS ptr is not TLS size aligned.");
let index = (address - self.page_address) / size_of::<TLS>();
debug_assert!(self.usage.get_bit(index), "Freed TLS was not marked occupied");
self.usage.set_bit(index, false);
}
}

// size_of::<TLS>() is expected to divide PAGE_SIZE evenly.
const_assert_eq!(PAGE_SIZE % size_of::<TLS>(), 0);

/// TLS allocator
///
/// Each process holds a `TLSManager` in its [ProcessStruct].
///
/// When a thread is being created, we ask the `TLSManager` to allocate a TLS for it, and when
/// it dies we give it back to the manager so it can be re-used the next time this process spawns a thread.
///
/// When all of its TLSs are occupied, the `TLSManager` will expand its memory by allocating a new page.
///
/// # Memory leak
///
/// The `TLSManager` will never free the pages it manages, and they are leaked when the `TLSManager` is dropped.
/// They will become available again after the process dies and its [ProcessMemory] is freed.
///
/// A `TLSManager` will always be dropped at the process's death, at the same time as the `ProcessMemory`.
/// This prevents a dependency in the order in which the `TLSManager` and the `ProcessMemory` are dropped.
///
/// [ProcessStruct]: crate::process::ProcessStruct
#[derive(Debug, Default)]
pub struct TLSManager {
/// Vec of tracked pages. When all slots are occupied, we allocate a new page.
tls_pages: Vec<TLSPage>
}

impl TLSManager {
/// Allocates a new TLS.
///
/// This function will try to re-use free TLSs, and will only allocate when all TLSs are in use.
///
/// The returned TLS still has to be bzeroed, as it may contain the data of a previous thread.
///
/// # Error
///
/// Fails if the allocation fails.
pub fn allocate_tls(&mut self, pmemory: &mut ProcessMemory) -> Result<VirtualAddress, KernelError> {
for tls_page in &mut self.tls_pages {
if let Some(tls) = tls_page.allocate_tls() {
return Ok(tls);
}
}
// no free slot, we need to allocate a new page.
let mut new_tls_page = TLSPage::new(pmemory)?;
let tls = new_tls_page.allocate_tls().expect("Empty TLSPage can't allocate");
self.tls_pages.push(new_tls_page);
Ok(tls)
}


/// Marks this TLS as free, so it can be re-used by a future spawned thread.
///
/// # Unsafety
///
/// The TLS will be reassigned, so it must never be used again after calling this function.
///
/// # Panics
///
/// Panics if the TLS is not managed by this TLSManager, doesn't have a valid offset, or is already marked free.
pub unsafe fn free_tls(&mut self, tls: VirtualAddress) {
// round down ptr to find out which page it belongs to.
let tls_page_ptr = tls.floor();
for tls_page in &mut self.tls_pages {
if tls_page.page_address == tls_page_ptr {
tls_page.free_tls(tls);
return;
}
}
panic!("Freed TLS {:?} is not in TLSManager.", tls);
}
}
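
For reference, here is a standalone sketch (not part of this commit) of the slot arithmetic that `TLSPage` relies on: with `PAGE_SIZE = 0x1000` and `size_of::<TLS>() = 0x200`, a page carries 8 slots, so one usage byte per page suffices, and slot addresses and indices convert with simple arithmetic.

```rust
// Standalone illustration of TLSPage's slot arithmetic; the constants mirror
// the values documented in this commit (PAGE_SIZE = 0x1000, TLS size = 0x200).
const PAGE_SIZE: usize = 0x1000;
const TLS_SIZE: usize = 0x200;
const SLOTS_PER_PAGE: usize = PAGE_SIZE / TLS_SIZE; // 8 slots => 1 bitmap byte per page

/// Address of slot `index` in a page, as computed by `TLSPage::allocate_tls`.
fn slot_address(page_address: usize, index: usize) -> usize {
    page_address + index * TLS_SIZE
}

/// Index of a TLS inside its page, as computed by `TLSPage::free_tls`.
fn slot_index(page_address: usize, tls_address: usize) -> usize {
    (tls_address - page_address) / TLS_SIZE
}

fn main() {
    let page = 0x0040_0000; // hypothetical page address in UserLand
    assert_eq!(SLOTS_PER_PAGE, 8);
    assert_eq!(slot_address(page, 3), 0x0040_0600);
    assert_eq!(slot_index(page, slot_address(page, 3)), 3);
}
```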
8 changes: 8 additions & 0 deletions kernel/src/scheduler.rs
@@ -9,6 +9,7 @@ use crate::i386::process_switch::process_switch;
use crate::sync::{Lock, SpinLockIRQ, SpinLockIRQGuard};
use core::sync::atomic::Ordering;
use crate::error::{UserspaceError};
use sunrise_libkern::TLS;

/// An Arc to the currently running thread.
///
@@ -314,5 +315,12 @@ pub fn scheduler_first_schedule<F: FnOnce()>(current_thread: Arc<ThreadStruct>,
crate::i386::instructions::interrupts::sti();
}

// memset the TLS, to clear the previous owner's data.
// we do it here so we don't have to CrossProcessMap it earlier.
unsafe {
// safe: we manage this memory, ptr is aligned, and 0 is valid for every field of the TLS.
core::ptr::write_bytes(get_current_thread().tls.addr() as *mut TLS, 0u8, 1);
}

jump_to_entrypoint()
}
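
The `sunrise_libkern::TLS` struct imported and zeroed here is defined outside this diff; the module documentation above only pins down that it is 0x200 bytes and holds the 0x100-byte IPC command buffer plus a pointer to the user-controlled thread context. A minimal sketch consistent with those constraints, where field names, ordering and padding are assumptions, could look like this:

```rust
// Hypothetical layout for the 0x200-byte TLS region zeroed above. Only the
// sizes (0x100-byte IPC command buffer, 0x200 bytes total) come from the
// commit's documentation; field names, ordering and padding are guesses.
#[repr(C)]
pub struct Tls {
    /// 0x100-byte buffer used for passing IPC arguments.
    pub ipc_command_buffer: [u8; 0x100],
    /// Reserved space up to the last pointer-sized slot of the region.
    _reserved: [u8; 0x100 - core::mem::size_of::<usize>()],
    /// Pointer to the user-controlled "thread context".
    pub ptr_thread_context: usize,
}

fn main() {
    // Every field tolerates an all-zero value, so blanket-zeroing the region,
    // as scheduler_first_schedule does with core::ptr::write_bytes, is valid here.
    assert_eq!(core::mem::size_of::<Tls>(), 0x200);
}
```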
4 changes: 4 additions & 0 deletions libkern/Cargo.toml
@@ -13,3 +13,7 @@ bitfield = "0.13"
[dependencies.lazy_static]
features = ["spin_no_std"]
version = "1.3.0"

[dependencies.static_assertions]
version = "0.3.1"
features = ["nightly"]
