TLS: safety notices
Orycterope committed Aug 5, 2019
1 parent e9420ca commit aa1b5eb
Showing 9 changed files with 122 additions and 48 deletions.
36 changes: 23 additions & 13 deletions kernel/src/cpu_locals.rs
@@ -121,12 +121,12 @@ static CPU_LOCAL_REGIONS: Once<Vec<CpuLocalRegion>> = Once::new();
/// # Panics
///
/// Panics if `cpu_id` is greater than the `cpu_count` that was supplied to [`init_cpu_locals`].
- pub fn get_cpu_locals_ptr_for_core(cpu_id: usize) -> *mut u8 {
+ pub fn get_cpu_locals_ptr_for_core(cpu_id: usize) -> *const u8 {
CPU_LOCAL_REGIONS.r#try()
.expect("CPU_LOCAL_REGIONS not initialized")
.get(cpu_id)
.unwrap_or_else(|| panic!("cpu locals not initialized for cpu id {}", cpu_id))
- .tcb()
+ .tcb() as *const ThreadControlBlock as *const u8
}

/// Initializes cpu locals during early boot stage.
@@ -188,7 +188,7 @@ pub fn init_cpu_locals(cpu_count: usize) {
.expect("GDT not initialized")
.lock();
gdt.table[GdtIndex::KTls as usize].set_base(
- cpu_local_regions[0].tcb() as usize as u32
+ cpu_local_regions[0].tcb() as *const _ as usize as u32
);
gdt.commit(None, None, None, None, None, None);

@@ -244,10 +244,12 @@ impl CpuLocalRegion {
///
/// For TLS to work, the value stored at this address should be the address itself, i.e.
/// having a pointer pointing to itself.
- fn tcb(&self) -> *mut u8 {
+ fn tcb(&self) -> &ThreadControlBlock {
unsafe {
- // safe: guaranteed to be aligned, and still in the allocation
- (self.ptr as *mut u8).add(self.tcb_offset)
+ // safe: - guaranteed to be aligned, and still in the allocation,
+ //       - no one should ever have a mut reference to the ThreadControlBlock after its
+ //         initialisation.
+ &*((self.ptr + self.tcb_offset) as *const ThreadControlBlock)
}
}

@@ -299,10 +301,14 @@ impl CpuLocalRegion {
tcb_offset + size_of::<ThreadControlBlock>(),
tcb_align
).unwrap();
- let alloc = unsafe { alloc_zeroed(layout) };
+ let alloc = unsafe {
+     // safe: layout.size >= sizeof::<TCB> -> layout.size != 0
+     alloc_zeroed(layout)
+ };
assert!(!alloc.is_null(), "cpu_locals: failed static area allocation");

unsafe {
+ // safe: everything is done within our allocation, u8 is always aligned.
// copy data
core::ptr::copy_nonoverlapping(
block_src as *const [u8] as *const u8,
@@ -317,19 +323,23 @@ impl CpuLocalRegion {
tp_self_ptr: alloc.add(tcb_offset) as *const ThreadControlBlock
}
);
- Self {
-     ptr: alloc as usize,
-     layout,
-     tcb_offset
- }
+ };
+ Self {
+     ptr: alloc as usize,
+     layout,
+     tcb_offset
+ }
}
}

impl Drop for CpuLocalRegion {
/// Dropping a CpuLocalRegion deallocates it.
fn drop(&mut self) {
- unsafe { dealloc(self.ptr as *mut u8, self.layout) };
+ unsafe {
+     // safe: - self.ptr is obviously allocated.
+     //       - self.layout is the same argument that was used for alloc.
+     dealloc(self.ptr as *mut u8, self.layout)
+ };
}
}

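A note on the `tcb()` convention in this file: x86 ELF TLS requires the word at `gs:0` to hold its own address, because compiled TLS accesses first load `gs:0` to locate the TCB. A minimal standalone sketch of that self-pointer invariant (the struct name mirrors the kernel's `ThreadControlBlock`, but this single-field layout is illustrative only):

```rust
/// Illustrative stand-in for the kernel's ThreadControlBlock: its first
/// field must hold the address of the block itself.
#[repr(C)]
struct ThreadControlBlock {
    tp_self_ptr: *const ThreadControlBlock,
}

fn main() {
    let mut tcb = ThreadControlBlock { tp_self_ptr: core::ptr::null() };
    // What the allocation code in this diff does with
    // `tp_self_ptr: alloc.add(tcb_offset) as *const ThreadControlBlock`:
    let addr = &tcb as *const ThreadControlBlock;
    tcb.tp_self_ptr = addr;
    // The invariant documented on `tcb()`: the value stored at the TCB's
    // address is the address itself.
    assert_eq!(tcb.tp_self_ptr as usize, &tcb as *const _ as usize);
}
```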
4 changes: 2 additions & 2 deletions kernel/src/i386/gdt.rs
@@ -11,7 +11,7 @@
//! | Index | Found in | Maps to | Purpose |
//! |--------------------------|----------------------------------------|--------------------------------|-------------------------------------------------------------------|
//! | [`GdtIndex::Null`] | nowhere (hopefully) | _ | _ |
- //! | [`GdtIndex::KCode`] | `cs`, while in kernel code | flat: `0x00000000..0xffffffff` | kernel's code segment | |
+ //! | [`GdtIndex::KCode`] | `cs`, while in kernel code | flat: `0x00000000..0xffffffff` | kernel's code segment |
//! | [`GdtIndex::KData`] | `ds`, `es`, while in kernel code | flat: `0x00000000..0xffffffff` | kernel's data segment |
//! | [`GdtIndex::KTls`] | `gs`, while in kernel code | kernel's cpu-locals | kernel sets-up cpu-locals at this address |
//! | [`GdtIndex::KStack`] | `ss`, while in kernel code | flat: `0x00000000..0xffffffff` | kernel's stack segment |
@@ -257,7 +257,7 @@ pub fn init_gdt() {
fault_task.eip = 0; // will be set by IDT init.
let fault_task_ref: &'static TssStruct = unsafe {
// creating a static ref to tss.
- // kinda-safe: the tss is in a static so it is 'static, but is behind a lock
+ // safety: the tss is in a static so it is 'static, but is behind a lock
// and will still be accessed by the hardware with no consideration for the lock.
(&*fault_task as *const TssStruct).as_ref().unwrap()
};
9 changes: 7 additions & 2 deletions kernel/src/i386/process_switch.rs
@@ -274,9 +274,14 @@ pub unsafe fn prepare_for_first_schedule(t: &ThreadStruct, entrypoint: usize, us
/// At this point, interrupts are still off. This function should ensure the thread is properly
/// switched (set up ESP0, IOPB and whatnot) and call [`scheduler_first_schedule`].
///
+ /// # Safety:
+ ///
+ /// * Interrupts must be disabled.
+ /// * Arguments must respect the [`prepare_for_first_schedule`] ABI, and be popped into registers.
+ ///
/// [`scheduler_first_schedule`]: crate::scheduler::scheduler_first_schedule.
#[naked]
- fn first_schedule() {
+ unsafe fn first_schedule() {
// just get the ProcessStruct pointer in $edi, the entrypoint in $eax, and call a rust function
unsafe {
asm!("
@@ -311,7 +316,7 @@ fn first_schedule() {

// call the scheduler to finish the high-level process switch mechanics
unsafe {
- // safety: interrupts are off
+ // safe: interrupts are off
crate::scheduler::scheduler_first_schedule(current, || jump_to_entrypoint(entrypoint, userspace_stack, userspace_arg));
}

8 changes: 7 additions & 1 deletion kernel/src/panic.rs
@@ -61,10 +61,16 @@ pub enum PanicOrigin<'a> {
/// Takes a panic origin, so we can personalize the kernel panic message.
pub fn kernel_panic(panic_origin: &PanicOrigin) -> ! {

+ // TODO: permanently_disable_interrupts shouldn't be unsafe.
+ // BODY: disabling interrupts doesn't break any safety guidelines, and is perfectly safe as far as rustc is concerned.
// Disable interrupts forever!
unsafe { sync::permanently_disable_interrupts(); }
// Don't deadlock in the logger
- unsafe { SerialLogger.force_unlock(); }
+ unsafe {
+     // safe: All CPUs are halted at this point, and interrupts are stopped.
+     //       Any code relying on locked mutex will not run anymore, so unlocking mutexes is fine now.
+     SerialLogger.force_unlock();
+ }

// Get the process we were running, and its name. Gonna be quite useful.
let current_thread = try_get_current_thread();
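The new comment carries the whole justification for `force_unlock`: once the kernel is panicking, no other code will ever contend for the lock again. A small sketch of the same move using the `spin` crate's `Mutex` (assuming `spin` as a dependency; the real `SerialLogger` wraps such a lock):

```rust
use spin::Mutex;

// Stand-in for the serial logger's lock.
static LOGGER: Mutex<()> = Mutex::new(());

fn report_panic() {
    // If the panicking code held LOGGER, a plain lock() here would deadlock.
    unsafe {
        // safe under the panic-path assumption: nothing else will ever run
        // again, so nobody can observe the forcibly released critical section.
        LOGGER.force_unlock();
    }
    let _guard = LOGGER.lock();
    // ... write the panic report over serial ...
}

fn main() {
    report_panic();
}
```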
4 changes: 2 additions & 2 deletions kernel/src/scheduler.rs
@@ -292,7 +292,7 @@ where

let whoami = if !Arc::ptr_eq(&process_b, &proc) {
unsafe {
- // safety: interrupts are off
+ // safety: interrupts are disabled by the interrupt_lock.
process_switch(process_b, proc)
}
} else {
@@ -306,7 +306,7 @@
// If previously running thread had deleted all other references to itself, this
// is where its drop actually happens
unsafe {
- // safety: interrupts are off.
+ // safety: interrupts are disabled by the interrupt_lock.
set_current_thread(whoami.clone(), || lock.lock())
}
}
2 changes: 1 addition & 1 deletion libkern/src/lib.rs
@@ -335,7 +335,7 @@ pub struct TLS {
/// Buffer used for IPC. Kernel reads, interprets, and copies data from/to it.
pub ipc_command_buffer: IpcBuffer,
/// reserved or unknown.
- _reserved1: [u8; 0x200 - size_of::<IpcBuffer>() - 2 * size_of::<usize>() - (16 - size_of::<*mut TLS>())],
+ _reserved1: [u8; 0x200 - 16 - size_of::<IpcBuffer>() - size_of::<usize>()],
/// User controlled pointer to thread context. Not observed by the kernel.
pub ptr_thread_context: usize,
}
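The corrected `_reserved1` formula exists so the whole structure stays exactly 0x200 bytes: 16 bytes of leading fields, the IPC buffer, padding, and the trailing `usize`. A compilable sketch of the arithmetic with stand-in types (the 0x100 `IpcBuffer` size and the 16-byte header are assumptions for illustration only):

```rust
use core::mem::size_of;

// Stand-in for libkern's IpcBuffer; any size works, the padding absorbs
// whatever is left of the 0x200 bytes.
#[repr(C)]
struct IpcBuffer([u8; 0x100]);

// Assumed layout: 16 bytes of leading fields, the IPC buffer, padding,
// then the trailing pointer-sized field.
#[repr(C)]
struct Tls {
    _header: [u8; 16],
    ipc_command_buffer: IpcBuffer,
    _reserved1: [u8; 0x200 - 16 - size_of::<IpcBuffer>() - size_of::<usize>()],
    ptr_thread_context: usize,
}

fn main() {
    // The property the corrected formula guarantees.
    assert_eq!(size_of::<Tls>(), 0x200);
}
```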
7 changes: 6 additions & 1 deletion libuser/src/syscalls.rs
@@ -463,11 +463,16 @@ pub fn map_mmio_region(physical_address: usize, size: usize, virtual_address: us
///
/// `fs` is used instead of `gs`, because reasons.
///
+ /// # Safety
+ ///
+ /// `address` should point to a valid TLS image, unique to the current thread.
+ /// Setting `gs` to random data, malformed image, or shared image is UB.
+ ///
/// # Errors
///
/// * The whole initial design of TLS on x86 should be considered an error.
/// * No returned error otherwise.
- pub fn set_thread_area(address: usize) -> Result<(), KernelError> {
+ pub unsafe fn set_thread_area(address: usize) -> Result<(), KernelError> {
unsafe {
syscall(nr::SetThreadArea, address, 0, 0, 0, 0, 0)?;
Ok(())
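Marking the wrapper `unsafe fn` moves the proof obligation to the call site. A sketch of the resulting calling pattern with a stubbed-out syscall (the body below only simulates validation; it is not the real `SetThreadArea`):

```rust
/// Stub following the pattern this diff introduces: an `unsafe fn` whose
/// documentation states the contract rather than promising safety.
///
/// # Safety
///
/// `address` must point to a well-formed TLS image owned by the calling thread.
unsafe fn set_thread_area(address: usize) -> Result<(), &'static str> {
    // The real function would trap into the kernel here.
    if address == 0 { Err("invalid TLS address") } else { Ok(()) }
}

fn main() {
    // A buffer standing in for a real, thread-unique TLS image.
    let tls_image = [0u8; 0x200];
    let res = unsafe {
        // safe: `tls_image` is owned by this thread and not shared.
        set_thread_area(tls_image.as_ptr() as usize)
    };
    assert!(res.is_ok());
}
```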
42 changes: 30 additions & 12 deletions libuser/src/thread_local_storage.rs
@@ -163,12 +163,20 @@ impl TlsElf {

/// Calls [`syscalls::set_thread_area`] with the address of this TlsElf's [`ThreadControlBlock`].
///
+ /// # Safety
+ ///
+ /// The TlsElf should not be enabled_for_current_thread by any other thread.
+ /// Having a TLS shared by multiple threads is UB.
+ ///
/// # Panics
///
/// Panics if the syscall returned an error, as this is unrecoverable.
- pub fn enable_for_current_thread(&self) {
+ pub unsafe fn enable_for_current_thread(&self) {
unsafe {
- syscalls::set_thread_area(self.static_region.tcb() as usize)
+ // safe: TlsElf is RAII so self is a valid well-formed TLS region.
+ //       However, we cannot guarantee that it's not used by anybody else,
+ //       so propagate this constraint.
+ syscalls::set_thread_area(self.static_region.tcb() as *const _ as usize)
.expect("Cannot set thread TLS pointer");
}
}
@@ -219,10 +227,12 @@ impl ThreadLocalStaticRegion {
///
/// For TLS to work, the value stored at this address should be the address itself, i.e.
/// having a pointer pointing to itself.
- fn tcb(&self) -> *mut u8 {
+ fn tcb(&self) -> &ThreadControlBlock {
unsafe {
- // safe: guaranteed to be aligned, and still in the allocation
- (self.ptr as *mut u8).add(self.tcb_offset)
+ // safe: - guaranteed to be aligned, and still in the allocation,
+ //       - no one should ever have a mut reference to the ThreadControlBlock after its
+ //         initialisation.
+ &*((self.ptr + self.tcb_offset) as *const ThreadControlBlock)
}
}

@@ -274,10 +284,14 @@ impl ThreadLocalStaticRegion {
tcb_offset + size_of::<ThreadControlBlock>(),
tcb_align
).unwrap();
- let alloc = unsafe { alloc_zeroed(layout) };
+ let alloc = unsafe {
+     // safe: layout.size >= sizeof::<TCB> -> layout.size != 0
+     alloc_zeroed(layout)
+ };
assert!(!alloc.is_null(), "thread_locals: failed static area allocation");

unsafe {
+ // safe: everything is done within our allocation, u8 is always aligned.
// copy data
core::ptr::copy_nonoverlapping(
block_src as *const [u8] as *const u8,
@@ -292,19 +306,23 @@
tp_self_ptr: alloc.add(tcb_offset) as *const ThreadControlBlock
}
);
- Self {
-     ptr: alloc as usize,
-     layout,
-     tcb_offset
- }
+ };
+ Self {
+     ptr: alloc as usize,
+     layout,
+     tcb_offset
+ }
}
}

impl Drop for ThreadLocalStaticRegion {
/// Dropping a ThreadLocalStaticRegion deallocates it.
fn drop(&mut self) {
- unsafe { dealloc(self.ptr as *mut u8, self.layout) };
+ unsafe {
+     // safe: - self.ptr is obviously allocated.
+     //       - self.layout is the same argument that was used for alloc.
+     dealloc(self.ptr as *mut u8, self.layout)
+ };
}
}

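Both `ThreadLocalStaticRegion` here and the kernel's `CpuLocalRegion` follow the same allocation discipline the new comments spell out: a provably non-zero `Layout`, a null check, and a `Drop` that hands the very same layout back to `dealloc`. A condensed sketch of that RAII shape (sizes here are arbitrary):

```rust
use std::alloc::{alloc_zeroed, dealloc, Layout};

/// Minimal RAII region mirroring the pattern in this diff.
struct Region {
    ptr: usize,
    layout: Layout,
}

impl Region {
    fn allocate(size: usize, align: usize) -> Region {
        // alloc_zeroed with a zero-sized layout would be UB, so rule it out.
        assert!(size != 0, "zero-sized region");
        let layout = Layout::from_size_align(size, align).unwrap();
        let alloc = unsafe {
            // safe: layout.size() != 0 was asserted above.
            alloc_zeroed(layout)
        };
        assert!(!alloc.is_null(), "failed region allocation");
        Region { ptr: alloc as usize, layout }
    }
}

impl Drop for Region {
    fn drop(&mut self) {
        unsafe {
            // safe: - self.ptr came from alloc_zeroed,
            //       - self.layout is the same layout used for the allocation.
            dealloc(self.ptr as *mut u8, self.layout)
        };
    }
}

fn main() {
    let _region = Region::allocate(0x200, 16);
} // deallocated here
```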
58 changes: 44 additions & 14 deletions libuser/src/threads.rs
@@ -143,12 +143,29 @@ fn get_my_tls_region() -> *mut TLS {
}


- /// Get a pointer to this thread's [ThreadContext], from the [TLS] region pointed to by `fs`.
- #[inline]
- pub fn get_my_thread_context() -> *mut ThreadContext {
+ /// Get a reference to this thread's [ThreadContext], from the [TLS] region pointed to by `fs`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the thread context hasn't been initialized yet.
+ /// This happens immediately in the startup of a thread, and relatively early for the main thread.
+ pub fn get_my_thread_context() -> &'static ThreadContext {
// read the last bytes of TLS region and treat it as a pointer
+ let context_ptr = unsafe {
+     // safe: - get_my_tls returns a valid 0x200 aligned ptr,
+     //       - .ptr_thread_context is correctly aligned in the TLS region to usize.
+     (*get_my_tls_region()).ptr_thread_context as *const ThreadContext
+ };
+ // The TLS region is initially memset to 0 by the kernel.
+ // If the context_ptr is 0 it means it hasn't been written yet.
+ debug_assert!(!context_ptr.is_null(), "thread context not initialized yet");
+ // create a ref
unsafe {
- // safe: just pointer arithmetic
- &(*get_my_tls_region()).ptr_thread_context as *const usize as *mut _
+ // safe: the context will never be accessed mutably after its allocation,
+ //       it is guaranteed to live for the whole lifetime of the current thread,
+ //       it is guaranteed to be well-formed since we allocated it ourselves,
+ //       => creating a ref is safe.
+ &*(context_ptr)
}
}

@@ -240,19 +257,25 @@ impl Thread {
extern "fastcall" fn thread_trampoline(thread_context_addr: usize) -> ! {
debug!("starting from new thread, context at address {:#010x}", thread_context_addr);
// first save the address of our context in our TLS region
- unsafe { (*get_my_tls_region()).ptr_thread_context = thread_context_addr };
+ unsafe {
+     // safe: - get_my_tls returns a valid 0x200 aligned ptr,
+     //       - .ptr_thread_context is correctly aligned in the TLS region to usize,
+     //       - we're a private fn, thread_context_addr is guaranteed by our caller to point to the context.
+     (*get_my_tls_region()).ptr_thread_context = thread_context_addr
+ };

- let thread_context_addr = thread_context_addr as *mut ThreadContext;
+ // use get_my_thread_context to create a ref for us
+ let thread_context = get_my_thread_context();

// make gs point to our tls
- let tls_elf = unsafe { &(*thread_context_addr).tls_elf };
- tls_elf.r#try().unwrap().enable_for_current_thread();
-
- // call the routine saved in the context, passing it the arg saved in the context
unsafe {
-     ((*thread_context_addr).entry_point)((*thread_context_addr).arg)
+     // safe: this module guarantees that the TLS region is unique to this thread.
+     thread_context.tls_elf.r#try().unwrap().enable_for_current_thread();
}

+ // call the routine saved in the context, passing it the arg saved in the context
+ (thread_context.entry_point)(thread_context.arg);
+
debug!("exiting thread");
syscalls::exit_thread()
}
@@ -281,10 +304,17 @@ pub extern fn init_main_thread(handle: ThreadHandle) {
// save the handle in our context
MAIN_THREAD_CONTEXT.thread_handle.call_once(|| handle);
// save the address of our context in our TLS region
- unsafe { (*get_my_tls_region()).ptr_thread_context = &MAIN_THREAD_CONTEXT as *const ThreadContext as usize };
+ unsafe {
+     // safe: - get_my_tls returns a valid 0x200 aligned ptr,
+     //       - .ptr_thread_context is correctly aligned in the TLS region to usize.
+     (*get_my_tls_region()).ptr_thread_context = &MAIN_THREAD_CONTEXT as *const ThreadContext as usize
+ };

// allocate, enable elf TLS, and save it in our context
let tls_elf = TlsElf::allocate();
- tls_elf.enable_for_current_thread();
+ unsafe {
+     // safe: this module guarantees that the TLS region is unique to this thread.
+     tls_elf.enable_for_current_thread();
+ }
MAIN_THREAD_CONTEXT.tls_elf.call_once(move || tls_elf);
}
