From 7333eafb1ddaa522e9639ad12674d3e77412d95a Mon Sep 17 00:00:00 2001
From: Orycterope
Date: Mon, 12 Aug 2019 17:30:50 +0000
Subject: [PATCH] todo-bot: touch TLS todos

Because the TLS PR is so big, todobot bailed on it.
Make a PR that only touches every `// todo:` we opened in #339,
so todobot picks them up again.

---
 kernel/src/i386/gdt.rs        | 94 +++++++++++++++++------------------
 kernel/src/paging/arch/mod.rs |  2 +-
 kernel/src/panic.rs           | 16 +++---
 libuser/src/threads.rs        | 26 +++++-----
 4 files changed, 69 insertions(+), 69 deletions(-)

diff --git a/kernel/src/i386/gdt.rs b/kernel/src/i386/gdt.rs
index 57faad508..324ec696e 100644
--- a/kernel/src/i386/gdt.rs
+++ b/kernel/src/i386/gdt.rs
@@ -446,53 +446,53 @@ impl MainTask {
 ///
 /// The only exception to this is double faulting, which does use Hardware Task Switching, and
 /// for which we allocate a second TSS, see [DOUBLE_FAULT_TASK].
-// todo: per-cpu TSSs / GDT
-// body: There are multiple things that aren't ideal about the way we handle TSSs.
-// body:
-// body: ## Initialization
-// body:
-// body: TSSs must always be initialized with an iopb_offset of `size_of::()`,
-// body: so that the TSS's data is not interpreted as the iopb.
-// body:
-// body: However, because MAIN_TASK has a huge iopb (0x2001 bytes), we want it to live in the
-// body: .bss, and be lazy initialized (iopb_offset value, and iopb array memset to 0xFF).
-// body: `lazy_static` seems appropriate for that, and we should use it, so we cannot *forget* to
-// body: initialize a TSS.
-// body:
-// body: DOUBLE_FAULT_TASK could be statically initialized, except for the `cr3` field.
-// body:
-// body: ## Per-cpu
-// body:
-// body: But we will likely want a MAIN and DOUBLE_FAULT TSS per core. However, they cannot trivially
-// body: be put behind a `#[thread_local]`, as they are initialized with the GDT, before cpu-locals
-// body: are initialized. It might be possible to make them `#[thread_local]` with some
-// body: post-initialization routine that switches to using the MAIN and DOUBLE_FAULT_TASK in the
-// body: cpu-local memory area instead of the static early one, after cpu-local have been initialized,
-// body: for core 0.
-// body: The static early one could do without an iopb, since we're not going to userspace with it.
-// body:
-// body: For other cores, having a `#[thead_local]` inside a `lazy_static!` seems to work, but I don't
-// body: yet know how cores are going to be started, whether they allocate/initialize their own
-// body: GDT + MAIN + DOUBLE_FAULT TSS, if it their parent core do it.
-// body:
-// body: Because of these unknowns, the search for a good design for TSSs/GDT is postponed.
-// body:
-// body: ## Locking
-// body:
-// body: Since the TSSs are supposed to be cpu-local, there is no reason for them to have a mutex
-// body: around them. An ideal design would be lock-less, which can either be achieved with `#[thread_local]`,
-// body: or some custom wrapper around an UnsafeCell just for TSSs.
-// body:
-// body: ## DOUBLE_FAULT's cr3
-// body:
-// body: The DOUBLE_FAULT TSS(s)'s cr3 must point to a valid page directory, which will remain valid
-// body: (i.e. not be freed) for the entire lifetime of the kernel, and possibly updated when kernel
-// body: page tables are modified.
-// body:
-// body: For now, because we have no such hierarchy, we always make DOUBLE_FAULT's cr3 point
-// body: to the current cr3, and update it when we switch page table hierarchies. However the current
-// body: way we do kernel paging is not viable for SMP, and we might finally implement such a hierarchy
-// body: for SMP, we could then make DOUBLE_FAULT TSS(s) point to it.
+// TODO: per-cpu TSSs / GDT
+// BODY: There are multiple things that aren't ideal about the way we handle TSSs.
+// BODY:
+// BODY: ## Initialization
+// BODY:
+// BODY: TSSs must always be initialized with an iopb_offset of `size_of::()`,
+// BODY: so that the TSS's data is not interpreted as the iopb.
+// BODY:
+// BODY: However, because MAIN_TASK has a huge iopb (0x2001 bytes), we want it to live in the
+// BODY: .bss, and be lazily initialized (iopb_offset value, and iopb array memset to 0xFF).
+// BODY: `lazy_static` seems appropriate for that, and we should use it, so we cannot *forget* to
+// BODY: initialize a TSS.
+// BODY:
+// BODY: DOUBLE_FAULT_TASK could be statically initialized, except for the `cr3` field.
+// BODY:
+// BODY: ## Per-cpu
+// BODY:
+// BODY: We will likely want a MAIN and DOUBLE_FAULT TSS per core. However, they cannot trivially
+// BODY: be put behind a `#[thread_local]`, as they are initialized with the GDT, before cpu-locals
+// BODY: are initialized. It might be possible to make them `#[thread_local]` with some
+// BODY: post-initialization routine that switches to using the MAIN and DOUBLE_FAULT_TASK in the
+// BODY: cpu-local memory area instead of the static early one, after cpu-locals have been initialized,
+// BODY: for core 0.
+// BODY: The static early one could do without an iopb, since we're not going to userspace with it.
+// BODY:
+// BODY: For other cores, having a `#[thread_local]` inside a `lazy_static!` seems to work, but I don't
+// BODY: yet know how cores are going to be started, whether they allocate/initialize their own
+// BODY: GDT + MAIN + DOUBLE_FAULT TSS, or if their parent core does it.
+// BODY:
+// BODY: Because of these unknowns, the search for a good design for TSSs/GDT is postponed.
+// BODY:
+// BODY: ## Locking
+// BODY:
+// BODY: Since the TSSs are supposed to be cpu-local, there is no reason for them to have a mutex
+// BODY: around them. An ideal design would be lock-less, which can be achieved either with `#[thread_local]`
+// BODY: or with some custom wrapper around an UnsafeCell just for TSSs.
+// BODY:
+// BODY: ## DOUBLE_FAULT's cr3
+// BODY:
+// BODY: The DOUBLE_FAULT TSS(s)'s cr3 must point to a valid page directory, which will remain valid
+// BODY: (i.e. not be freed) for the entire lifetime of the kernel, and possibly be updated when kernel
+// BODY: page tables are modified.
+// BODY:
+// BODY: For now, because we have no such hierarchy, we always make DOUBLE_FAULT's cr3 point
+// BODY: to the current cr3, and update it when we switch page table hierarchies. However, the current
+// BODY: way we do kernel paging is not viable for SMP; if we eventually implement such a hierarchy
+// BODY: for SMP, we can then make the DOUBLE_FAULT TSS(s) point to it.
 pub static MAIN_TASK: Mutex<MainTask> = Mutex::new(MainTask::empty());
 
 /// Double fault TSS
diff --git a/kernel/src/paging/arch/mod.rs b/kernel/src/paging/arch/mod.rs
index add70092f..7b1c7205d 100644
--- a/kernel/src/paging/arch/mod.rs
+++ b/kernel/src/paging/arch/mod.rs
@@ -7,5 +7,5 @@ pub use self::i386::table::{ActiveHierarchy, InactiveHierarchy};
 pub use self::i386::entry::I386Entry as Entry;
 pub use self::i386::entry::I386EntryFlags as EntryFlags;
 pub use self::i386::is_paging_on;
-pub use self::i386::{read_cr2, read_cr3}; // TODO: expose current page directory's address in an arch-independant way.
+pub use self::i386::{read_cr2, read_cr3}; // todo: expose current page directory's address in an arch-independent way.
 pub use self::i386::lands::{KernelLand, UserLand, RecursiveTablesLand};
diff --git a/kernel/src/panic.rs b/kernel/src/panic.rs
index d54d2b87f..5ab3c4c72 100644
--- a/kernel/src/panic.rs
+++ b/kernel/src/panic.rs
@@ -61,8 +61,8 @@ pub enum PanicOrigin<'a> {
 /// Takes a panic origin, so we can personalize the kernel panic message.
 pub fn kernel_panic(panic_origin: &PanicOrigin) -> ! {
 
-    // TODO: permanently_disable_interrupts shouldn't be unsafe.
-    // BODY: disabling interrupts doesn't break any safety guidelines, and is perfectly safe as far as rustc is concerned.
+    // todo: permanently_disable_interrupts shouldn't be unsafe.
+    // body: disabling interrupts doesn't break any safety guarantees, and is perfectly safe as far as rustc is concerned.
     // Disable interrupts forever!
     unsafe { sync::permanently_disable_interrupts(); }
     // Don't deadlock in the logger
@@ -183,12 +183,12 @@
         let _ = writeln!(SerialLogger, "Panic handler: Failed to get kernel elf symbols");
     }
 
-    // todo: Kernel Stack dump update
-    // body: Update the kernel stack dump functions to be compatible the new and improved
-    // body: kernel panic.
-    // body:
-    // body: Now that know the origin (userspace or kernelspace) in the panic, this should
-    // body: be easy, and we can finally have userspace stack dumps that actually work.
+    // TODO: Kernel Stack dump update
+    // BODY: Update the kernel stack dump functions to be compatible with the new and improved
+    // BODY: kernel panic.
+    // BODY:
+    // BODY: Now that we know the origin (userspace or kernelspace) in the panic, this should
+    // BODY: be easy, and we can finally have userspace stack dumps that actually work.
     let stackdump_source = None;
 
     // Then print the stack
diff --git a/libuser/src/threads.rs b/libuser/src/threads.rs
index 1e115826d..b3d68483d 100644
--- a/libuser/src/threads.rs
+++ b/libuser/src/threads.rs
@@ -206,14 +206,14 @@ impl Thread {
     /// Allocates the stack, sets up the context and TLS, and calls `svcCreateThread`.
     ///
     /// [`start`]: Thread::start
-    // TODO: Libuser Thread stack guard
-    // BODY: Currently the stack of every non-main thread is allocated in the heap, and no page
-    // BODY: guard protects from stack-overflowing and rewriting all the heap.
-    // BODY:
-    // BODY: This is of course terrible for security, as with this stack overflowing is U.B.
-    // BODY:
-    // BODY: The simpler way to fix this would be to continue allocating the stack on the heap,
-    // BODY: but remap the last page with no permissions with the yet unimplemented svcMapMemory syscall.
+    // todo: Libuser Thread stack guard
+    // body: Currently the stack of every non-main thread is allocated in the heap, and no guard
+    // body: page protects from overflowing the stack and rewriting the whole heap.
+    // body:
+    // body: This is of course terrible for security, as overflowing the stack is then U.B.
+    // body:
+    // body: The simplest way to fix this would be to continue allocating the stack on the heap,
+    // body: but remap its last page with no permissions, using the yet-unimplemented svcMapMemory syscall.
     pub fn create(entry: fn (usize) -> (), arg: usize) -> Result {
 
         let tls_elf = Once::new();
@@ -294,11 +294,11 @@ extern "fastcall" fn thread_trampoline(thread_context_addr: usize) -> ! {
 
 impl Drop for Thread {
     fn drop(&mut self) {
-        // todo: Properly free resource after thread detach
-        // body: When detaching a thread, we should ensure that the associated resources (stack,
-        // body: handle, context, etc...) are properly freed before the Process exits. This can be
-        // body: done by adding the ThreadContext to a global Vec<> of ThreadContext that gets freed
-        // body: when the main thread (or the last thread alive?) exits.
+        // TODO: Properly free resources after thread detach
+        // BODY: When detaching a thread, we should ensure that the associated resources (stack,
+        // BODY: handle, context, etc...) are properly freed before the Process exits. This can be
+        // BODY: done by adding the ThreadContext to a global Vec<> of ThreadContext that gets freed
+        // BODY: when the main thread (or the last thread alive?) exits.
     }
 }
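
For illustration, a minimal sketch of the lazy-initialized MAIN_TASK idea from the gdt.rs TODO above. This is not the kernel's actual code: TssStruct stands in for the real hardware TSS layout (all fields except iopb_offset elided), and spin::Mutex plus lazy_static are assumed available, as the TODO itself suggests.

use core::mem::size_of;
use lazy_static::lazy_static;
use spin::Mutex;

// Illustrative stand-in for the hardware-defined TSS layout.
#[repr(C)]
struct TssStruct {
    iopb_offset: u16,
}

// A TSS immediately followed by its 0x2001-byte I/O permission bitmap.
#[repr(C)]
struct MainTask {
    tss: TssStruct,
    iopb: [u8; 0x2001],
}

lazy_static! {
    // Lazily initialized on first access, so setting iopb_offset and
    // memsetting the iopb to 0xFF (all ports denied) cannot be forgotten,
    // and the 8KiB+ of data stays out of the kernel image until first use.
    static ref MAIN_TASK: Mutex<MainTask> = Mutex::new(MainTask {
        tss: TssStruct { iopb_offset: size_of::<TssStruct>() as u16 },
        iopb: [0xFF; 0x2001],
    });
}

fn main() {
    // First access runs the initializer exactly once.
    assert_eq!(MAIN_TASK.lock().tss.iopb_offset, size_of::<TssStruct>() as u16);
}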
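
The guard-page fix proposed in the threads.rs TODO could look roughly like this. svcMapMemory does not exist yet, so svc_map_memory_no_perms below is a hypothetical placeholder with an invented signature; PAGE_SIZE and the rounding scheme are illustrative only.

const PAGE_SIZE: usize = 0x1000;

// Hypothetical placeholder for the yet-unimplemented svcMapMemory syscall:
// remaps `len` bytes at `addr` with no permissions. Invented signature.
fn svc_map_memory_no_perms(_addr: usize, _len: usize) -> Result<(), ()> {
    Ok(()) // the real syscall does not exist yet
}

// Allocate a thread stack on the heap, then turn the lowest fully-aligned
// page inside it into a guard page: stacks grow down, so an overflow hits
// the guard and faults instead of silently rewriting the rest of the heap.
fn allocate_guarded_stack(size: usize) -> Result<Box<[u8]>, ()> {
    // Over-allocate by one page so a whole aligned page fits inside.
    let stack = vec![0u8; size + PAGE_SIZE].into_boxed_slice();
    let base = stack.as_ptr() as usize;
    let guard_page = (base + PAGE_SIZE - 1) & !(PAGE_SIZE - 1); // round up
    svc_map_memory_no_perms(guard_page, PAGE_SIZE)?;
    Ok(stack)
}

fn main() {
    let _stack = allocate_guarded_stack(16 * PAGE_SIZE).expect("no guard page");
}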
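
And a sketch of the detached-thread cleanup idea from the Drop TODO, using std types for brevity where libuser would need its own no_std equivalents; the ThreadContext fields are placeholders.

use std::sync::Mutex;

// Placeholder for the per-thread resources named in the TODO
// (stack allocation, thread handle, context, ...).
struct ThreadContext {
    stack: Vec<u8>,
}

// Contexts of detached threads are parked here instead of being leaked,
// then freed in one go when the main (or last) thread exits.
static DETACHED: Mutex<Vec<ThreadContext>> = Mutex::new(Vec::new());

struct Thread {
    context: Option<ThreadContext>,
}

impl Drop for Thread {
    fn drop(&mut self) {
        // Detaching: hand the resources to the global list rather than
        // freeing them while the thread might still be running.
        if let Some(ctx) = self.context.take() {
            DETACHED.lock().unwrap().push(ctx);
        }
    }
}

// Called by process teardown once no thread can still be using its stack;
// dropping the contexts frees stacks, closes handles, etc.
fn free_detached_threads() {
    DETACHED.lock().unwrap().clear();
}

fn main() {
    drop(Thread { context: Some(ThreadContext { stack: vec![0u8; 0x1000] }) });
    free_detached_threads();
    assert!(DETACHED.lock().unwrap().is_empty());
}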