diff --git a/Cargo.lock b/Cargo.lock index 0238b856cc202..177131fe7413e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -141,8 +141,11 @@ checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" [[package]] name = "bitflags" version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +source = "git+https://github.com/bitflags/bitflags#c2ba43141307a97484518ad7728534855e1a51f2" +dependencies = [ + "compiler_builtins", + "rustc-std-workspace-core", +] [[package]] name = "bitmaps" @@ -1220,8 +1223,7 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "fortanix-sgx-abi" version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c56c422ef86062869b2d57ae87270608dc5929969dd130a6e248979cf4fb6ca6" +source = "git+https://github.com/fortanix/rust-sgx?branch=raoul/edmm#4d629550dfa71e6a5830fc914c1c2d40caa677a6" dependencies = [ "compiler_builtins", "rustc-std-workspace-core", @@ -4864,6 +4866,16 @@ dependencies = [ "syn", ] +[[package]] +name = "sgx-isa" +version = "0.3.3" +source = "git+https://github.com/fortanix/rust-sgx?branch=raoul/edmm#4d629550dfa71e6a5830fc914c1c2d40caa677a6" +dependencies = [ + "bitflags", + "compiler_builtins", + "rustc-std-workspace-core", +] + [[package]] name = "sha-1" version = "0.8.2" @@ -5018,6 +5030,7 @@ version = "0.0.0" dependencies = [ "addr2line", "alloc", + "bitflags", "cfg-if 0.1.10", "compiler_builtins", "core", @@ -5033,6 +5046,7 @@ dependencies = [ "profiler_builtins", "rand 0.7.3", "rustc-demangle", + "sgx-isa", "unwind", "wasi", ] diff --git a/Cargo.toml b/Cargo.toml index f961d3e9b97be..e70dcaea68237 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -96,6 +96,10 @@ cargo = { path = "src/tools/cargo" } rustfmt-nightly = { path = "src/tools/rustfmt" } [patch.crates-io] +fortanix-sgx-abi = { git = "https://github.com/fortanix/rust-sgx", branch = "raoul/edmm" } +sgx-isa = { git = "https://github.com/fortanix/rust-sgx", branch = "raoul/edmm" } +bitflags = { git = "https://github.com/bitflags/bitflags" } + # See comments in `src/tools/rustc-workspace-hack/README.md` for what's going on # here rustc-workspace-hack = { path = 'src/tools/rustc-workspace-hack' } diff --git a/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs b/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs index 6365e5650e471..7c85fcb7934b4 100644 --- a/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs +++ b/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs @@ -50,6 +50,8 @@ pub fn target() -> Target { "EH_FRM_LEN", "TEXT_BASE", "TEXT_SIZE", + "UNMAPPED_BASE", + "UNMAPPED_SIZE", ]; let opts = TargetOptions { os: "unknown".into(), diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml index 22ca7ed09b42a..4ae91a8be016b 100644 --- a/library/std/Cargo.toml +++ b/library/std/Cargo.toml @@ -40,6 +40,8 @@ dlmalloc = { version = "0.2.1", features = ['rustc-dep-of-std'] } [target.x86_64-fortanix-unknown-sgx.dependencies] fortanix-sgx-abi = { version = "0.3.2", features = ['rustc-dep-of-std'] } +sgx-isa = { version = "0.3.2", optional = false, features = ['rustc-dep-of-std', 'nightly'] } +bitflags = { version = "1.2.1", features = ['rustc-dep-of-std'] } [target.'cfg(all(any(target_arch = "x86_64", target_arch = "aarch64"), target_os = "hermit"))'.dependencies] hermit-abi = { version = "0.1.17", features = ['rustc-dep-of-std'] } diff --git 
a/library/std/src/sys/sgx/abi/entry.S b/library/std/src/sys/sgx/abi/entry.S index f61bcf06f0815..9fc08d0fac9fc 100644 --- a/library/std/src/sys/sgx/abi/entry.S +++ b/library/std/src/sys/sgx/abi/entry.S @@ -46,6 +46,10 @@ IMAGE_BASE: globvar HEAP_BASE 8 /* The heap size in bytes */ globvar HEAP_SIZE 8 + /* The base address (relative to enclave start) of the dynamic memory area */ + globvar UNMAPPED_BASE 8 + /* The dynamic memory size in bytes */ + globvar UNMAPPED_SIZE 8 /* Value of the RELA entry in the dynamic table */ globvar RELA 8 /* Value of the RELACOUNT entry in the dynamic table */ diff --git a/library/std/src/sys/sgx/abi/mem.rs b/library/std/src/sys/sgx/abi/mem.rs index 1e743894a9fea..d24180097c7c2 100644 --- a/library/std/src/sys/sgx/abi/mem.rs +++ b/library/std/src/sys/sgx/abi/mem.rs @@ -14,6 +14,8 @@ extern "C" { static ENCLAVE_SIZE: usize; static HEAP_BASE: u64; static HEAP_SIZE: usize; + static UNMAPPED_SIZE: u64; + static UNMAPPED_BASE: u64; } /// Returns the base memory address of the heap @@ -89,3 +91,25 @@ pub fn is_user_range(p: *const u8, len: usize) -> bool { let base = image_base() as usize; end < base || start > base + (unsafe { ENCLAVE_SIZE } - 1) // unsafe ok: link-time constant } + +/// Returns the base memory address of the unmapped memory area. On platforms with SGXv2 features, +/// this region can be used to dynamically add enclave pages +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn unmapped_base() -> u64 { + unsafe { image_base() + UNMAPPED_BASE } +} + +/// Returns the size of the unmapped memory area +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn unmapped_size() -> u64 { + unsafe { UNMAPPED_SIZE } +} + +/// Returns whether the pointer is part of the unmapped memory range +/// `p + len` must not overflow +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn is_unmapped_range(p: *const u8, len: usize) -> bool { + let start = p as u64; + let end = start + (len as u64); + start >= unmapped_base() && end <= unmapped_base() + unmapped_size() // unsafe ok: link-time constant +} diff --git a/library/std/src/sys/sgx/abi/usercalls/mod.rs b/library/std/src/sys/sgx/abi/usercalls/mod.rs index a6a659df291fc..e30b771d933ce 100644 --- a/library/std/src/sys/sgx/abi/usercalls/mod.rs +++ b/library/std/src/sys/sgx/abi/usercalls/mod.rs @@ -259,6 +259,24 @@ pub fn alloc(size: usize, alignment: usize) -> IoResult<*mut u8> { unsafe { raw::alloc(size, alignment).from_sgx_result() } } +/// Usercall `trim`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn trim(region: *const u8, size: usize) -> IoResult<()> { + unsafe { + raw::trim(region, size).from_sgx_result()?; + Ok(()) + } +} + +/// Usercall `remove_trimmed`. See the ABI documentation for more information. 
+#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn remove_trimmed(region: *const u8, size: usize) -> IoResult<()> { + unsafe { + raw::remove_trimmed(region, size).from_sgx_result()?; + Ok(()) + } +} + #[unstable(feature = "sgx_platform", issue = "56975")] #[doc(inline)] pub use self::raw::free; diff --git a/library/std/src/sys/sgx/alloc.rs b/library/std/src/sys/sgx/alloc.rs index 4aea28cb83e23..9a477f576b074 100644 --- a/library/std/src/sys/sgx/alloc.rs +++ b/library/std/src/sys/sgx/alloc.rs @@ -1,9 +1,15 @@ +use super::ext::arch; use crate::alloc::{GlobalAlloc, Layout, System}; +use crate::cmp; +use crate::fmt::{self, Debug, Formatter}; +use crate::marker::PhantomData; +use crate::mem; use crate::ptr; -use crate::sys::sgx::abi::mem as sgx_mem; +use crate::sys::sgx::abi::{mem as sgx_mem, usercalls}; use core::sync::atomic::{AtomicBool, Ordering}; use super::waitqueue::SpinMutex; +use sgx_isa::{PageType, Secinfo, SecinfoFlags}; // Using a SpinMutex because we never want to exit the enclave waiting for the // allocator. @@ -16,18 +22,32 @@ use super::waitqueue::SpinMutex; static DLMALLOC: SpinMutex> = SpinMutex::new(dlmalloc::Dlmalloc::new_with_allocator(Sgx {})); +/// System interface implementation for SGX platform struct Sgx; +impl Sgx { + const PAGE_SIZE: usize = 0x1000; + + unsafe fn allocator() -> &'static mut SGXv2Allocator { + static mut SGX2_ALLOCATOR: SGXv2Allocator = SGXv2Allocator::new(); + unsafe { &mut SGX2_ALLOCATOR } + } +} + unsafe impl dlmalloc::Allocator for Sgx { /// Allocs system resources - fn alloc(&self, _size: usize) -> (*mut u8, usize, u32) { + fn alloc(&self, size: usize) -> (*mut u8, usize, u32) { static INIT: AtomicBool = AtomicBool::new(false); + if size <= sgx_mem::heap_size() { + // No ordering requirement since this function is protected by the global lock. + if !INIT.swap(true, Ordering::Relaxed) { + return (sgx_mem::heap_base() as _, sgx_mem::heap_size(), 0); + } + } - // No ordering requirement since this function is protected by the global lock. 
- if !INIT.swap(true, Ordering::Relaxed) { - (sgx_mem::heap_base() as _, sgx_mem::heap_size(), 0) - } else { - (ptr::null_mut(), 0, 0) + match unsafe { Sgx::allocator().alloc(size) } { + Some(base) => (base, size, 0), + None => (ptr::null_mut(), 0, 0), } } @@ -35,16 +55,22 @@ unsafe impl dlmalloc::Allocator for Sgx { ptr::null_mut() } - fn free_part(&self, _ptr: *mut u8, _oldsize: usize, _newsize: usize) -> bool { - false + fn free_part(&self, ptr: *mut u8, oldsize: usize, newsize: usize) -> bool { + assert_eq!(oldsize % Sgx::PAGE_SIZE, 0); + assert_eq!(newsize % Sgx::PAGE_SIZE, 0); + unsafe { Sgx::allocator().free_part(ptr, oldsize, newsize).is_ok() } } - fn free(&self, _ptr: *mut u8, _size: usize) -> bool { - return false; + fn free(&self, ptr: *mut u8, size: usize) -> bool { + if !sgx_mem::is_unmapped_range(ptr, size) { + return false; + } + assert_eq!(size % Sgx::PAGE_SIZE, 0); + unsafe { Sgx::allocator().free(ptr, size).is_ok() } } fn can_release_part(&self, _flags: u32) -> bool { - false + true } fn allocates_zeros(&self) -> bool { @@ -52,7 +78,7 @@ unsafe impl dlmalloc::Allocator for Sgx { } fn page_size(&self) -> usize { - 0x1000 + Sgx::PAGE_SIZE } } @@ -96,3 +122,828 @@ pub unsafe extern "C" fn __rust_c_alloc(size: usize, align: usize) -> *mut u8 { pub unsafe extern "C" fn __rust_c_dealloc(ptr: *mut u8, size: usize, align: usize) { unsafe { crate::alloc::dealloc(ptr, Layout::from_size_align_unchecked(size, align)) } } + +struct SGXv2Allocator(Option); +unsafe impl Send for SGXv2Allocator {} + +impl SGXv2Allocator { + pub const fn new() -> SGXv2Allocator { + SGXv2Allocator(None) + } + + fn allocator(&mut self) -> &mut BuddyAllocator { + if self.0.is_none() { + let region_base = sgx_mem::unmapped_base(); + let region_size = sgx_mem::unmapped_size(); + self.0 = + Some(BuddyAllocator::new(region_base as _, region_size as _, Sgx::PAGE_SIZE).unwrap()); + } + self.0.as_mut().unwrap() + } + + pub unsafe fn alloc(&mut self, size: usize) -> Option<*mut u8> { + self.allocator().alloc::(size).ok() + } + + pub unsafe fn free(&mut self, ptr: *mut u8, size: usize) -> Result<(), Error> { + self.allocator().free::(ptr, size, 0) + } + + pub unsafe fn free_part( + &mut self, + ptr: *mut u8, + old_size: usize, + new_size: usize, + ) -> Result<(), Error> { + self.allocator().free::(ptr, old_size, new_size) + } +} + +struct Sgx2Mapper; + +impl MemoryMapper for Sgx2Mapper { + fn map_region(base: *const u8, size: usize) -> Result<(), Error> { + assert_eq!(size % Sgx::PAGE_SIZE, 0); + let flags = SecinfoFlags::from(PageType::Reg) + | SecinfoFlags::R + | SecinfoFlags::W + | SecinfoFlags::PENDING; + let secinfo = Secinfo::from(flags).into(); + for offset in (0..size as isize).step_by(Sgx::PAGE_SIZE) { + let page = unsafe { base.offset(offset) }; + + // In order to add a new page, the OS needs to issue an `eaug` instruction, after which the enclave + // needs to accept the changes with an `eaccept`. The sgx driver at time of writing only issues an `eaug` + // when a #PF within the enclave occured due to unmapped memory. By issuing an `eaccept` on + // unmapped memory, we force such a #PF. Eventually the `eaccept` instruction will be + // re-executed and succeed. 
+ arch::eaccept(page as _, &secinfo).map_err(|_| Error::MapFailed)?; + } + + Ok(()) + } + + fn unmap_region(base: *const u8, size: usize) -> Result<(), Error> { + fn accept_trim(base: *const u8, size: usize) -> Result<(), Error> { + let flags = SecinfoFlags::from(PageType::Trim) | SecinfoFlags::MODIFIED; + let secinfo = Secinfo::from(flags).into(); + + for offset in (0..size as isize).step_by(Sgx::PAGE_SIZE) { + let page = unsafe { base.offset(offset) }; + arch::eaccept(page as _, &secinfo).map_err(|_| Error::UnmapFailed)?; + } + Ok(()) + } + + assert_eq!(size % Sgx::PAGE_SIZE, 0); + // Signal to OS that pages are no longer used and should be trimmed + usercalls::trim(base, size).map_err(|_| Error::UnmapFailed)?; + // Accept removing of pages + accept_trim(base, size).map_err(|_| Error::UnmapFailed)?; + // Let the OS remove the pages + usercalls::remove_trimmed(base, size).map_err(|_| Error::UnmapFailed)?; + Ok(()) + } + + fn page_size() -> usize { + Sgx::PAGE_SIZE + } +} + +#[derive(Debug, PartialEq, Eq)] +pub enum Error { + AlignmentError, + FreeGrowsAllocation, + SizeNotSupported, + DoubleFree, + MemoryNotManagedByAllocator, + MemorySizeNotPowerOfTwo, + MinBlockSizeLargerThanMemory, + MinBlockSizeTooSmall, + MapFailed, + UnmapFailed, + OutOfMemory, +} + +pub trait MemoryMapper { + fn map_region(base: *const u8, size: usize) -> Result<(), Error>; + + fn unmap_region(base: *const u8, size: usize) -> Result<(), Error>; + + fn page_size() -> usize; +} + +/// A small, simple allocator that can only allocate blocks of a pre-determined, specific size. +#[derive(Debug, PartialEq, Eq)] +pub struct SimpleAllocator { + memory: Region, + free_blocks: *mut u8, + next_uninit_block: *mut u8, + phantom: PhantomData, +} + +impl SimpleAllocator { + pub fn block_size() -> usize { + let t_size = mem::size_of::(); + let p_size = mem::size_of::<*mut u8>(); + cmp::max(t_size, p_size).next_power_of_two() + } + + pub fn new(memory_base: usize, memory_size: usize) -> Result, Error> { + if memory_base % Self::block_size() != 0 { + return Err(Error::AlignmentError); + } + Ok(SimpleAllocator { + memory: Region { addr: memory_base as _, size: memory_size }, + next_uninit_block: memory_base as _, + free_blocks: ptr::null_mut(), + phantom: PhantomData, + }) + } + + pub fn alloc(&mut self, content: T) -> Result<*mut T, Error> { + if (self.memory.addr as usize) % M::page_size() != 0 + || M::page_size() % Self::block_size() != 0 + { + return Err(Error::AlignmentError); + } + + unsafe { + if self.free_blocks.is_null() { + let ptr = self.next_uninit_block as *mut T; + if (ptr as *const u8) < self.memory.end() { + // There are no free memory blocks, but part of the memory region is still + // uninitialized; use a new uninitialized block + if (ptr as usize) % M::page_size() == 0 { + // Request that a new page is mapped in memory + M::map_region(ptr as _, M::page_size())?; + } + self.next_uninit_block = + (self.next_uninit_block as usize + Self::block_size()) as *mut u8; + assert_eq!((ptr as usize) % Self::block_size(), 0); + ptr::write(ptr, content); + Ok(ptr) + } else { + Err(Error::OutOfMemory) + } + } else if self.next_uninit_block < self.memory.end() { + // There are free memory blocks available, recycle one + let new_head: *mut u8 = ptr::read(self.free_blocks as _); + let ptr: *mut T = self.free_blocks as _; + self.free_blocks = new_head; + assert_eq!((ptr as usize) % Self::block_size(), 0); + ptr::write(ptr, content); + Ok(ptr) + } else { + Err(Error::OutOfMemory) + } + } + } + + pub fn free(&mut self, ptr: *mut T) { + 
unsafe { + ptr::write(ptr as _, self.free_blocks); + self.free_blocks = ptr as _; + } + } +} + +#[derive(PartialEq)] +pub enum Block { + Free, + Allocated, + Partitioned(*mut Block, *mut Block), +} + +impl Debug for Block { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match *self { + Block::Allocated => f.pad("A"), + Block::Free => f.pad("F"), + Block::Partitioned(l, r) => unsafe { + let s = format!("({:?}, {:?})", *l, *r); + f.pad(&s) + }, + } + } +} + +#[derive(Debug)] +pub struct BuddyAllocator { + block: *mut Block, + min_block_size: usize, + memory: Region, + allocator: SimpleAllocator, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Region { + addr: *mut u8, + size: usize, +} + +impl Region { + fn new(addr: *mut u8, size: usize) -> Region { + Region { addr, size } + } + + fn split(&self) -> (Region, Region) { + let left = Region { addr: self.addr, size: self.size / 2 }; + let right = + Region { addr: (left.addr as usize + left.size) as _, size: self.size - left.size }; + (left, right) + } + + fn join(&self, other: &Region) -> Option { + let start0 = cmp::min(self.addr, other.addr); + let start1 = cmp::max(self.addr, other.addr); + let end0 = cmp::min(self.end(), other.end()); + let end1 = cmp::max(self.end(), other.end()); + + if end0 == start1 { + Some(Region { addr: start0, size: end1 as usize - start0 as usize }) + } else { + None + } + } + + fn intersect(&self, other: &Region) -> Option { + let start = crate::cmp::max(self.addr, other.addr); + let end = crate::cmp::min(self.end(), other.end()); + if start < end { + Some(Region { addr: start, size: end as usize - start as usize }) + } else { + None + } + } + + fn subtract(&self, other: &Region) -> Option { + if other.size == 0 { + return Some(self.to_owned()); + } + if self.addr < other.addr { + let start = self.addr; + let end = crate::cmp::min(self.end() as usize, other.addr as usize); + + if start as usize != end { + return Some(Region { addr: start, size: end - start as usize }); + } + } else { + if other.end() < self.end() { + return Some(Region { + addr: other.end(), + size: self.end() as usize - other.end() as usize, + }); + } + } + None + } + + fn end(&self) -> *mut u8 { + (self.addr as usize + self.size) as _ + } + + fn contains(&self, ptr: *mut u8) -> bool { + self.addr <= ptr && ptr < self.end() + } +} + +impl BuddyAllocator { + fn tree_depth(memory_size: usize, min_block_size: usize) -> u32 { + let max_depth = memory_size.next_power_of_two().trailing_zeros(); + let block_depth = min_block_size.next_power_of_two().trailing_zeros(); + + assert!(min_block_size <= memory_size); + max_depth - block_depth + } + + fn max_metadata_entries(memory_size: usize, min_block_size: usize) -> u32 { + let depth = Self::tree_depth(memory_size, min_block_size); + (0x1u32 << (depth + 1)) - 1 + } + + fn max_metadata_size(memory_size: usize, min_block_size: usize) -> usize { + // The algorithm sometimes temporarily uses 1 additional allocation, we need to account for + // that + (Self::max_metadata_entries(memory_size, min_block_size) as usize + 1) + * SimpleAllocator::::block_size() + } + + pub fn new( + memory_base: usize, + memory_size: usize, + min_block_size: usize, + ) -> Result { + if !memory_size.is_power_of_two() { + return Err(Error::MemorySizeNotPowerOfTwo); + } + if !min_block_size.is_power_of_two() { + return Err(Error::MemorySizeNotPowerOfTwo); + } + if memory_size < min_block_size { + return Err(Error::MinBlockSizeLargerThanMemory); + } + if memory_size < Self::max_metadata_size(memory_size, 
min_block_size) { + return Err(Error::MinBlockSizeTooSmall); + } + + let allocator = SimpleAllocator::new( + memory_base, + Self::max_metadata_size(memory_size, min_block_size).next_power_of_two(), + )?; + let buddy = BuddyAllocator { + block: ptr::null_mut(), + min_block_size, + memory: Region::new(memory_base as _, memory_size), + allocator, + }; + Ok(buddy) + } + + unsafe fn alloc_ex( + &mut self, + memory: Region, + block: *mut Block, + alloc_size: usize, + map_memory: bool, + ) -> Result { + unsafe { + assert!(self.min_block_size <= memory.size); + if memory.size < alloc_size { + return Err(Error::OutOfMemory); + } + + match ptr::read(block) { + Block::Free => { + if 2 * alloc_size <= memory.size && self.min_block_size * 2 <= memory.size { + // Very large free block found, split region recursively + let left = self.allocator.alloc::(Block::Free)?; + let right = self.allocator.alloc::(Block::Free)?; + *block = Block::Partitioned(left, right); + self.alloc_ex::(memory, block, alloc_size, map_memory) + } else { + // Small free block is found. May split it up further to reduce internal fragmentation + if (memory.size - alloc_size) < self.min_block_size + || memory.size < 2 * self.min_block_size + { + // Use entire region + ptr::write(block, Block::Allocated); + if map_memory { + // Don't map metadata in memory. The SimpleAllocator will take care of + // that + M::map_region(memory.addr, memory.size)?; + } + Ok(memory) + } else { + // Split block + let block_left = self.allocator.alloc::(Block::Free)?; + let block_right = self.allocator.alloc::(Block::Free)?; + ptr::write(block, Block::Partitioned(block_left, block_right)); + let (memory_left, memory_right) = memory.split(); + let left_size = memory_left.size; + let alloc_left = + self.alloc_ex::(memory_left, block_left, left_size, map_memory)?; + let alloc_right = self.alloc_ex::( + memory_right, + block_right, + alloc_size - alloc_left.size, + map_memory, + )?; + // `alloc_left` should have received a complete block. `alloc_right` will + // only receive a chunk of the available mememory but as we favor the + // beginning of memory both chunks should be adjacent + Ok(alloc_left + .join(&alloc_right) + .expect("Bug: could not join adjacent regions")) + } + } + } + Block::Partitioned(block_left, block_right) => { + let (memory_left, memory_right) = memory.split(); + if let Ok(left) = + self.alloc_ex::(memory_left, block_left, alloc_size, map_memory) + { + Ok(left) + } else if let Ok(right) = + self.alloc_ex::(memory_right, block_right, alloc_size, map_memory) + { + Ok(right) + } else { + Err(Error::OutOfMemory) + } + } + Block::Allocated => Err(Error::OutOfMemory), + } + } + } + + pub fn alloc(&mut self, size: usize) -> Result<*mut u8, Error> { + if self.min_block_size < M::page_size() { + return Err(Error::MinBlockSizeTooSmall); + } + if self.block.is_null() { + // Reserve space for own book keeping + self.block = self.allocator.alloc::(Block::Free)?; + let metadata = unsafe { + self.alloc_ex::( + self.memory.to_owned(), + self.block, + Self::max_metadata_size(self.memory.size, self.min_block_size), + false, + ) + }; + assert!(metadata.is_ok()); + } + + let region = unsafe { self.alloc_ex::(self.memory.to_owned(), self.block, size, true)? 
}; + Ok(region.addr) + } + + unsafe fn free_ex( + &mut self, + block: *mut Block, + memory: &Region, + free: &Region, + ) -> Result<(), Error> { + unsafe { + match ptr::read(block) { + Block::Allocated => { + if let Some(_alloc) = memory.subtract(free) { + // Split block into two allocated regions and continue freeing recursively + assert_eq!(_alloc.addr, memory.addr); + let left = self.allocator.alloc::(Block::Allocated)?; + let right = self.allocator.alloc::(Block::Allocated)?; + *block = Block::Partitioned(left, right); + self.free_ex::(block, memory, free) + } else { + // Free entire memory block + ptr::write(block, Block::Free); + if M::page_size() < memory.size { + M::unmap_region(memory.addr, memory.size)?; + } + Ok(()) + } + } + Block::Partitioned(block_left, block_right) => { + let (memory_left, memory_right) = memory.split(); + if let Some(overlap) = memory_right.intersect(free) { + self.free_ex::(block_right, &memory_right, &overlap)?; + } + if let Some(overlap) = memory_left.intersect(free) { + self.free_ex::(block_left, &memory_left, &overlap)?; + } + if ptr::read(block_left) == Block::Free && ptr::read(block_right) == Block::Free + { + self.allocator.free(block_left); + self.allocator.free(block_right); + ptr::write(block, Block::Free); + if M::page_size() == memory.size { + // The left and right parts combined are exactly one page. At a lower + // level, it couldn't be unmapped as there still may be data on that + // page. Now the entire page is free, unmap it. It also isn't possible + // that the block size is larger than a page as the buddy allocator + // always halfs the available memory. If the block now spans two pages, + // it would already have been unmapped on a lower level + M::unmap_region(memory.addr, memory.size)?; + } + } + Ok(()) + } + Block::Free => Err(Error::DoubleFree), + } + } + } + + pub fn free( + &mut self, + ptr: *mut u8, + old_size: usize, + new_size: usize, + ) -> Result<(), Error> { + if !self.memory.contains(ptr) { + return Err(Error::MemoryNotManagedByAllocator); + } + if old_size < new_size { + return Err(Error::FreeGrowsAllocation); + } + assert_eq!(old_size % M::page_size(), 0); + if new_size % M::page_size() != 0 { + return Err(Error::SizeNotSupported); + }; + if new_size % self.min_block_size != 0 { + return Err(Error::SizeNotSupported); + }; + let old_alloc = Region::new(ptr, old_size); + let new_alloc = Region::new(ptr, new_size); + let free = old_alloc.subtract(&new_alloc).ok_or(Error::SizeNotSupported)?; + let memory = self.memory.to_owned(); + unsafe { self.free_ex::(self.block, &memory, &free) } + } +} + +#[cfg(test)] +mod tests { + use crate::{BuddyAllocator, Error, MemoryMapper, Region, SimpleAllocator}; + use std::alloc::GlobalAlloc; + + pub struct Linux; + + impl MemoryMapper for Linux { + fn map_region(base: *const u8, size: usize) { + if base as usize % Self::page_size() != 0 { + panic!("Cannot map a page at {:x?}", base); + } + if size as usize % Self::page_size() != 0 { + panic!("Cannot map a page of {}", size); + } + assert_eq!(size % Self::page_size(), 0); + unsafe { + libc::mprotect(base as _, size, libc::PROT_READ | libc::PROT_WRITE); + } + } + + fn unmap_region(base: *const u8, size: usize) { + assert_eq!(size % Self::page_size(), 0); + unsafe { + libc::mprotect(base as _, size, libc::PROT_NONE); + } + } + + fn page_size() -> usize { + 0x1000 + } + } + + #[test] + fn region_subtract() { + let block0 = Region { addr: 0x10_000 as _, size: 0x1000 }; + let block1 = Region { addr: 0x11_000 as _, size: 0x2000 }; + let block2 = 
Region { addr: 0x12_000 as _, size: 0x4000 }; + let block3 = Region { addr: 0x13_000 as _, size: 0x2000 }; + let block4 = Region { addr: 0x14_000 as _, size: 0x6000 }; + let block_null0 = Region { addr: 0x10_000 as _, size: 0 }; + let block_null1 = Region { addr: 0x14_800 as _, size: 0 }; + let block_null2 = Region { addr: 0x11_000 as _, size: 0 }; + assert_eq!(block1.subtract(&block0), Some(block1.clone())); + assert_eq!(block1.subtract(&block3), Some(block1.clone())); + assert_eq!(block1.subtract(&block1), None); + assert_eq!(block2.subtract(&block1), Some(Region { addr: 0x13_000 as _, size: 0x3000 })); + assert_eq!(block2.subtract(&block4), Some(Region { addr: 0x12_000 as _, size: 0x2000 })); + assert_eq!(block2.subtract(&block3), Some(Region { addr: 0x12_000 as _, size: 0x1000 })); + assert_eq!(block3.subtract(&block2), None); + assert_eq!(block4.subtract(&block2), Some(Region { addr: 0x16_000 as _, size: 0x4000 })); + assert_eq!(block0.subtract(&block_null0), Some(block0.clone())); + assert_eq!(block0.subtract(&block_null1), Some(block0.clone())); + assert_eq!(block0.subtract(&block_null2), Some(block0.clone())); + } + + #[test] + fn region_join() { + let block0 = Region { addr: 0x10_000 as _, size: 0x1000 }; + let block1 = Region { addr: 0x11_000 as _, size: 0x2000 }; + let block2 = Region { addr: 0x12_000 as _, size: 0x4000 }; + let block_null0 = Region { addr: 0x10_000 as _, size: 0 }; + let block_null2 = Region { addr: 0x11_000 as _, size: 0 }; + let block01 = Region { addr: 0x10_000 as _, size: 0x3000 }; + assert_eq!(block0.join(&block1), Some(block01.clone())); + assert_eq!(block1.join(&block0), Some(block01.clone())); + assert_eq!(block0.join(&block2), None); + assert_eq!(block2.join(&block0), None); + assert_eq!(block_null0.join(&block0), Some(block0.clone())); + assert_eq!(block_null2.join(&block0), Some(block0.clone())); + } + + #[test] + fn region_intersect() { + let block1 = Region { addr: 0x11_000 as _, size: 0x2000 }; + let block2 = Region { addr: 0x12_000 as _, size: 0x4000 }; + let block3 = Region { addr: 0x13_000 as _, size: 0x2000 }; + let block12 = Region { addr: 0x12_000 as _, size: 0x1000 }; + assert_eq!(block1.intersect(&block2), Some(block12.clone())); + assert_eq!(block2.intersect(&block1), Some(block12.clone())); + assert_eq!(block3.intersect(&block2), Some(block3.clone())); + assert_eq!(block2.intersect(&block3), Some(block3.clone())); + assert_eq!(block3.intersect(&block1), None); + assert_eq!(block1.intersect(&block3), None); + assert_eq!(block1.intersect(&block1), Some(block1.clone())); + } + + #[test] + fn tree_depth() { + assert_eq!(BuddyAllocator::tree_depth(1, 1), 0); + assert_eq!(BuddyAllocator::tree_depth(8, 1), 3); + assert_eq!(BuddyAllocator::tree_depth(16, 1), 4); + assert_eq!(BuddyAllocator::tree_depth(16, 2), 3); + assert_eq!(BuddyAllocator::tree_depth(16, 4), 2); + } + + #[test] + fn buddy_alloc() { + unsafe { + let memory_size = 0x10000; + let memory_base = std::alloc::System + .alloc(std::alloc::Layout::from_size_align(memory_size, memory_size).unwrap()); + Linux::unmap_region(memory_base, memory_size); + let mut space = BuddyAllocator::new(memory_base as _, memory_size, 0x1000).unwrap(); + let alloc0 = space.alloc::(0x511); + let alloc1 = space.alloc::(0x511); + assert_eq!(Ok(Region::new((memory_base as usize + 0x1000) as _, 0x1000)), alloc0); + assert_eq!(Ok(Region::new((memory_base as usize + 0x2000) as _, 0x1000)), alloc1); + assert_eq!(Ok(()), space.free::(alloc1.unwrap().addr, 0x1000, 0)); + assert_eq!(Ok(()), 
space.free::(alloc0.unwrap().addr, 0x1000, 0)); + } + } + + #[test] + fn buddy_alloc2() { + unsafe { + let memory_size = 0x10000; + let memory_base = std::alloc::System + .alloc(std::alloc::Layout::from_size_align(memory_size, memory_size).unwrap()); + Linux::unmap_region(memory_base, memory_size); + let mut space = BuddyAllocator::new(memory_base as _, memory_size, 0x1000).unwrap(); + let r = space.alloc::(0x8000).unwrap(); + assert_eq!(format!("{:?}", *space.block), "((((A, F), F), F), A)"); + assert_eq!(Ok(()), space.free::(r.addr, 0x8000, 0x4000)); + assert_eq!(format!("{:?}", *space.block), "((((A, F), F), F), (A, F))"); + assert_eq!(Ok(()), space.free::(r.addr, 0x4000, 0x1000)); + assert_eq!(format!("{:?}", *space.block), "((((A, F), F), F), (((A, F), F), F))"); + assert_eq!(Ok(()), space.free::(r.addr, 0x1000, 0)); + assert_eq!(format!("{:?}", *space.block), "((((A, F), F), F), F)"); + + let r0 = space.alloc::(0x2000).unwrap(); + assert_eq!(format!("{:?}", *space.block), "((((A, F), A), F), F)"); + + let r1 = space.alloc::(0x8000).unwrap(); + assert_eq!(format!("{:?}", *space.block), "((((A, F), A), F), A)"); + + let mut r2 = space.alloc::(0x4000).unwrap(); + assert_eq!(format!("{:?}", *space.block), "((((A, F), A), A), A)"); + + let r3 = space.alloc::(0x1000).unwrap(); + assert_eq!(format!("{:?}", *space.block), "((((A, A), A), A), A)"); + assert_eq!(space.alloc::(0x1000), Err(Error::OutOfMemory)); + + let new_size = 0x1000; + assert_eq!(Ok(()), space.free::(r2.addr, r2.size, new_size)); + r2.size = new_size; + assert_eq!(format!("{:?}", *space.block), "((((A, A), A), ((A, F), F)), A)"); + + assert!(space.free::(r0.addr, r0.size, 0).is_ok()); + assert_eq!(format!("{:?}", *space.block), "((((A, A), F), ((A, F), F)), A)"); + + assert!(space.free::(r1.addr, r1.size, 0).is_ok()); + assert_eq!(format!("{:?}", *space.block), "((((A, A), F), ((A, F), F)), F)"); + + assert!(space.free::(r2.addr, r2.size, 0).is_ok()); + assert_eq!(format!("{:?}", *space.block), "((((A, A), F), F), F)"); + + assert!(space.free::(r3.addr, r3.size, 0).is_ok()); + assert_eq!(format!("{:?}", *space.block), "((((A, F), F), F), F)"); + } + } + + #[test] + pub fn buddy_alloc_bruteforce() { + fn mark_allocated(base: *mut u8, size: usize) { + for index in 0..size { + let ptr = (base as usize + index) as *mut u8; + unsafe { + assert_eq!(*ptr, 0); + *ptr = 1; + } + } + } + + fn mark_free(base: *mut u8, size: usize) { + for index in 0..size { + let ptr = (base as usize + index) as *mut u8; + unsafe { + assert_eq!(*ptr, 1); + *ptr = 0; + } + } + } + + use rand::Rng; + + let memory_size = 1 * 1024 * 1024; + let memory_base = unsafe { + std::alloc::System.alloc_zeroed( + std::alloc::Layout::from_size_align(memory_size, memory_size).unwrap(), + ) + }; + Linux::unmap_region(memory_base, memory_size); + let mut space = BuddyAllocator::new(memory_base as _, memory_size, 0x1000).unwrap(); + let mut rnd = rand::thread_rng(); + let mut pointers: Vec<(*mut u8, usize)> = Vec::new(); + + for _i in 0..1000 { + if rnd.gen() { + // Allocate + let size = rnd.gen::() % (memory_size / 10); + if let Ok(region) = space.alloc::(size) { + mark_allocated(region.addr, region.size); + pointers.push((region.addr, region.size)); + } + } else { + // Free + if 0 < pointers.len() { + let idx = rnd.gen::() % pointers.len(); + let (ptr, size) = pointers.remove(idx); + mark_free(ptr, size); + assert_eq!(Ok(()), space.free::(ptr, size, 0)); + } + } + } + + while let Some((ptr, size)) = pointers.pop() { + mark_free(ptr, size); + assert_eq!(Ok(()), 
space.free::(ptr, size, 0)); + } + } + + #[test] + fn simple_alloc() { + unsafe { + let region = std::alloc::System + .alloc(std::alloc::Layout::from_size_align(0x1000, 0x1000).unwrap()); + Linux::unmap_region(region, 0x1000); + let mut allocator = SimpleAllocator::::new(region as _, 0x1000).unwrap(); + let mut ptrs = Vec::new(); + for i in 0..100 { + let ptr = allocator.alloc::(i).unwrap(); + assert!( + (region as *mut u32) <= ptr && ptr < (region as usize + 0x1000) as *mut u32 + ); + ptrs.push(ptr); + } + for ptr in ptrs.iter() { + allocator.free(*ptr); + } + } + } + + #[test] + fn bruteforce_simple_alloc() { + fn mark_allocated(base: *mut u8, size: usize) { + for index in 0..size { + let ptr = (base as usize + index) as *mut u8; + unsafe { + *ptr = 1; + } + } + } + + fn mark_free(base: *mut u8, size: usize) { + for index in 0..size { + let ptr = (base as usize + index) as *mut u8; + unsafe { + assert_eq!(*ptr, 1); + *ptr = 0; + } + } + } + + use rand::Rng; + use std::alloc::GlobalAlloc; + + let memory_size = 20 * 1024 * 1024; + let region = unsafe { + std::alloc::System.alloc_zeroed( + std::alloc::Layout::from_size_align(memory_size, memory_size.next_power_of_two()) + .unwrap(), + ) + }; + + Linux::unmap_region(region, memory_size); + let mut space = SimpleAllocator::::new(region as _, memory_size).unwrap(); + let mut rnd = rand::thread_rng(); + let mut ptrs = Vec::new(); + let num_runs = 10000; + for i in 0..num_runs { + let force_free = (9 * num_runs) / 10 < i; + if rnd.gen::() % 100 < 70 && !force_free { + // alloc + match space.alloc::(0) { + Ok(ptr) => { + ptrs.push(ptr); + assert!(ptr < (region as usize + memory_size) as _); + assert!(region <= ptr as _); + mark_allocated(ptr as _, SimpleAllocator::::block_size()); + } + Err(Error::OutOfMemory) => (), + _ => panic!("Unexpected error"), + } + } else { + // free + if 0 < ptrs.len() { + let idx = rnd.gen::() % ptrs.len(); + let ptr = ptrs.remove(idx); + mark_free(ptr as _, SimpleAllocator::::block_size()); + space.free(ptr); + } + } + } + } +} diff --git a/library/std/src/sys/sgx/ext/arch.rs b/library/std/src/sys/sgx/ext/arch.rs index 730db34e73328..3d4aaa8f81ec1 100644 --- a/library/std/src/sys/sgx/ext/arch.rs +++ b/library/std/src/sys/sgx/ext/arch.rs @@ -5,12 +5,18 @@ #![unstable(feature = "sgx_platform", issue = "56975")] use crate::mem::MaybeUninit; +use core::slice; /// Wrapper struct to force 16-byte alignment. #[repr(align(16))] #[unstable(feature = "sgx_platform", issue = "56975")] pub struct Align16(pub T); +/// Wrapper struct to force 64-byte alignment. +#[repr(align(64))] +#[unstable(feature = "sgx_platform", issue = "56975")] +pub struct Align64(pub T); + /// Wrapper struct to force 128-byte alignment. #[repr(align(128))] #[unstable(feature = "sgx_platform", issue = "56975")] @@ -21,8 +27,20 @@ pub struct Align128(pub T); #[unstable(feature = "sgx_platform", issue = "56975")] pub struct Align512(pub T); +use sgx_isa::Secinfo; +impl From for Align64<[u8; 64]> { + fn from(secinfo: Secinfo) -> Align64<[u8; 64]> { + let mut arr = [0; 64]; + unsafe { + arr.copy_from_slice(slice::from_raw_parts(&secinfo as *const Secinfo as *const _, 64)) + }; + Align64(arr) + } +} + const ENCLU_EREPORT: u32 = 0; const ENCLU_EGETKEY: u32 = 1; +const ENCLU_EACCEPT: u32 = 5; /// Call the `EGETKEY` instruction to obtain a 128-bit secret key. #[unstable(feature = "sgx_platform", issue = "56975")] @@ -71,3 +89,23 @@ pub fn ereport( report.assume_init() } } + +/// Call the `EACCEPT` instruction. 
+#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn eaccept(page: u64, secinfo: &Align64<[u8; 64]>) -> Result<(), u32> { + let error: u32; + unsafe { + asm!( + "enclu", + inlateout("rax") ENCLU_EACCEPT => error, + in("rbx") secinfo, + in("rcx") page, + // NOTE(#76738): ATT syntax is used to support LLVM 8 and 9. + options(att_syntax, nostack)); + } + + match error { + 0 => Ok(()), + err => Err(err), + } +}