Clean up SGX user memory copies #101150

Merged · 1 commit · Oct 6, 2023

Changes from all commits
207 changes: 85 additions & 122 deletions library/std/src/sys/sgx/abi/usercalls/alloc.rs
@@ -3,6 +3,8 @@
use crate::arch::asm;
use crate::cell::UnsafeCell;
use crate::cmp;
use crate::convert::TryInto;
use crate::intrinsics;
use crate::mem;
use crate::ops::{CoerceUnsized, Deref, DerefMut, Index, IndexMut};
use crate::ptr::{self, NonNull};
@@ -306,20 +308,35 @@ where
}
}

// Split a memory region ptr..ptr + len into three parts:
// +--------+
// | small0 | Chunk smaller than 8 bytes
// +--------+
// | big | Chunk 8-byte aligned, and size a multiple of 8 bytes
// +--------+
// | small1 | Chunk smaller than 8 bytes
// +--------+
fn region_as_aligned_chunks(ptr: *const u8, len: usize) -> (usize, usize, usize) {
let small0_size = if ptr.is_aligned_to(8) { 0 } else { 8 - ptr.addr() % 8 };
let small1_size = (len - small0_size) % 8;
let big_size = len - small0_size - small1_size;

(small0_size, big_size, small1_size)
/// Divide the slice `(ptr, len)` into three parts, where the middle part is
/// aligned to `u64`.
///
/// The return values `(prefix_len, mid_len, suffix_len)` add back up to `len`.
/// The return values are such that the memory region `(ptr + prefix_len,
/// mid_len)` is the largest possible region where `ptr + prefix_len` is aligned
/// to `u64` and `mid_len` is a multiple of the byte size of `u64`. This means
/// that `prefix_len` and `suffix_len` are guaranteed to be less than the byte
/// size of `u64`, and that `(ptr, prefix_len)` and `(ptr + prefix_len +
/// mid_len, suffix_len)` don't straddle an alignment boundary.
// Standard Rust functions such as `<[u8]>::align_to::<u64>` and
// `<*const u8>::align_offset` aren't _guaranteed_ to compute the largest
// possible middle region, and as such can't be used.
fn u64_align_to_guaranteed(ptr: *const u8, mut len: usize) -> (usize, usize, usize) {
const QWORD_SIZE: usize = mem::size_of::<u64>();

let offset = ptr as usize % QWORD_SIZE;

let prefix_len = if intrinsics::unlikely(offset > 0) { QWORD_SIZE - offset } else { 0 };
Comment on lines +325 to +329

Member: It's somewhat challenging to audit this code for correctness if it's handrolling is_aligned_to and the like. Is there a justification for this being written this way? If there is, it would be good to leave a comment here, so some helpful Rustacean doesn't open a PR to "clean it up".

Contributor (author): Added comment.

Member: Thank you. I feel like this pattern should be a stdlib function, to be honest, given how often I see it, but this is hardly the time or place to start hashing out new public API for core::ptr.


len = match len.checked_sub(prefix_len) {
jethrogb marked this conversation as resolved.
Some(remaining_len) => remaining_len,
None => return (len, 0, 0),
};

let suffix_len = len % QWORD_SIZE;
len -= suffix_len;

(prefix_len, len, suffix_len)
}
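
As a sanity check on the invariants documented above, a small property test (hypothetical; not part of this diff) can exercise u64_align_to_guaranteed directly. This also motivates the handrolled version: <*const u8>::align_offset is documented as permitted to return usize::MAX, and <[u8]>::align_to only makes a best-effort promise about the middle slice, so neither can be relied on when the split must be exact.

#[cfg(test)]
mod align_split_tests {
    use super::u64_align_to_guaranteed;

    const QWORD_SIZE: usize = core::mem::size_of::<u64>();

    #[test]
    fn split_invariants() {
        // Cover every starting alignment and a range of lengths.
        for addr in 0x1000usize..0x1008 {
            for len in 0usize..64 {
                let (prefix, mid, suffix) = u64_align_to_guaranteed(addr as *const u8, len);
                // The three parts always add back up to `len`.
                assert_eq!(prefix + mid + suffix, len);
                // The edge regions are strictly smaller than a qword.
                assert!(prefix < QWORD_SIZE && suffix < QWORD_SIZE);
                // The middle region starts aligned and is a whole number of qwords.
                if mid > 0 {
                    assert_eq!((addr + prefix) % QWORD_SIZE, 0);
                    assert_eq!(mid % QWORD_SIZE, 0);
                }
            }
        }
    }
}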

unsafe fn copy_quadwords(src: *const u8, dst: *mut u8, len: usize) {
@@ -352,7 +369,13 @@ unsafe fn copy_quadwords(src: *const u8, dst: *mut u8, len: usize) {
/// - https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00615.html
/// - https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/processor-mmio-stale-data-vulnerabilities.html#inpage-nav-3-2-2
pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
unsafe fn copy_bytewise_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
/// Like `ptr::copy(src, dst, len)`, except it uses the Intel-recommended
/// instruction sequence for unaligned writes.
unsafe fn write_bytewise_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
if intrinsics::likely(len == 0) {
return;
}

unsafe {
let mut seg_sel: u16 = 0;
for off in 0..len {
@@ -380,41 +403,15 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
assert!(!src.addr().overflowing_add(len).1);
assert!(!dst.addr().overflowing_add(len).1);

if len < 8 {
// Can't align on 8 byte boundary: copy safely byte per byte
unsafe {
copy_bytewise_to_userspace(src, dst, len);
}
} else if len % 8 == 0 && dst.is_aligned_to(8) {
// Copying 8-byte aligned quadwords: copy quad word per quad word
unsafe {
copy_quadwords(src, dst, len);
}
} else {
// Split copies into three parts:
// +--------+
// | small0 | Chunk smaller than 8 bytes
// +--------+
// | big | Chunk 8-byte aligned, and size a multiple of 8 bytes
// +--------+
// | small1 | Chunk smaller than 8 bytes
// +--------+
let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len);
unsafe {
let (len1, len2, len3) = u64_align_to_guaranteed(dst, len);
let (src1, dst1) = (src, dst);
let (src2, dst2) = (src1.add(len1), dst1.add(len1));
let (src3, dst3) = (src2.add(len2), dst2.add(len2));

unsafe {
// Copy small0
copy_bytewise_to_userspace(src, dst, small0_size);

// Copy big
let big_src = src.add(small0_size);
let big_dst = dst.add(small0_size);
copy_quadwords(big_src, big_dst, big_size);

// Copy small1
let small1_src = src.add(big_size + small0_size);
let small1_dst = dst.add(big_size + small0_size);
copy_bytewise_to_userspace(small1_src, small1_dst, small1_size);
}
write_bytewise_to_userspace(src1, dst1, len1);
copy_quadwords(src2, dst2, len2);
write_bytewise_to_userspace(src3, dst3, len3);
}
}
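
To make the dispatch concrete, here is a worked example with assumed values (for exposition only), following the arithmetic in u64_align_to_guaranteed:

// Worked example (hypothetical addresses, for exposition only):
//   dst = 0x1003, len = 20
//   offset = 0x1003 % 8   = 3
//   len1   = 8 - 3        = 5   -> write_bytewise_to_userspace (prefix)
//   len3   = (20 - 5) % 8 = 7   -> write_bytewise_to_userspace (suffix)
//   len2   = 20 - 5 - 7   = 8   -> copy_quadwords (dst + 5 is 8-byte aligned)
// For a fully aligned copy (dst % 8 == 0 and len % 8 == 0), len1 == len3 == 0,
// the early return in write_bytewise_to_userspace fires twice, and the whole
// copy goes through copy_quadwords.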

@@ -434,87 +431,53 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
/// - https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00657.html
/// - https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/stale-data-read-from-xapic.html
pub(crate) unsafe fn copy_from_userspace(src: *const u8, dst: *mut u8, len: usize) {
// Copies memory region `src..src + len` to the enclave at `dst`. The source memory region
// is:
// - strictly less than 8 bytes in size and may be
// - located at a misaligned memory location
fn copy_misaligned_chunk_to_enclave(src: *const u8, dst: *mut u8, len: usize) {
let mut tmp_buff = [0u8; 16];
/// Like `ptr::copy(src, dst, len)`, except it uses only u64-aligned reads.
///
/// # Safety
/// The source memory region must not straddle an alignment boundary.
unsafe fn read_misaligned_from_userspace(src: *const u8, dst: *mut u8, len: usize) {
if intrinsics::likely(len == 0) {
return;
}

unsafe {
// Compute an aligned memory region to read from
// +--------+ <-- aligned_src + aligned_len (8B-aligned)
// | pad1 |
// +--------+ <-- src + len (misaligned)
// | |
// | |
// | |
// +--------+ <-- src (misaligned)
// | pad0 |
// +--------+ <-- aligned_src (8B-aligned)
let pad0_size = src as usize % 8;
let aligned_src = src.sub(pad0_size);

let pad1_size = 8 - (src.add(len) as usize % 8);
let aligned_len = pad0_size + len + pad1_size;

debug_assert!(len < 8);
debug_assert_eq!(aligned_src as usize % 8, 0);
debug_assert_eq!(aligned_len % 8, 0);
debug_assert!(aligned_len <= 16);

// Copy the aligned buffer to a temporary buffer
// Note: copying from a slightly different memory location is a bit odd. In this case it
// can't lead to page faults or inadvertent copying from the enclave as we only ensured
// that the `src` pointer is aligned at an 8 byte boundary. As pages are 4096 bytes
// aligned, `aligned_src` must be on the same page as `src`. A similar argument can be made
// for `src + len`
copy_quadwords(aligned_src as _, tmp_buff.as_mut_ptr(), aligned_len);

// Copy the correct parts of the temporary buffer to the destination
ptr::copy(tmp_buff.as_ptr().add(pad0_size), dst, len);
let offset: usize;
let data: u64;
// Doing a memory read that's potentially out of bounds for `src`
// isn't supported by Rust, so we have to use assembly.
jethrogb marked this conversation as resolved.
asm!("
movl {src:e}, {offset:e}
andl $7, {offset:e}
andq $-8, {src}
movq ({src}), {dst}
",
src = inout(reg) src => _,
offset = out(reg) offset,
dst = out(reg) data,
options(nostack, att_syntax, readonly, pure)
);
let data = data.to_le_bytes();
ptr::copy_nonoverlapping(data.as_ptr().add(offset), dst, len);
}
}
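
For readers not fluent in AT&T syntax, here is a rough Rust rendition of what the assembly block computes (illustrative only; it is not a valid replacement, because the deliberate past-the-end read cannot be expressed in Rust):

// Illustrative sketch of the asm block above; NOT a valid replacement,
// since the aligned 8-byte read may extend past `src + len`.
unsafe fn read_misaligned_sketch(src: *const u8, dst: *mut u8, len: usize) {
    let offset = src as usize % 8;   // andl $7, {offset:e}
    let aligned = src as usize & !7; // andq $-8, {src}
    // A single aligned qword read. It cannot cross an 8-byte boundary, so
    // (given the safety precondition) it stays within the same page and on
    // the same side of the enclave boundary as `src..src + len`.
    let data = unsafe { (aligned as *const u64).read() }; // movq ({src}), {dst}
    let bytes = data.to_le_bytes();
    unsafe { core::ptr::copy_nonoverlapping(bytes.as_ptr().add(offset), dst, len) };
}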

assert!(!src.is_null());
assert!(!dst.is_null());
assert!(is_user_range(src, len));
assert!(is_enclave_range(dst, len));
assert!(!(src as usize).overflowing_add(len + 8).1);
assert!(!(dst as usize).overflowing_add(len + 8).1);
assert!(len < isize::MAX as usize);
assert!(!(src as usize).overflowing_add(len).1);
assert!(!(dst as usize).overflowing_add(len).1);

if len < 8 {
copy_misaligned_chunk_to_enclave(src, dst, len);
} else if len % 8 == 0 && src as usize % 8 == 0 {
// Copying 8-byte aligned quadwords: copy quad word per quad word
unsafe {
copy_quadwords(src, dst, len);
}
} else {
// Split copies into three parts:
// +--------+
// | small0 | Chunk smaller than 8 bytes
// +--------+
// | big | Chunk 8-byte aligned, and size a multiple of 8 bytes
// +--------+
// | small1 | Chunk smaller than 8 bytes
// +--------+
let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len);
unsafe {
let (len1, len2, len3) = u64_align_to_guaranteed(src, len);
let (src1, dst1) = (src, dst);
let (src2, dst2) = (src1.add(len1), dst1.add(len1));
let (src3, dst3) = (src2.add(len2), dst2.add(len2));

unsafe {
// Copy small0
copy_misaligned_chunk_to_enclave(src, dst, small0_size);

// Copy big
let big_src = src.add(small0_size);
let big_dst = dst.add(small0_size);
copy_quadwords(big_src, big_dst, big_size);

// Copy small1
let small1_src = src.add(big_size + small0_size);
let small1_dst = dst.add(big_size + small0_size);
copy_misaligned_chunk_to_enclave(small1_src, small1_dst, small1_size);
}
read_misaligned_from_userspace(src1, dst1, len1);
copy_quadwords(src2, dst2, len2);
read_misaligned_from_userspace(src3, dst3, len3);
}
}

Expand Down Expand Up @@ -609,9 +572,9 @@ where
/// Copies the value from user memory into enclave memory.
pub fn to_enclave(&self) -> T {
unsafe {
let mut data: T = mem::MaybeUninit::uninit().assume_init();
copy_from_userspace(self.0.get() as _, &mut data as *mut T as _, mem::size_of::<T>());
data
let mut data = mem::MaybeUninit::uninit();
copy_from_userspace(self.0.get() as _, data.as_mut_ptr() as _, mem::size_of::<T>());
data.assume_init()
Comment on lines +575 to +577

Member: This alone is a necessary improvement.

}
}
}
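
The fix above is the canonical MaybeUninit pattern: hand out a raw pointer to uninitialized storage, initialize every byte through it, and only then call assume_init. The old mem::MaybeUninit::uninit().assume_init() conjured a T out of uninitialized memory, which is undefined behavior for nearly all types. A minimal standalone sketch of the pattern (illustrative; the `fill` callback is a stand-in for copy_from_userspace and is assumed to initialize all bytes it is given):

use core::mem::MaybeUninit;

// Sketch of the MaybeUninit pattern used by the fix above, assuming `fill`
// initializes all `size_of::<T>()` bytes behind the pointer it receives.
unsafe fn to_enclave_sketch<T>(fill: impl FnOnce(*mut u8, usize)) -> T {
    let mut data = MaybeUninit::<T>::uninit();
    // Write through the raw pointer; no reference to uninitialized
    // memory is ever created.
    fill(data.as_mut_ptr() as *mut u8, core::mem::size_of::<T>());
    // Sound only once every byte of `data` has been initialized.
    unsafe { data.assume_init() }
}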