Support panicking and make unsupported platforms a nop #5

Closed
wants to merge 12 commits
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "stacker"
version = "0.1.3"
version = "0.1.4"
authors = ["Alex Crichton <alex@alexcrichton.com>"]
build = "build.rs"
license = "MIT/Apache-2.0"
8 changes: 4 additions & 4 deletions build.rs
@@ -15,8 +15,8 @@ fn main() {
} else if target.contains("windows") {
cfg.define("WINDOWS", None);
} else {
panic!("\n\nusing currently unsupported target triple with \
stacker: {}\n\n", target);
println!("cargo:rustc-cfg=fallback");
return;
}

if target.starts_with("x86_64") {
@@ -26,8 +26,8 @@ fn main() {
cfg.file(if msvc {"src/arch/i686.asm"} else {"src/arch/i686.S"});
cfg.define("X86", None);
} else {
panic!("\n\nusing currently unsupported target triple with \
stacker: {}\n\n", target);
println!("cargo:rustc-cfg=fallback");
return;
}

cfg.include("src/arch").compile("libstacker.a");
157 changes: 114 additions & 43 deletions src/lib.rs
@@ -21,34 +21,67 @@
//! // guaranteed to have at least 32K of stack
//! });
//! ```
//!
//! # Platform support
//!
//! Only Windows, macOS and Linux are supported. On other platforms the stack is
//! never grown, so deep recursion can still overflow your stack.

#![allow(improper_ctypes)]

#[macro_use]
extern crate cfg_if;
extern crate libc;

use std::cell::Cell;
use std::cell::{Cell, RefCell};

#[cfg(fallback)]
mod intern {
// these need to be unsafe to mirror the `extern` fns,
// otherwise callers either get `missing unsafe` errors
// or `unnecessary unsafe` warnings
pub unsafe fn __stacker_stack_pointer() -> usize {
panic!("not supported")
}

pub unsafe fn __stacker_switch_stacks(
_new_stack: usize,
_fnptr: unsafe extern fn(&mut &mut FnMut()),
_dataptr: &mut &mut FnMut(),
) {
panic!("not supported")
}

#[no_mangle]
extern fn __stacker_black_box(_: *const u8) {}
}

extern {
fn __stacker_stack_pointer() -> usize;
fn __stacker_switch_stacks(new_stack: usize,
fnptr: *const u8,
dataptr: *mut u8);
#[cfg(not(fallback))]
mod intern {
extern {
pub fn __stacker_stack_pointer() -> usize;
pub fn __stacker_switch_stacks(
new_stack: usize,
fnptr: unsafe extern fn(&mut &mut FnMut()),
dataptr: &mut &mut FnMut(),
);
}
}

use intern::*;

thread_local! {
static STACK_LIMIT: Cell<usize> = Cell::new(unsafe {
static STACK_LIMIT: Cell<Option<usize>> = Cell::new(unsafe {
guess_os_stack_limit()
})
}

fn get_stack_limit() -> usize {
fn get_stack_limit() -> Option<usize> {
STACK_LIMIT.with(|s| s.get())
}

fn set_stack_limit(l: usize) {
STACK_LIMIT.with(|s| s.set(l))
STACK_LIMIT.with(|s| s.set(Some(l)))
}

/// Grows the call stack if necessary.
@@ -60,48 +93,82 @@ fn set_stack_limit(l: usize) {
///
/// The closure `f` is guaranteed to run on a stack with at least `red_zone`
/// bytes, and it will be run on the current stack if there's space available.
pub fn maybe_grow<R, F: FnOnce() -> R>(red_zone: usize,
stack_size: usize,
f: F) -> R {
if remaining_stack() >= red_zone {
f()
pub fn maybe_grow<R, F: FnOnce() -> R>(red_zone: usize, stack_size: usize, f: F) -> R {
if let Some(remaining_stack_bytes) = remaining_stack() {
if remaining_stack_bytes >= red_zone {
f()
} else {
grow_the_stack(stack_size, f, remaining_stack_bytes)
}
} else {
grow_the_stack(stack_size, f)
f()
}
}

/// Queries the amount of remaining stack as interpreted by this library.
///
/// This function returns the amount of stack space left, which is used to
/// determine whether a stack switch should be made. `None` means the stack
/// limit could not be determined on this platform.
pub fn remaining_stack() -> usize {
unsafe {
__stacker_stack_pointer() - get_stack_limit()
}
pub fn remaining_stack() -> Option<usize> {
get_stack_limit().map(|limit| unsafe {
__stacker_stack_pointer() - limit
})
}

#[inline(never)]
fn grow_the_stack<R, F: FnOnce() -> R>(stack_size: usize, f: F) -> R {
fn grow_the_stack<R, F: FnOnce() -> R>(stack_size: usize, f: F, remaining_stack_bytes: usize) -> R {
let mut f = Some(f);
let mut ret = None;
unsafe {
_grow_the_stack(stack_size, &mut || {
ret = Some(f.take().unwrap()());
_grow_the_stack(stack_size, remaining_stack_bytes, &mut || {
let f: F = f.take().unwrap();
ret = Some(std::panic::catch_unwind(std::panic::AssertUnwindSafe(f)));
});
}
ret.unwrap()
match ret.unwrap() {
Ok(ret) => ret,
Err(payload) => std::panic::resume_unwind(payload),
}
}

unsafe fn _grow_the_stack(stack_size: usize, mut f: &mut FnMut()) {
// Align to 16-bytes (see below for why)
let stack_size = (stack_size + 15) / 16 * 16;
#[derive(Default)]
struct StackCache {
/// used to grow the stack exponentially
counter: usize,
/// remembers the largest stack allocation ever made so it can be reused
largest: Option<Vec<u8>>,
}

// Allocate some new stack for ourselves
let mut stack = Vec::<u8>::with_capacity(stack_size);
let new_limit = stack.as_ptr() as usize + 32 * 1024;
impl StackCache {
fn allocate(&mut self, stack_size: usize) -> Vec<u8> {
if let Some(largest) = self.largest.take() {
return largest;
}
let pow = 1 << self.counter;
self.counter += 1;
// Align to 16-bytes (see below for why)
let stack_size = (stack_size * pow + 15) / 16 * 16;
Vec::with_capacity(stack_size)
}

// Save off the old stack limits
let old_limit = get_stack_limit();
fn cache(&mut self, v: Vec<u8>) {
if let Some(ref largest) = self.largest {
debug_assert!(largest.capacity() > v.capacity());
} else {
self.largest = Some(v);
}
}
}

thread_local! {
static STACK_CACHE: RefCell<StackCache> = RefCell::default();
}

unsafe fn _grow_the_stack(stack_size: usize, old_limit: usize, mut f: &mut FnMut()) {

// Allocate some new stack for ourselves
let mut stack = STACK_CACHE.with(|sc| sc.borrow_mut().allocate(stack_size));
let new_limit = stack.as_ptr() as usize;

// Prepare stack limits for the stack switch
set_stack_limit(new_limit);
@@ -116,14 +183,18 @@ unsafe fn _grow_the_stack(stack_size: usize, mut f: &mut FnMut()) {
} else {
0
};
__stacker_switch_stacks(stack.as_mut_ptr() as usize + stack_size - offset,
doit as usize as *const _,
&mut f as *mut &mut FnMut() as *mut u8);
__stacker_switch_stacks(stack.as_mut_ptr() as usize + stack.capacity() - offset,
doit,
&mut f);

// Once we've returned, reset both stack limits and then return the same
// value the closure returned.
set_stack_limit(old_limit);

// Do not throw away this allocation. We might be on a stack boundary and end up
// pushing and popping stacks repeatedly
STACK_CACHE.with(|v| v.borrow_mut().cache(stack));

unsafe extern fn doit(f: &mut &mut FnMut()) {
f();
}
@@ -135,7 +206,7 @@ cfg_if! {
//
// https://github.com/adobe/webkit/blob/0441266/Source/WTF/wtf
// /StackBounds.cpp
unsafe fn guess_os_stack_limit() -> usize {
unsafe fn guess_os_stack_limit() -> Option<usize> {
#[cfg(target_pointer_width = "32")]
extern {
#[link_name = "__stacker_get_tib_32"]
@@ -151,12 +222,12 @@
// the struct layout of the 32-bit TIB. It looks like the struct
// layout of the 64-bit TIB is also the same for getting the stack
// limit: http://doxygen.reactos.org/d3/db0/structNT__TIB64.html
*get_tib_address().offset(2)
Some(*get_tib_address().offset(2))
}
} else if #[cfg(target_os = "linux")] {
use std::mem;

unsafe fn guess_os_stack_limit() -> usize {
unsafe fn guess_os_stack_limit() -> Option<usize> {
let mut attr: libc::pthread_attr_t = mem::zeroed();
assert_eq!(libc::pthread_attr_init(&mut attr), 0);
assert_eq!(libc::pthread_getattr_np(libc::pthread_self(),
@@ -166,18 +237,18 @@ cfg_if! {
assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackaddr,
&mut stacksize), 0);
assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
stackaddr as usize
Some(stackaddr as usize)
}
} else if #[cfg(target_os = "macos")] {
use libc::{c_void, pthread_t, size_t};

unsafe fn guess_os_stack_limit() -> usize {
libc::pthread_get_stackaddr_np(libc::pthread_self()) as usize -
libc::pthread_get_stacksize_np(libc::pthread_self()) as usize
unsafe fn guess_os_stack_limit() -> Option<usize> {
Some(libc::pthread_get_stackaddr_np(libc::pthread_self()) as usize -
libc::pthread_get_stacksize_np(libc::pthread_self()) as usize)
}
} else {
unsafe fn guess_os_stack_limit() -> usize {
panic!("cannot guess the stack limit on this platform");
unsafe fn guess_os_stack_limit() -> Option<usize> {
None
}
}
}
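
For reference, a minimal sketch (not part of this diff) of how calling code can use the reworked API; the helper name and sizes below are illustrative. On fallback platforms `remaining_stack()` yields `None` and `maybe_grow` simply runs the closure on the current stack:

```rust
extern crate stacker;

fn describe_stack() {
    // `remaining_stack` now returns Option<usize>: Some(bytes) on Windows,
    // macOS and Linux, None on targets built with the fallback cfg.
    match stacker::remaining_stack() {
        Some(bytes) => println!("roughly {} bytes of stack left", bytes),
        None => println!("stack limit unknown; stacker will not switch stacks"),
    }

    // From the caller's point of view maybe_grow behaves the same everywhere:
    // the closure is always executed and its return value passed through.
    let answer = stacker::maybe_grow(32 * 1024, 1024 * 1024, || 42);
    assert_eq!(answer, 42);
}
```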
27 changes: 27 additions & 0 deletions tests/panic_handling.rs
@@ -0,0 +1,27 @@
extern crate stacker;

const RED_ZONE: usize = 100*1024; // 100k
const STACK_PER_RECURSION: usize = 1 * 1024 * 1024; // 1MB

pub fn ensure_sufficient_stack<R, F: FnOnce() -> R + std::panic::UnwindSafe>(
f: F
) -> R {
stacker::maybe_grow(RED_ZONE, STACK_PER_RECURSION, f)
}

#[inline(never)]
fn recurse(n: usize) {
let x = [42u8; 50000];
if n == 0 {
panic!("an inconvenient time");
} else {
ensure_sufficient_stack(|| recurse(n - 1));
}
drop(x);
}

#[test]
#[should_panic]
fn foo() {
recurse(10000);
}
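
Since `grow_the_stack` now re-raises the payload with `resume_unwind`, the propagated panic behaves like any ordinary panic for callers. A small sketch (not part of this PR) that could live in the same test file, reusing the `recurse` helper above:

```rust
#[test]
fn panic_can_be_caught_by_the_caller() {
    // The panic raised deep inside the grown stack is re-raised on the
    // original stack, so a plain catch_unwind on the caller's side observes it.
    let result = std::panic::catch_unwind(|| recurse(10000));
    assert!(result.is_err());
}
```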
24 changes: 24 additions & 0 deletions tests/simple.rs
@@ -0,0 +1,24 @@
extern crate stacker;

const RED_ZONE: usize = 100*1024; // 100k
const STACK_PER_RECURSION: usize = 1 * 1024 * 1024; // 1MB

pub fn ensure_sufficient_stack<R, F: FnOnce() -> R + std::panic::UnwindSafe>(
f: F
) -> R {
stacker::maybe_grow(RED_ZONE, STACK_PER_RECURSION, f)
}

#[inline(never)]
fn recurse(n: usize) {
let x = [42u8; 50000];
if n != 0 {
ensure_sufficient_stack(|| recurse(n - 1));
}
drop(x);
}

#[test]
fn foo() {
recurse(10000);
}