rustc: Allow safe access of shareable static muts #14862

Closed · wants to merge 3 commits

Changes from all commits
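As a quick orientation before the file-by-file diff: the sketch below, written in the same pre-1.0 dialect as the code in this PR, illustrates the pattern the change legalizes. The names COUNTER and next_id are assumptions for illustration, not code from the PR; the pattern itself mirrors the bookkeeping.rs and stack.rs hunks further down, where a static mut whose type is Share (here an atomic) is read and updated without an unsafe block.

// Illustrative sketch only (not part of the PR): a Share static mut accessed
// without `unsafe`, mirroring the bookkeeping.rs and stack.rs changes below.
use std::sync::atomics::{AtomicUint, INIT_ATOMIC_UINT, SeqCst};

static mut COUNTER: AtomicUint = INIT_ATOMIC_UINT;

fn next_id() -> uint {
    // Before this change the fetch_add had to be wrapped in `unsafe { ... }`;
    // with it, the effect checker accepts the call because AtomicUint is
    // Share and fetch_add only needs a shared borrow of the static.
    COUNTER.fetch_add(1, SeqCst)
}

fn main() {
    assert_eq!(next_id(), 0);
    assert_eq!(next_id(), 1);
}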
1 change: 1 addition & 0 deletions src/libgreen/lib.rs
@@ -395,6 +395,7 @@ impl SchedPool {
///
/// This will configure the pool according to the `config` parameter, and
/// initially run `main` inside the pool of schedulers.
#[allow(unused_unsafe)] // NOTE: remove after a stage0 snap
pub fn new(config: PoolConfig) -> SchedPool {
static mut POOL_ID: AtomicUint = INIT_ATOMIC_UINT;

17 changes: 17 additions & 0 deletions src/libgreen/stack.rs
@@ -157,6 +157,23 @@ impl StackPool {
}
}

#[cfg(not(stage0))]
fn max_cached_stacks() -> uint {
static mut AMT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
match AMT.load(atomics::SeqCst) {
0 => {}
n => return n - 1,
}
let amt = getenv("RUST_MAX_CACHED_STACKS").and_then(|s| from_str(s.as_slice()));
// This default corresponds to 20M of cache per scheduler (at the
// default size).
let amt = amt.unwrap_or(10);
// 0 is our sentinel value, so ensure that we'll never see 0 after
// initialization has run
AMT.store(amt + 1, atomics::SeqCst);
return amt;
}
#[cfg(stage0)]
fn max_cached_stacks() -> uint {
static mut AMT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
match unsafe { AMT.load(atomics::SeqCst) } {
54 changes: 50 additions & 4 deletions src/liblog/lib.rs
@@ -124,6 +124,7 @@ use std::mem;
use std::os;
use std::rt;
use std::slice;
use std::sync::atomics;
use std::sync::{Once, ONCE_INIT};

use directive::LOG_LEVEL_NAMES;
@@ -141,7 +142,7 @@ static DEFAULT_LOG_LEVEL: u32 = 1;
/// An unsafe constant that is the maximum logging level of any module
/// specified. This is the first line of defense in determining whether a
/// logging statement should be run.
static mut LOG_LEVEL: u32 = MAX_LOG_LEVEL;
static mut LOG_LEVEL: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;

static mut DIRECTIVES: *const Vec<directive::LogDirective> =
0 as *const Vec<directive::LogDirective>;
@@ -242,7 +243,29 @@ pub fn log(level: u32, loc: &'static LogLocation, args: &fmt::Arguments) {
/// safely
#[doc(hidden)]
#[inline(always)]
pub fn log_level() -> u32 { unsafe { LOG_LEVEL } }
#[cfg(not(stage0))]
pub fn log_level() -> u32 {
// We need accessing the log level to be as fast as possible, so we want to
// use as relaxed an ordering as possible. Once the log level has been
// initialized, it will never change. The default log level also
// indicates that we *must* log everything (or at least attempt to run
// initialization).
//
// For this reason, we do a relaxed load here. It will either read the
// initial value of LOG_LEVEL, in which case the more expensive check will
// be run, or it will read the updated value, in which case it reads the
// correct value.
//
// Also note that the log level stored is the real log level plus 1, so the
// static initialization of 0 indicates that "everything must be logged".
LOG_LEVEL.load(atomics::Relaxed) as u32 - 1
}

/// dox
#[doc(hidden)]
#[inline(always)]
#[cfg(stage0)]
pub fn log_level() -> u32 { unsafe { LOG_LEVEL.load(atomics::Relaxed) as u32 - 1 } }

/// Replaces the task-local logger with the specified logger, returning the old
/// logger.
@@ -282,6 +305,26 @@ pub struct LogLocation {
/// logging. This is the second layer of defense in determining whether a
/// module's log statement should be emitted or not.
#[doc(hidden)]
#[cfg(not(stage0))]
pub fn mod_enabled(level: u32, module: &str) -> bool {
static mut INIT: Once = ONCE_INIT;
INIT.doit(init);

// It's possible for many threads to be in this function, but only one of
// them will perform the global initialization. All of them will need to
// check again whether they should really be here or not. Hence, despite
// this check being expanded manually in the logging macro, this function
// checks the log level again.
if level > log_level() { return false }

// This assertion should never get tripped unless we're in an at_exit
// handler after logging has been torn down and a logging attempt was made.
assert!(!DIRECTIVES.is_null());

enabled(level, module, unsafe { (*DIRECTIVES).iter() })
}
/// dox
#[cfg(stage0)]
pub fn mod_enabled(level: u32, module: &str) -> bool {
static mut INIT: Once = ONCE_INIT;
unsafe { INIT.doit(init); }
@@ -291,7 +334,7 @@ pub fn mod_enabled(level: u32, module: &str) -> bool {
// again whether they should really be here or not. Hence, despite this
// check being expanded manually in the logging macro, this function checks
// the log level again.
if level > unsafe { LOG_LEVEL } { return false }
if level > log_level() { return false }

// This assertion should never get tripped unless we're in an at_exit
// handler after logging has been torn down and a logging attempt was made.
@@ -320,6 +363,7 @@ fn enabled(level: u32,
///
/// This is not threadsafe at all, so initialization is performed through a
/// `Once` primitive (and this function is called from that primitive).
#[allow(unused_unsafe)] // NOTE: remove after a stage0 snap
fn init() {
let mut directives = match os::getenv("RUST_LOG") {
Some(spec) => directive::parse_logging_spec(spec.as_slice()),
@@ -340,8 +384,10 @@ fn init() {
};

unsafe {
LOG_LEVEL = max_level;
LOG_LEVEL.store(max_level as uint + 1, atomics::SeqCst);
}

unsafe {
assert!(DIRECTIVES.is_null());
DIRECTIVES = mem::transmute(box directives);

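An aside on the LOG_LEVEL encoding above: the stored value is the real level plus one, so the wrapping subtraction in log_level() turns the static initializer of 0 into u32::MAX, which filters nothing out and steers callers into the initialization path. The standalone sketch below is illustrative only; LEVEL, get, and init_to are assumed names, not code from the PR.

// Illustrative sketch of the "+1" sentinel encoding used by log_level()/init().
use std::sync::atomics::{AtomicUint, INIT_ATOMIC_UINT, Relaxed, SeqCst};

static mut LEVEL: AtomicUint = INIT_ATOMIC_UINT; // 0 means "not initialized yet"

fn get() -> u32 {
    // An uninitialized load of 0 wraps to u32::MAX: "log everything".
    LEVEL.load(Relaxed) as u32 - 1
}

fn init_to(real_level: u32) {
    // Store the real level plus 1 so 0 stays reserved as the sentinel.
    LEVEL.store(real_level as uint + 1, SeqCst);
}

fn main() {
    assert_eq!(get(), std::u32::MAX); // before init: nothing is filtered out
    init_to(2);
    assert_eq!(get(), 2);             // after init: the real level is recovered
}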
1 change: 1 addition & 0 deletions src/libnative/io/process.rs
@@ -932,6 +932,7 @@ fn waitpid(pid: pid_t, deadline: u64) -> IoResult<rtio::ProcessExit> {
}

#[cfg(unix)]
#[allow(unused_unsafe)] // NOTE: remove after a stage0 snap
fn waitpid(pid: pid_t, deadline: u64) -> IoResult<rtio::ProcessExit> {
use std::cmp;
use std::comm;
32 changes: 32 additions & 0 deletions src/libnative/io/timer_unix.rs
@@ -202,6 +202,24 @@ fn helper(input: libc::c_int, messages: Receiver<Req>, _: ()) {
}

impl Timer {
#[cfg(not(stage0))]
pub fn new() -> IoResult<Timer> {
HELPER.boot(|| {}, helper);

static mut ID: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
let id = ID.fetch_add(1, atomics::Relaxed);
Ok(Timer {
id: id,
inner: Some(box Inner {
cb: None,
interval: 0,
target: 0,
repeat: false,
id: id,
})
})
}
#[cfg(stage0)]
pub fn new() -> IoResult<Timer> {
// See notes above regarding using int return value
// instead of ()
@@ -233,6 +251,18 @@ impl Timer {
}
}

#[cfg(not(stage0))]
fn inner(&mut self) -> Box<Inner> {
match self.inner.take() {
Some(i) => i,
None => {
let (tx, rx) = channel();
HELPER.send(RemoveTimer(self.id, tx));
rx.recv()
}
}
}
#[cfg(stage0)]
fn inner(&mut self) -> Box<Inner> {
match self.inner.take() {
Some(i) => i,
@@ -254,6 +284,7 @@ impl rtio::RtioTimer for Timer {
Timer::sleep(msecs);
}

#[allow(unused_unsafe)] // NOTE: remove after a stage0 snap
fn oneshot(&mut self, msecs: u64, cb: Box<rtio::Callback + Send>) {
let now = now();
let mut inner = self.inner();
Expand All @@ -266,6 +297,7 @@ impl rtio::RtioTimer for Timer {
unsafe { HELPER.send(NewTimer(inner)); }
}

#[allow(unused_unsafe)] // NOTE: remove after a stage0 snap
fn period(&mut self, msecs: u64, cb: Box<rtio::Callback + Send>) {
let now = now();
let mut inner = self.inner();
111 changes: 109 additions & 2 deletions src/librustc/middle/effect.rs
@@ -15,6 +15,9 @@ use middle::def;
use middle::ty;
use middle::typeck::MethodCall;
use util::ppaux;
use util::nodemap::NodeSet;
use euv = middle::expr_use_visitor;
use mc = middle::mem_categorization;

use syntax::ast;
use syntax::ast_util::PostExpansionMethod;
@@ -40,10 +43,18 @@ fn type_is_unsafe_function(ty: ty::t) -> bool {
struct EffectCheckVisitor<'a> {
tcx: &'a ty::ctxt,

mutably_accessed_statics: &'a mut NodeSet,

/// Whether we're in an unsafe context.
unsafe_context: UnsafeContext,
}

struct FunctionVisitor<'a, 'b>(euv::ExprUseVisitor<'a, 'b, ty::ctxt>);

struct StaticMutChecker<'a> {
mutably_accessed_statics: NodeSet,
}

impl<'a> EffectCheckVisitor<'a> {
fn require_unsafe(&mut self, span: Span, description: &str) {
match self.unsafe_context {
@@ -142,6 +153,10 @@ impl<'a> Visitor<()> for EffectCheckVisitor<'a> {
}

fn visit_expr(&mut self, expr: &ast::Expr, _:()) {
if self.mutably_accessed_statics.remove(&expr.id) {
self.require_unsafe(expr.span, "mutable use of static")
}

match expr.node {
ast::ExprMethodCall(_, _, _) => {
let method_call = MethodCall::expr(expr.id);
@@ -185,7 +200,12 @@ impl<'a> Visitor<()> for EffectCheckVisitor<'a> {
ast::ExprPath(..) => {
match ty::resolve_expr(self.tcx, expr) {
def::DefStatic(_, true) => {
self.require_unsafe(expr.span, "use of mutable static")
let ty = ty::node_id_to_type(self.tcx, expr.id);
let contents = ty::type_contents(self.tcx, ty);
if !contents.is_sharable(self.tcx) {
self.require_unsafe(expr.span,
"use of non-Share static mut")
}
}
_ => {}
}
@@ -197,11 +217,98 @@ impl<'a> Visitor<()> for EffectCheckVisitor<'a> {
}
}

impl<'a, 'b> Visitor<()> for FunctionVisitor<'a, 'b> {
fn visit_fn(&mut self, fk: &visit::FnKind, fd: &ast::FnDecl,
b: &ast::Block, s: Span, _: ast::NodeId, _: ()) {
{
let FunctionVisitor(ref mut inner) = *self;
inner.walk_fn(fd, b);
}
visit::walk_fn(self, fk, fd, b, s, ());
}
}

impl<'a> StaticMutChecker<'a> {
fn is_static_mut(&self, mut cur: &mc::cmt) -> bool {
loop {
match cur.cat {
mc::cat_static_item => {
return match cur.mutbl {
mc::McImmutable => return false,
_ => true
}
}
mc::cat_deref(ref cmt, _, _) |
mc::cat_discr(ref cmt, _) |
mc::cat_downcast(ref cmt) |
mc::cat_interior(ref cmt, _) => cur = cmt,

mc::cat_rvalue(..) |
mc::cat_copied_upvar(..) |
mc::cat_upvar(..) |
mc::cat_local(..) |
mc::cat_arg(..) => return false
}
}
}
}

impl<'a> euv::Delegate for StaticMutChecker<'a> {
fn borrow(&mut self,
borrow_id: ast::NodeId,
_borrow_span: Span,
cmt: mc::cmt,
_loan_region: ty::Region,
bk: ty::BorrowKind,
_loan_cause: euv::LoanCause) {
if !self.is_static_mut(&cmt) {
return
}
match bk {
ty::ImmBorrow => {}
ty::UniqueImmBorrow | ty::MutBorrow => {
self.mutably_accessed_statics.insert(borrow_id);
}
}
}

fn mutate(&mut self,
assignment_id: ast::NodeId,
_assignment_span: Span,
assignee_cmt: mc::cmt,
_mode: euv::MutateMode) {
if !self.is_static_mut(&assignee_cmt) {
return
}
self.mutably_accessed_statics.insert(assignment_id);
}

fn consume(&mut self,
_consume_id: ast::NodeId,
_consume_span: Span,
_cmt: mc::cmt,
_mode: euv::ConsumeMode) {}
fn consume_pat(&mut self,
_consume_pat: &ast::Pat,
_cmt: mc::cmt,
_mode: euv::ConsumeMode) {}
fn decl_without_init(&mut self, _id: ast::NodeId, _span: Span) {}
}

pub fn check_crate(tcx: &ty::ctxt, krate: &ast::Crate) {
let mut delegate = StaticMutChecker {
mutably_accessed_statics: NodeSet::new(),
};
{
let visitor = euv::ExprUseVisitor::new(&mut delegate, tcx);
visit::walk_crate(&mut FunctionVisitor(visitor), krate, ());
}

let mut visitor = EffectCheckVisitor {
tcx: tcx,
unsafe_context: SafeContext,
mutably_accessed_statics: &mut delegate.mutably_accessed_statics,
};

visit::walk_crate(&mut visitor, krate, ());
assert!(visitor.mutably_accessed_statics.len() == 0);
}
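To make the boundary drawn by this new pass concrete, the sketch below shows hypothetical user code; the names are assumptions, and only the quoted error strings come from the require_unsafe calls above. Immutable uses of a Share static mut are accepted, while direct mutation, mutable borrows, and any use of a non-Share static mut still require an unsafe block.

// Hypothetical examples of what the effect checker still rejects (the error
// strings quoted in the comments are the ones added in this file).
static mut LIMIT: uint = 8; // uint is Share, so plain reads are allowed

fn write_limit() {
    // StaticMutChecker::mutate records this assignment; outside `unsafe` the
    // effect checker would report "mutable use of static".
    unsafe { LIMIT = 16; }
}

fn read_limit() -> uint {
    // An immutable use of a Share static mut needs no `unsafe` block.
    LIMIT
}

// A &mut borrow is recorded the same way (UniqueImmBorrow/MutBorrow in
// StaticMutChecker::borrow) and still needs `unsafe`, and any use of a
// static mut whose type is not Share is reported as
// "use of non-Share static mut".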
16 changes: 16 additions & 0 deletions src/librustrt/bookkeeping.rs
@@ -34,11 +34,18 @@ impl Drop for Token {

/// Increment the number of live tasks, returning a token which will decrement
/// the count when dropped.
#[cfg(stage0)]
pub fn increment() -> Token {
let _ = unsafe { TASK_COUNT.fetch_add(1, atomics::SeqCst) };
Token { _private: () }
}
#[cfg(not(stage0))]
pub fn increment() -> Token {
let _ = TASK_COUNT.fetch_add(1, atomics::SeqCst);
Token { _private: () }
}

#[cfg(stage0)]
pub fn decrement() {
unsafe {
if TASK_COUNT.fetch_sub(1, atomics::SeqCst) == 1 {
@@ -47,6 +54,15 @@ pub fn decrement() {
}
}
}
#[cfg(not(stage0))]
pub fn decrement() {
if TASK_COUNT.fetch_sub(1, atomics::SeqCst) == 1 {
unsafe {
let guard = TASK_LOCK.lock();
guard.signal();
}
}
}

/// Waits for all other native tasks in the system to exit. This is only used by
/// the entry points of native programs