
Commit 664c779

Auto merge of #56614 - Zoxc:query-perf2, r=michaelwoerister

Replace LockCell with atomic types

Split from #56509

r? @michaelwoerister

2 parents 8c97b6f + 9b47acf
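
The change replaces get-then-set sequences on a `LockCell` with single atomic operations. A minimal standalone sketch of the shape of the change for a shared counter (std types only, with `Mutex` standing in for rustc's `Lock`; this is not rustc code):

use std::sync::Mutex;
use std::sync::atomic::{AtomicU64, Ordering::SeqCst};

// Before: a Cell-like value behind a lock. Every read-modify-write is two
// separate calls (get, then set), each taking the lock on its own, so the
// increment is not one indivisible step.
fn bump_locked(counter: &Mutex<u64>) {
    let old = *counter.lock().unwrap();
    *counter.lock().unwrap() = old + 1;
}

// After: one atomic read-modify-write, with no lock and no window between
// the read and the write.
fn bump_atomic(counter: &AtomicU64) {
    counter.fetch_add(1, SeqCst);
}

fn main() {
    let locked = Mutex::new(0u64);
    bump_locked(&locked);
    assert_eq!(*locked.lock().unwrap(), 1);

    let atomic = AtomicU64::new(0);
    bump_atomic(&atomic);
    assert_eq!(atomic.load(SeqCst), 1);
}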

5 files changed, +99 -178 lines

src/librustc/session/mod.rs (+28 -19)
@@ -16,7 +16,10 @@ use util::common::{duration_to_secs_str, ErrorReported};
 use util::common::ProfileQueriesMsg;
 
 use rustc_data_structures::base_n;
-use rustc_data_structures::sync::{self, Lrc, Lock, LockCell, OneThread, Once, RwLock};
+use rustc_data_structures::sync::{
+    self, Lrc, Lock, OneThread, Once, RwLock, AtomicU64, AtomicUsize, Ordering,
+    Ordering::SeqCst,
+};
 
 use errors::{self, DiagnosticBuilder, DiagnosticId, Applicability};
 use errors::emitter::{Emitter, EmitterWriter};
@@ -41,13 +44,19 @@ use std::io::Write;
 use std::path::PathBuf;
 use std::time::Duration;
 use std::sync::mpsc;
-use std::sync::atomic::{AtomicUsize, Ordering};
 
 mod code_stats;
 pub mod config;
 pub mod filesearch;
 pub mod search_paths;
 
+pub struct OptimizationFuel {
+    /// If -zfuel=crate=n is specified, initially set to n. Otherwise 0.
+    remaining: u64,
+    /// We're rejecting all further optimizations.
+    out_of_fuel: bool,
+}
+
 /// Represents the data associated with a compilation
 /// session for a single crate.
 pub struct Session {
@@ -137,16 +146,15 @@ pub struct Session {
 
     /// If -zfuel=crate=n is specified, Some(crate).
     optimization_fuel_crate: Option<String>,
-    /// If -zfuel=crate=n is specified, initially set to n. Otherwise 0.
-    optimization_fuel_limit: LockCell<u64>,
-    /// We're rejecting all further optimizations.
-    out_of_fuel: LockCell<bool>,
+
+    /// Tracks fuel info if If -zfuel=crate=n is specified
+    optimization_fuel: Lock<OptimizationFuel>,
 
     // The next two are public because the driver needs to read them.
     /// If -zprint-fuel=crate, Some(crate).
     pub print_fuel_crate: Option<String>,
     /// Always set to zero and incremented so that we can print fuel expended by a crate.
-    pub print_fuel: LockCell<u64>,
+    pub print_fuel: AtomicU64,
 
     /// Loaded up early on in the initialization of this `Session` to avoid
     /// false positives about a job server in our environment.
@@ -871,20 +879,20 @@ impl Session {
         if let Some(ref c) = self.optimization_fuel_crate {
             if c == crate_name {
                 assert_eq!(self.query_threads(), 1);
-                let fuel = self.optimization_fuel_limit.get();
-                ret = fuel != 0;
-                if fuel == 0 && !self.out_of_fuel.get() {
+                let mut fuel = self.optimization_fuel.lock();
+                ret = fuel.remaining != 0;
+                if fuel.remaining == 0 && !fuel.out_of_fuel {
                     eprintln!("optimization-fuel-exhausted: {}", msg());
-                    self.out_of_fuel.set(true);
-                } else if fuel > 0 {
-                    self.optimization_fuel_limit.set(fuel - 1);
+                    fuel.out_of_fuel = true;
+                } else if fuel.remaining > 0 {
+                    fuel.remaining -= 1;
                 }
             }
         }
         if let Some(ref c) = self.print_fuel_crate {
            if c == crate_name {
                 assert_eq!(self.query_threads(), 1);
-                self.print_fuel.set(self.print_fuel.get() + 1);
+                self.print_fuel.fetch_add(1, SeqCst);
             }
         }
         ret
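
As a standalone illustration of the new shape (std types only; `Mutex` stands in for rustc's `Lock`, and the `consume_fuel` helper below is hypothetical, not rustc's API): the fuel check and decrement now happen inside one critical section over a single `OptimizationFuel` value, and the `print_fuel` counter is bumped with one `fetch_add` instead of a separate `get` and `set`.

use std::sync::Mutex;
use std::sync::atomic::{AtomicU64, Ordering::SeqCst};

// Same fields as the OptimizationFuel struct introduced above.
struct OptimizationFuel {
    remaining: u64,
    out_of_fuel: bool,
}

// Hypothetical standalone helper mirroring the logic in the hunk above:
// check and spend fuel under a single lock, count spent fuel atomically.
fn consume_fuel(fuel: &Mutex<OptimizationFuel>, print_fuel: &AtomicU64) -> bool {
    let mut fuel = fuel.lock().unwrap();
    let ret = fuel.remaining != 0;
    if fuel.remaining == 0 && !fuel.out_of_fuel {
        eprintln!("optimization-fuel-exhausted");
        fuel.out_of_fuel = true;
    } else if fuel.remaining > 0 {
        fuel.remaining -= 1;
    }
    // One atomic read-modify-write replaces `print_fuel.set(print_fuel.get() + 1)`.
    print_fuel.fetch_add(1, SeqCst);
    ret
}

fn main() {
    let fuel = Mutex::new(OptimizationFuel { remaining: 1, out_of_fuel: false });
    let print_fuel = AtomicU64::new(0);
    assert!(consume_fuel(&fuel, &print_fuel));  // spends the last unit
    assert!(!consume_fuel(&fuel, &print_fuel)); // prints the exhausted message once
    assert_eq!(print_fuel.load(SeqCst), 2);
}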
@@ -1134,10 +1142,12 @@ pub fn build_session_(
         local_crate_source_file.map(|path| file_path_mapping.map_prefix(path).0);
 
     let optimization_fuel_crate = sopts.debugging_opts.fuel.as_ref().map(|i| i.0.clone());
-    let optimization_fuel_limit =
-        LockCell::new(sopts.debugging_opts.fuel.as_ref().map(|i| i.1).unwrap_or(0));
+    let optimization_fuel = Lock::new(OptimizationFuel {
+        remaining: sopts.debugging_opts.fuel.as_ref().map(|i| i.1).unwrap_or(0),
+        out_of_fuel: false,
+    });
     let print_fuel_crate = sopts.debugging_opts.print_fuel.clone();
-    let print_fuel = LockCell::new(0);
+    let print_fuel = AtomicU64::new(0);
 
     let working_dir = env::current_dir().unwrap_or_else(|e|
         p_s.span_diagnostic
@@ -1199,10 +1209,9 @@ pub fn build_session_(
         },
         code_stats: Default::default(),
         optimization_fuel_crate,
-        optimization_fuel_limit,
+        optimization_fuel,
         print_fuel_crate,
         print_fuel,
-        out_of_fuel: LockCell::new(false),
         // Note that this is unsafe because it may misinterpret file descriptors
         // on Unix as jobserver file descriptors. We hopefully execute this near
         // the beginning of the process though to ensure we don't get false

src/librustc_data_structures/lib.rs (+1)
@@ -21,6 +21,7 @@
 #![feature(hash_raw_entry)]
 #![feature(stmt_expr_attributes)]
 #![feature(core_intrinsics)]
+#![feature(integer_atomics)]
 
 #![cfg_attr(unix, feature(libc))]
 #![cfg_attr(test, feature(test))]
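
When this commit landed, `AtomicU64` and the other fixed-width integer atomics were nightly-only behind the `integer_atomics` feature gate (they have since been stabilized), hence the new opt-in above. A minimal sketch of a crate using the gate, assuming a nightly toolchain of that era (not rustc code):

// Nightly-only at the time: opt in to the fixed-width atomic types.
#![feature(integer_atomics)]

use std::sync::atomic::{AtomicU64, Ordering::SeqCst};

fn main() {
    let spent = AtomicU64::new(0);
    spent.fetch_add(1, SeqCst);
    assert_eq!(spent.load(SeqCst), 1);
}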

src/librustc_data_structures/sync.rs (+61 -149)
@@ -10,10 +10,6 @@
 //! It internally uses `parking_lot::RwLock` if cfg!(parallel_queries) is true,
 //! `RefCell` otherwise.
 //!
-//! `LockCell` is a thread safe version of `Cell`, with `set` and `get` operations.
-//! It can never deadlock. It uses `Cell` when
-//! cfg!(parallel_queries) is false, otherwise it is a `Lock`.
-//!
 //! `MTLock` is a mutex which disappears if cfg!(parallel_queries) is false.
 //!
 //! `MTRef` is a immutable reference if cfg!(parallel_queries), and an mutable reference otherwise.
@@ -23,11 +19,7 @@
 
 use std::collections::HashMap;
 use std::hash::{Hash, BuildHasher};
-use std::cmp::Ordering;
 use std::marker::PhantomData;
-use std::fmt::Debug;
-use std::fmt::Formatter;
-use std::fmt;
 use std::ops::{Deref, DerefMut};
 use owning_ref::{Erased, OwningRef};
 
@@ -54,6 +46,9 @@ pub fn serial_scope<F, R>(f: F) -> R
     f(&SerialScope)
 }
 
+pub use std::sync::atomic::Ordering::SeqCst;
+pub use std::sync::atomic::Ordering;
+
 cfg_if! {
     if #[cfg(not(parallel_queries))] {
         pub auto trait Send {}
@@ -69,6 +64,62 @@ cfg_if! {
             }
         }
 
+        use std::ops::Add;
+
+        #[derive(Debug)]
+        pub struct Atomic<T: Copy>(Cell<T>);
+
+        impl<T: Copy> Atomic<T> {
+            pub fn new(v: T) -> Self {
+                Atomic(Cell::new(v))
+            }
+        }
+
+        impl<T: Copy + PartialEq> Atomic<T> {
+            pub fn into_inner(self) -> T {
+                self.0.into_inner()
+            }
+
+            pub fn load(&self, _: Ordering) -> T {
+                self.0.get()
+            }
+
+            pub fn store(&self, val: T, _: Ordering) {
+                self.0.set(val)
+            }
+
+            pub fn swap(&self, val: T, _: Ordering) -> T {
+                self.0.replace(val)
+            }
+
+            pub fn compare_exchange(&self,
+                                    current: T,
+                                    new: T,
+                                    _: Ordering,
+                                    _: Ordering)
+                                    -> Result<T, T> {
+                let read = self.0.get();
+                if read == current {
+                    self.0.set(new);
+                    Ok(read)
+                } else {
+                    Err(read)
+                }
+            }
+        }
+
+        impl<T: Add<Output=T> + Copy> Atomic<T> {
+            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
+                let old = self.0.get();
+                self.0.set(old + val);
+                old
+            }
+        }
+
+        pub type AtomicUsize = Atomic<usize>;
+        pub type AtomicBool = Atomic<bool>;
+        pub type AtomicU64 = Atomic<u64>;
+
         pub use self::serial_join as join;
         pub use self::serial_scope as scope;
 
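The `Atomic<T>` shim mirrors the `std::sync::atomic` method signatures while ignoring the `Ordering` argument, since there is only one thread when `parallel_queries` is off; call sites therefore compile unchanged whichever implementation the `AtomicU64`/`AtomicUsize`/`AtomicBool` aliases resolve to. A standalone sketch of that shared surface, exercised here against std's real `AtomicU64` (not rustc code):

use std::sync::atomic::{AtomicU64, Ordering::SeqCst};

fn main() {
    let counter = AtomicU64::new(0);

    // fetch_add returns the previous value in both the std type and the shim.
    assert_eq!(counter.fetch_add(1, SeqCst), 0);
    assert_eq!(counter.load(SeqCst), 1);

    // compare_exchange succeeds (returning the old value) only when the
    // current value matches `current`; otherwise it returns Err(actual).
    assert_eq!(counter.compare_exchange(1, 5, SeqCst, SeqCst), Ok(1));
    assert_eq!(counter.compare_exchange(3, 7, SeqCst, SeqCst), Err(5));

    // swap stores the new value and returns the old one.
    assert_eq!(counter.swap(9, SeqCst), 5);
    assert_eq!(counter.load(SeqCst), 9);
}
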
@@ -160,47 +211,6 @@
                 MTLock(self.0.clone())
             }
         }
-
-        pub struct LockCell<T>(Cell<T>);
-
-        impl<T> LockCell<T> {
-            #[inline(always)]
-            pub fn new(inner: T) -> Self {
-                LockCell(Cell::new(inner))
-            }
-
-            #[inline(always)]
-            pub fn into_inner(self) -> T {
-                self.0.into_inner()
-            }
-
-            #[inline(always)]
-            pub fn set(&self, new_inner: T) {
-                self.0.set(new_inner);
-            }
-
-            #[inline(always)]
-            pub fn get(&self) -> T where T: Copy {
-                self.0.get()
-            }
-
-            #[inline(always)]
-            pub fn set_mut(&mut self, new_inner: T) {
-                self.0.set(new_inner);
-            }
-
-            #[inline(always)]
-            pub fn get_mut(&mut self) -> T where T: Copy {
-                self.0.get()
-            }
-        }
-
-        impl<T> LockCell<Option<T>> {
-            #[inline(always)]
-            pub fn take(&self) -> Option<T> {
-                unsafe { (*self.0.as_ptr()).take() }
-            }
-        }
     } else {
         pub use std::marker::Send as Send;
         pub use std::marker::Sync as Sync;
@@ -213,6 +223,8 @@
         pub use parking_lot::MutexGuard as LockGuard;
         pub use parking_lot::MappedMutexGuard as MappedLockGuard;
 
+        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU64};
+
         pub use std::sync::Arc as Lrc;
         pub use std::sync::Weak as Weak;
 
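Under cfg(parallel_queries) the same names now resolve to the real std atomics, which are `Sync`, so a counter such as `print_fuel` can be updated from several threads without a lock. A std-only sketch of that sharing (not rustc code):

use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering::SeqCst};
use std::thread;

fn main() {
    // Shared counter, no Mutex/Lock required.
    let counter = Arc::new(AtomicU64::new(0));
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let counter = Arc::clone(&counter);
            thread::spawn(move || {
                for _ in 0..1_000 {
                    counter.fetch_add(1, SeqCst);
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    // Every increment is an atomic read-modify-write, so none are lost.
    assert_eq!(counter.load(SeqCst), 4_000);
}
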
@@ -278,47 +290,6 @@ cfg_if! {
                 v.erase_send_sync_owner()
             }}
         }
-
-        pub struct LockCell<T>(Lock<T>);
-
-        impl<T> LockCell<T> {
-            #[inline(always)]
-            pub fn new(inner: T) -> Self {
-                LockCell(Lock::new(inner))
-            }
-
-            #[inline(always)]
-            pub fn into_inner(self) -> T {
-                self.0.into_inner()
-            }
-
-            #[inline(always)]
-            pub fn set(&self, new_inner: T) {
-                *self.0.lock() = new_inner;
-            }
-
-            #[inline(always)]
-            pub fn get(&self) -> T where T: Copy {
-                *self.0.lock()
-            }
-
-            #[inline(always)]
-            pub fn set_mut(&mut self, new_inner: T) {
-                *self.0.get_mut() = new_inner;
-            }
-
-            #[inline(always)]
-            pub fn get_mut(&mut self) -> T where T: Copy {
-                *self.0.get_mut()
-            }
-        }
-
-        impl<T> LockCell<Option<T>> {
-            #[inline(always)]
-            pub fn take(&self) -> Option<T> {
-                self.0.lock().take()
-            }
-        }
     }
 }
 
@@ -467,65 +438,6 @@ impl<T> Once<T> {
     }
 }
 
-impl<T: Copy + Debug> Debug for LockCell<T> {
-    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
-        f.debug_struct("LockCell")
-            .field("value", &self.get())
-            .finish()
-    }
-}
-
-impl<T:Default> Default for LockCell<T> {
-    /// Creates a `LockCell<T>`, with the `Default` value for T.
-    #[inline]
-    fn default() -> LockCell<T> {
-        LockCell::new(Default::default())
-    }
-}
-
-impl<T:PartialEq + Copy> PartialEq for LockCell<T> {
-    #[inline]
-    fn eq(&self, other: &LockCell<T>) -> bool {
-        self.get() == other.get()
-    }
-}
-
-impl<T:Eq + Copy> Eq for LockCell<T> {}
-
-impl<T:PartialOrd + Copy> PartialOrd for LockCell<T> {
-    #[inline]
-    fn partial_cmp(&self, other: &LockCell<T>) -> Option<Ordering> {
-        self.get().partial_cmp(&other.get())
-    }
-
-    #[inline]
-    fn lt(&self, other: &LockCell<T>) -> bool {
-        self.get() < other.get()
-    }
-
-    #[inline]
-    fn le(&self, other: &LockCell<T>) -> bool {
-        self.get() <= other.get()
-    }
-
-    #[inline]
-    fn gt(&self, other: &LockCell<T>) -> bool {
-        self.get() > other.get()
-    }
-
-    #[inline]
-    fn ge(&self, other: &LockCell<T>) -> bool {
-        self.get() >= other.get()
-    }
-}
-
-impl<T:Ord + Copy> Ord for LockCell<T> {
-    #[inline]
-    fn cmp(&self, other: &LockCell<T>) -> Ordering {
-        self.get().cmp(&other.get())
-    }
-}
-
 
 #[derive(Debug)]
 pub struct Lock<T>(InnerLock<T>);