|
41 | 41 | //! [^2] `MTLockRef` is a typedef. |
42 | 42 |
|
43 | 43 | pub use crate::marker::*; |
44 | | -use parking_lot::Mutex; |
45 | | -use std::any::Any; |
46 | 44 | use std::collections::HashMap; |
47 | 45 | use std::hash::{BuildHasher, Hash}; |
48 | 46 | use std::ops::{Deref, DerefMut}; |
49 | | -use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe}; |
50 | 47 |
|
51 | 48 | mod lock; |
52 | 49 | pub use lock::{Lock, LockGuard, Mode}; |
53 | 50 |
|
54 | 51 | mod worker_local; |
55 | 52 | pub use worker_local::{Registry, WorkerLocal}; |
56 | 53 |
|
| 54 | +mod parallel; |
| 55 | +#[cfg(parallel_compiler)] |
| 56 | +pub use parallel::scope; |
| 57 | +pub use parallel::{join, par_for_each_in, par_map, parallel_guard}; |
| 58 | + |
57 | 59 | pub use std::sync::atomic::Ordering; |
58 | 60 | pub use std::sync::atomic::Ordering::SeqCst; |
59 | 61 |
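The net effect of this hunk: the panic-guard machinery and the parallel helpers move out of `sync.rs` into a dedicated `parallel` submodule, with re-exports keeping call sites unchanged. Only `scope` is gated on `cfg(parallel_compiler)`, since it is rayon-backed with no sequential fallback; `join`, `par_for_each_in`, `par_map`, and `parallel_guard` are re-exported unconditionally. A sketch of what downstream imports keep looking like, assuming the new module preserves the old signatures:

```rust
// Call sites are unaffected: the helpers are still reachable through `sync`,
// even though their definitions now live in `sync::parallel`.
use rustc_data_structures::sync::{join, par_for_each_in, par_map, parallel_guard};

// Only available in parallel builds, matching the gated re-export above.
#[cfg(parallel_compiler)]
use rustc_data_structures::sync::scope;
```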
|
@@ -107,37 +109,6 @@ mod mode { |
107 | 109 |
|
108 | 110 | pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode}; |
109 | 111 |
|
110 | | -/// A guard used to hold panics that occur during a parallel section to later be unwound. |
111 | | -/// This is used for the parallel compiler to prevent fatal errors from non-deterministically |
112 | | -/// hiding errors by ensuring that everything in the section has completed executing before |
113 | | -/// continuing with unwinding. It's also used for the non-parallel code to ensure error message |
114 | | -/// output matches the parallel compiler for testing purposes. |
115 | | -pub struct ParallelGuard { |
116 | | - panic: Mutex<Option<Box<dyn Any + std::marker::Send + 'static>>>, |
117 | | -} |
118 | | - |
119 | | -impl ParallelGuard { |
120 | | - pub fn run<R>(&self, f: impl FnOnce() -> R) -> Option<R> { |
121 | | - catch_unwind(AssertUnwindSafe(f)) |
122 | | - .map_err(|err| { |
123 | | - *self.panic.lock() = Some(err); |
124 | | - }) |
125 | | - .ok() |
126 | | - } |
127 | | -} |
128 | | - |
129 | | -/// This gives access to a fresh parallel guard in the closure and will unwind any panics |
130 | | -/// caught in it after the closure returns. |
131 | | -#[inline] |
132 | | -pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R { |
133 | | - let guard = ParallelGuard { panic: Mutex::new(None) }; |
134 | | - let ret = f(&guard); |
135 | | - if let Some(panic) = guard.panic.into_inner() { |
136 | | - resume_unwind(panic); |
137 | | - } |
138 | | - ret |
139 | | -} |
140 | | - |
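For reference, a usage sketch of the moved `parallel_guard`, with hypothetical `compute_a`/`compute_b`: every closure in the section runs to completion before any caught panic is re-raised.

```rust
// Sketch -- compute_a/compute_b are hypothetical. If compute_a panics,
// `run` returns None for it, compute_b still executes, and the stashed
// panic resumes only after `parallel_guard`'s closure has returned.
let (a, b) = parallel_guard(|guard| {
    let a = guard.run(|| compute_a());
    let b = guard.run(|| compute_b());
    (a, b)
});
```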
141 | 112 | cfg_if! { |
142 | 113 | if #[cfg(not(parallel_compiler))] { |
143 | 114 | use std::ops::Add; |
@@ -229,44 +200,6 @@ cfg_if! { |
229 | 200 | pub type AtomicU32 = Atomic<u32>; |
230 | 201 | pub type AtomicU64 = Atomic<u64>; |
231 | 202 |
|
232 | | - pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB) |
233 | | - where A: FnOnce() -> RA, |
234 | | - B: FnOnce() -> RB |
235 | | - { |
236 | | - let (a, b) = parallel_guard(|guard| { |
237 | | - let a = guard.run(oper_a); |
238 | | - let b = guard.run(oper_b); |
239 | | - (a, b) |
240 | | - }); |
241 | | - (a.unwrap(), b.unwrap()) |
242 | | - } |
243 | | - |
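Usage is unchanged by the move; a minimal sketch with hypothetical `build_lhs`/`build_rhs`. Note that the `unwrap` calls in the implementation above are only reached when neither closure panicked, because `parallel_guard` re-raises any deferred panic before returning.

```rust
// Sketch: two independent computations, results returned in order.
// In the sequential build the closures simply run one after the other.
let (lhs, rhs) = join(|| build_lhs(), || build_rhs());
```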
244 | | - #[macro_export] |
245 | | - macro_rules! parallel { |
246 | | - ($($blocks:block),*) => {{ |
247 | | - $crate::sync::parallel_guard(|guard| { |
248 | | - $(guard.run(|| $blocks);)* |
249 | | - }); |
250 | | - }} |
251 | | - } |
252 | | - |
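A usage sketch of the sequential `parallel!` expansion, with hypothetical `check_foo`/`check_bar`/`tcx`: every block runs under one shared guard, so a panic in an early block cannot suppress diagnostics emitted by a later one.

```rust
// Sketch: in the sequential build the blocks execute in source order.
parallel!({ check_foo(tcx) }, { check_bar(tcx) });
```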
253 | | - pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) { |
254 | | - parallel_guard(|guard| { |
255 | | - t.into_iter().for_each(|i| { |
256 | | - guard.run(|| for_each(i)); |
257 | | - }); |
258 | | - }) |
259 | | - } |
260 | | - |
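A runnable sketch of the sequential `par_for_each_in`: ordinary iteration, except that each item is wrapped in the guard so one panicking item does not abort processing of the rest.

```rust
// Sketch: prints 0, 1, 2; a panic on any item would be deferred until
// all items had been visited.
par_for_each_in(0..3, |i| println!("{i}"));
```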
261 | | - pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>( |
262 | | - t: T, |
263 | | - mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R, |
264 | | - ) -> C { |
265 | | - parallel_guard(|guard| { |
266 | | - t.into_iter().filter_map(|i| guard.run(|| map(i))).collect() |
267 | | - }) |
268 | | - } |
269 | | - |
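And a sketch of `par_map`, which makes one subtlety visible: a panicking item is filtered out of the collected result (its `guard.run` returns `None`), and the deferred panic resumes only after the `collect` finishes.

```rust
// Sketch: maps and collects; with no panics the output has one element
// per input item.
let doubled: Vec<u32> = par_map(0u32..4, |i| i * 2);
assert_eq!(doubled, vec![0, 2, 4, 6]);
```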
270 | 203 | pub use std::rc::Rc as Lrc; |
271 | 204 | pub use std::rc::Weak as Weak; |
272 | 205 | pub use std::cell::Ref as ReadGuard; |
@@ -372,105 +305,6 @@ cfg_if! { |
372 | 305 |
|
373 | 306 | use std::thread; |
374 | 307 |
|
375 | | - #[inline] |
376 | | - pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB) |
377 | | - where |
378 | | - A: FnOnce() -> RA + DynSend, |
379 | | - B: FnOnce() -> RB + DynSend, |
380 | | - { |
381 | | - if mode::is_dyn_thread_safe() { |
382 | | - let oper_a = FromDyn::from(oper_a); |
383 | | - let oper_b = FromDyn::from(oper_b); |
384 | | - let (a, b) = rayon::join(move || FromDyn::from(oper_a.into_inner()()), move || FromDyn::from(oper_b.into_inner()())); |
385 | | - (a.into_inner(), b.into_inner()) |
386 | | - } else { |
387 | | - let (a, b) = parallel_guard(|guard| { |
388 | | - let a = guard.run(oper_a); |
389 | | - let b = guard.run(oper_b); |
390 | | - (a, b) |
391 | | - }); |
392 | | - (a.unwrap(), b.unwrap()) |
393 | | - } |
394 | | - } |
395 | | - |
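The `FromDyn` dance in the branch above is the subtle part: closures bounded by `DynSend` do not satisfy rayon's `Send` requirement, so they are wrapped before crossing threads and unwrapped on the other side; this is sound only because the branch runs under `is_dyn_thread_safe()`. The round trip in isolation (reusing `oper_a` from the function above):

```rust
// Wrap the DynSend closure so it satisfies rayon's `Send` bound, call it
// on the other thread, wrap its DynSend result for the trip back, then
// unwrap on the joining thread.
let f = FromDyn::from(oper_a);
let r = FromDyn::from(f.into_inner()());
let a = r.into_inner();
```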
396 | | -    // This function only works correctly when `mode::is_dyn_thread_safe()` returns true. |
397 | | - pub fn scope<'scope, OP, R>(op: OP) -> R |
398 | | - where |
399 | | - OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend, |
400 | | - R: DynSend, |
401 | | - { |
402 | | - let op = FromDyn::from(op); |
403 | | - rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner() |
404 | | - } |
405 | | - |
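A usage sketch for `scope`, with hypothetical `task_a`/`task_b`. Per the comment above, callers must only reach this in dyn-thread-safe mode; the spawned tasks may borrow from the enclosing frame and are all joined before `scope` returns.

```rust
// Sketch: both tasks complete before `scope` hands back its result.
scope(|s| {
    s.spawn(|_| task_a());
    s.spawn(|_| task_b());
});
```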
406 | | - /// Runs a list of blocks in parallel. The first block is executed immediately on |
407 | | - /// the current thread. Use that for the longest running block. |
408 | | - #[macro_export] |
409 | | - macro_rules! parallel { |
410 | | - (impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => { |
411 | | - parallel!(impl $fblock [$block, $($c,)*] [$($rest),*]) |
412 | | - }; |
413 | | - (impl $fblock:block [$($blocks:expr,)*] []) => { |
414 | | - ::rustc_data_structures::sync::scope(|s| { |
415 | | - $(let block = rustc_data_structures::sync::FromDyn::from(|| $blocks); |
416 | | - s.spawn(move |_| block.into_inner()());)* |
417 | | - (|| $fblock)(); |
418 | | - }); |
419 | | - }; |
420 | | - ($fblock:block, $($blocks:block),*) => { |
421 | | - if rustc_data_structures::sync::is_dyn_thread_safe() { |
422 | | - // Reverse the order of the later blocks since Rayon executes them in reverse order |
423 | | - // when using a single thread. This ensures the execution order matches that |
424 | | - // of a single-threaded rustc. |
425 | | - parallel!(impl $fblock [] [$($blocks),*]); |
426 | | - } else { |
427 | | - $crate::sync::parallel_guard(|guard| { |
428 | | - guard.run(|| $fblock); |
429 | | - $(guard.run(|| $blocks);)* |
430 | | - }); |
431 | | - } |
432 | | - }; |
433 | | - } |
434 | | - |
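The accumulator rules above reverse the trailing blocks at expansion time: `[{b}, {c}]` accumulates to `[{c}, {b}]`, a single rayon worker then executes spawned jobs LIFO, and the first block runs inline, so the observable order matches the sequential fallback. A sketch with hypothetical `a`/`b`/`c`:

```rust
// `a()` runs inline on the current thread. The expansion spawns `c` first,
// then `b`; a single rayon worker executes spawned jobs LIFO, so the
// observed order is a, b, c, matching the sequential fallback.
parallel!({ a() }, { b() }, { c() });
```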
435 | | - use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator}; |
436 | | - |
437 | | - pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>( |
438 | | - t: T, |
439 | | - for_each: impl Fn(I) + DynSync + DynSend |
440 | | - ) { |
441 | | - parallel_guard(|guard| { |
442 | | - if mode::is_dyn_thread_safe() { |
443 | | - let for_each = FromDyn::from(for_each); |
444 | | - t.into_par_iter().for_each(|i| { |
445 | | - guard.run(|| for_each(i)); |
446 | | - }); |
447 | | - } else { |
448 | | - t.into_iter().for_each(|i| { |
449 | | - guard.run(|| for_each(i)); |
450 | | - }); |
451 | | - } |
452 | | - }); |
453 | | - } |
454 | | - |
455 | | - pub fn par_map< |
456 | | - I, |
457 | | - T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>, |
458 | | - R: std::marker::Send, |
459 | | - C: FromIterator<R> + FromParallelIterator<R> |
460 | | - >( |
461 | | - t: T, |
462 | | - map: impl Fn(I) -> R + DynSync + DynSend |
463 | | - ) -> C { |
464 | | - parallel_guard(|guard| { |
465 | | - if mode::is_dyn_thread_safe() { |
466 | | - let map = FromDyn::from(map); |
467 | | - t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect() |
468 | | - } else { |
469 | | - t.into_iter().filter_map(|i| guard.run(|| map(i))).collect() |
470 | | - } |
471 | | - }) |
472 | | - } |
473 | | - |
474 | 308 | /// This makes locks panic if they are already held. |
475 | 309 | /// It is only useful when you are running in a single thread |
476 | 310 | const ERROR_CHECKING: bool = false; |
|