
Commit e215eda
committed May 12, 2022
Add tests showing weak memory behaviours
1 parent bace17d · commit e215eda

File tree

5 files changed: +112 −35 lines changed
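Background for the tests below: a "weak memory behaviour" is an execution outcome that the C++11/Rust memory model allows but that no sequentially consistent interleaving of the threads could produce. The classic illustration is store buffering with Relaxed accesses; a minimal sketch (not taken from the files in this commit):

// Store-buffering litmus test: a minimal sketch, not code from this commit.
// With Relaxed stores and loads, both r1 and r2 may be 0, an outcome that no
// sequentially consistent interleaving of the two threads can produce.
use std::sync::atomic::{AtomicUsize, Ordering::*};
use std::thread::spawn;

fn store_buffering() -> (usize, usize) {
    // Leak the atomics so the references are 'static and can be moved into threads.
    let x: &'static AtomicUsize = Box::leak(Box::new(AtomicUsize::new(0)));
    let y: &'static AtomicUsize = Box::leak(Box::new(AtomicUsize::new(0)));

    let j1 = spawn(move || {
        x.store(1, Relaxed);
        y.load(Relaxed) // r1
    });
    let j2 = spawn(move || {
        y.store(1, Relaxed);
        x.load(Relaxed) // r2
    });

    (j1.join().unwrap(), j2.join().unwrap())
}

The tests added in this commit look for outcomes of exactly this kind, and the renamed consistency.rs checks that no outcome forbidden by the model is ever produced.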
 

tests/run-pass/concurrency/weak_memory.rs → tests/run-pass/weak_memory/consistency.rs (+4 −35)
@@ -1,8 +1,8 @@
 // ignore-windows: Concurrency on Windows is not supported yet.
 // compile-flags: -Zmiri-ignore-leaks -Zmiri-disable-stacked-borrows
 
-// Weak memory emulation tests. All of the following test if
-// our weak memory emulation produces any inconsistent execution outcomes
+// The following tests check whether our weak memory emulation produces
+// any inconsistent execution outcomes
 //
 // Due to the random nature of choosing valid stores, it is always
 // possible that our tests spuriously succeeds: even though our weak
@@ -12,26 +12,16 @@
 //
 // To mitigate this, each test is ran enough times such that the chance
 // of spurious success is very low. These tests never supriously fail.
-//
-// Note that we can't effectively test whether our weak memory emulation
-// can produce *all* consistent execution outcomes. This may be possible
-// if Miri's scheduler is sufficiently random and explores all possible
-// interleavings of our small test cases after a reasonable number of runs.
-// However, since Miri's scheduler is not even pre-emptive, there will
-// always be possible interleavings (and possible execution outcomes),
-// that can never be observed regardless of how weak memory emulation is
-// implemented.
 
 // Test cases and their consistent outcomes are from
 // http://svr-pes20-cppmem.cl.cam.ac.uk/cppmem/
 // Based on
 // M. Batty, S. Owens, S. Sarkar, P. Sewell and T. Weber,
 // "Mathematizing C++ concurrency", ACM SIGPLAN Notices, vol. 46, no. 1, pp. 55-66, 2011.
 // Available: https://ss265.host.cs.st-andrews.ac.uk/papers/n3132.pdf.
-#![feature(atomic_from_mut)]
 
+use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering::*;
-use std::sync::atomic::{AtomicU16, AtomicU32, AtomicUsize};
 use std::thread::{spawn, yield_now};
 
 #[derive(Copy, Clone)]
@@ -41,7 +31,7 @@ unsafe impl<T> Send for EvilSend<T> {}
 unsafe impl<T> Sync for EvilSend<T> {}
 
 // We can't create static items because we need to run each test
-// multiple tests
+// multiple times
 fn static_atomic(val: usize) -> &'static AtomicUsize {
     let ret = Box::leak(Box::new(AtomicUsize::new(val)));
     // A workaround to put the initialisation value in the store buffer
@@ -190,26 +180,6 @@ fn test_mixed_access() {
     assert_eq!(r2, 2);
 }
 
-// Strictly speaking, atomic accesses that imperfectly overlap with existing
-// atomic objects are UB. Nonetheless we'd like to provide a sane value when
-// the access is not racy.
-fn test_imperfectly_overlapping_access() {
-    let mut qword = AtomicU32::new(42);
-    assert_eq!(qword.load(Relaxed), 42);
-    qword.store(u32::to_be(0xabbafafa), Relaxed);
-
-    let qword_mut = qword.get_mut();
-
-    let dwords_mut = unsafe { std::mem::transmute::<&mut u32, &mut [u16; 2]>(qword_mut) };
-
-    let (hi_mut, lo_mut) = dwords_mut.split_at_mut(1);
-
-    let (hi, lo) = (AtomicU16::from_mut(&mut hi_mut[0]), AtomicU16::from_mut(&mut lo_mut[0]));
-
-    assert_eq!(u16::from_be(hi.load(Relaxed)), 0xabba);
-    assert_eq!(u16::from_be(lo.load(Relaxed)), 0xfafa);
-}
-
 // The following two tests are taken from Repairing Sequential Consistency in C/C++11
 // by Lahav et al.
 // https://plv.mpi-sws.org/scfix/paper.pdf
@@ -236,7 +206,6 @@ fn test_sc_store_buffering() {
 }
 
 pub fn main() {
-    test_imperfectly_overlapping_access();
     // TODO: does this make chances of spurious success
     // "sufficiently low"? This also takes a long time to run,
     // prehaps each function should be its own test case so they
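The hunks above show only the file's header comments and the code that was removed; the bulk of consistency.rs is unchanged and not shown. For orientation, a consistency test in this style asserts that an outcome forbidden by the C++11 model never occurs. A minimal release/acquire message-passing sketch in that spirit (illustrative only, not quoted from consistency.rs):

// Sketch of a consistency check, not quoted from the changed file.
// If the Acquire load observes the flag, the earlier data write must be visible;
// any other outcome would be an inconsistent execution.
use std::sync::atomic::{AtomicUsize, Ordering::*};
use std::thread::spawn;

fn message_passing() {
    let data: &'static AtomicUsize = Box::leak(Box::new(AtomicUsize::new(0)));
    let flag: &'static AtomicUsize = Box::leak(Box::new(AtomicUsize::new(0)));

    let writer = spawn(move || {
        data.store(42, Relaxed);
        flag.store(1, Release);
    });
    let reader = spawn(move || {
        if flag.load(Acquire) == 1 {
            // The Release store to `flag` synchronises-with this Acquire load,
            // so the store to `data` must already be visible here.
            assert_eq!(data.load(Relaxed), 42);
        }
    });

    writer.join().unwrap();
    reader.join().unwrap();
}

Such a test is then run many times from main() so that, even with randomised store selection, a violation would be caught with high probability.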
@@ -0,0 +1,29 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+#![feature(atomic_from_mut)]
+
+use std::sync::atomic::Ordering::*;
+use std::sync::atomic::{AtomicU16, AtomicU32};
+
+// Strictly speaking, atomic accesses that imperfectly overlap with existing
+// atomic objects are UB. Nonetheless we'd like to provide a sane value when
+// the access is not racy.
+fn test_same_thread() {
+    let mut qword = AtomicU32::new(42);
+    assert_eq!(qword.load(Relaxed), 42);
+    qword.store(u32::to_be(0xabbafafa), Relaxed);
+
+    let qword_mut = qword.get_mut();
+
+    let dwords_mut = unsafe { std::mem::transmute::<&mut u32, &mut [u16; 2]>(qword_mut) };
+
+    let (hi_mut, lo_mut) = dwords_mut.split_at_mut(1);
+
+    let (hi, lo) = (AtomicU16::from_mut(&mut hi_mut[0]), AtomicU16::from_mut(&mut lo_mut[0]));
+
+    assert_eq!(u16::from_be(hi.load(Relaxed)), 0xabba);
+    assert_eq!(u16::from_be(lo.load(Relaxed)), 0xfafa);
+}
+
+pub fn main() {
+    test_same_thread();
+}
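The u32::to_be / u16::from_be conversions are what make the expected values independent of host endianness: to_be fixes the in-memory byte order of the stored word, and from_be undoes it when the two halves are read back. A standalone sketch of that byte-level reasoning (uses only the standard library, not part of the commit):

// Byte-level view of the overlapping-access test above (sketch only).
// u32::to_be(0xabbafafa) pins the in-memory bytes to AB BA FA FA on any host,
// so the first u16 half always holds the big-endian bytes of 0xabba and the
// second half those of 0xfafa.
fn main() {
    let qword: u32 = u32::to_be(0xabbafafa);
    let bytes = qword.to_ne_bytes();
    assert_eq!(bytes, [0xab, 0xba, 0xfa, 0xfa]);

    let hi = u16::from_ne_bytes([bytes[0], bytes[1]]);
    let lo = u16::from_ne_bytes([bytes[2], bytes[3]]);
    assert_eq!(u16::from_be(hi), 0xabba);
    assert_eq!(u16::from_be(lo), 0xfafa);
}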

tests/run-pass/weak_memory/weak.rs (+77)

@@ -0,0 +1,77 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+// compile-flags: -Zmiri-ignore-leaks
+
+// Tests showing weak memory behaviours are exhibited. All tests
+// return true when the desired behaviour is seen.
+// This is scheduler and pseudo-RNG dependent, so each test is
+// run multiple times until one try returns true.
+// Spurious failure is possible, if you are really unlucky with
+// the RNG.
+
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::*;
+use std::thread::spawn;
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+// We can't create static items because we need to run each test
+// multiple times
+fn static_atomic(val: usize) -> &'static AtomicUsize {
+    let ret = Box::leak(Box::new(AtomicUsize::new(val)));
+    // A workaround to put the initialisation value in the store buffer
+    ret.store(val, Relaxed);
+    ret
+}
+
+fn relaxed() -> bool {
+    let x = static_atomic(0);
+    let j1 = spawn(move || {
+        x.store(1, Relaxed);
+        x.store(2, Relaxed);
+    });
+
+    let j2 = spawn(move || x.load(Relaxed));
+
+    j1.join().unwrap();
+    let r2 = j2.join().unwrap();
+
+    r2 == 1
+}
+
+// https://www.doc.ic.ac.uk/~afd/homepages/papers/pdfs/2017/POPL.pdf Figure 8
+fn seq_cst() -> bool {
+    let x = static_atomic(0);
+
+    let j1 = spawn(move || {
+        x.store(1, Relaxed);
+    });
+
+    let j2 = spawn(move || {
+        x.store(2, SeqCst);
+        x.store(3, SeqCst);
+    });
+
+    let j3 = spawn(move || x.load(SeqCst));
+
+    j1.join().unwrap();
+    j2.join().unwrap();
+    let r3 = j3.join().unwrap();
+
+    r3 == 1
+}
+
+// Asserts that the function returns true at least once in 100 runs
+macro_rules! assert_once {
+    ($f:ident) => {
+        assert!(std::iter::repeat_with(|| $f()).take(100).any(|x| x));
+    };
+}
+
+pub fn main() {
+    assert_once!(relaxed);
+    assert_once!(seq_cst);
+}
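A note on the assert_once! pattern above: each weak-behaviour test is a plain fn returning true when the behaviour is observed, and the macro retries it up to 100 times, so the whole test fails only if every try misses. Treating the runs as roughly independent, if one run shows the behaviour with probability p, all 100 miss it with probability about (1 - p)^100. A small illustrative calculation (the value of p is an assumption, not a measured figure):

// Rough spurious-failure estimate for the retry pattern (illustrative only).
fn spurious_failure_probability(p: f64) -> f64 {
    // Probability that 100 roughly independent runs all miss the behaviour.
    (1.0 - p).powi(100)
}

fn main() {
    // With an assumed per-run probability of 0.1, a spurious failure
    // happens only about 0.003% of the time.
    println!("{:.6}", spurious_failure_probability(0.1));
}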
+2

@@ -0,0 +1,2 @@
+warning: thread support is experimental and incomplete: weak memory effects are not emulated.
+