Skip to content

Commit 4fd1e62

Browse files
committed Jan 13, 2015
auto merge of #20367 : retep998/rust/master, r=alexcrichton
Also adjusted some of the FFI definitions because apparently they don't use the long pointer prefix. Gives a free performance boost because `SRWLock` is several times faster than `CriticalSection` on every Windows system tested. Fixes #19962
2 parents e94a9f0 + ee1ca88 commit 4fd1e62

File tree

3 files changed

+49
-83
lines changed

3 files changed

+49
-83
lines changed
 

‎src/libstd/sys/windows/condvar.rs

+8-6
Original file line numberDiff line numberDiff line change
@@ -27,16 +27,18 @@ impl Condvar {
2727

2828
#[inline]
2929
pub unsafe fn wait(&self, mutex: &Mutex) {
30-
let r = ffi::SleepConditionVariableCS(self.inner.get(),
31-
mutex::raw(mutex),
32-
libc::INFINITE);
30+
let r = ffi::SleepConditionVariableSRW(self.inner.get(),
31+
mutex::raw(mutex),
32+
libc::INFINITE,
33+
0);
3334
debug_assert!(r != 0);
3435
}
3536

3637
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
37-
let r = ffi::SleepConditionVariableCS(self.inner.get(),
38-
mutex::raw(mutex),
39-
dur.num_milliseconds() as DWORD);
38+
let r = ffi::SleepConditionVariableSRW(self.inner.get(),
39+
mutex::raw(mutex),
40+
dur.num_milliseconds() as DWORD,
41+
0);
4042
if r == 0 {
4143
const ERROR_TIMEOUT: DWORD = 0x5B4;
4244
debug_assert_eq!(os::errno() as uint, ERROR_TIMEOUT as uint);

‎src/libstd/sys/windows/mutex.rs

+25-47
Original file line numberDiff line numberDiff line change
@@ -8,73 +8,51 @@
88
// option. This file may not be copied, modified, or distributed
99
// except according to those terms.
1010

11-
use prelude::v1::*;
12-
13-
use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
14-
use alloc::{self, heap};
15-
16-
use libc::DWORD;
11+
use marker::Sync;
12+
use cell::UnsafeCell;
1713
use sys::sync as ffi;
1814

19-
const SPIN_COUNT: DWORD = 4000;
15+
pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> }
2016

21-
pub struct Mutex { inner: AtomicUsize }
22-
23-
pub const MUTEX_INIT: Mutex = Mutex { inner: ATOMIC_USIZE_INIT };
17+
pub const MUTEX_INIT: Mutex = Mutex {
18+
inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
19+
};
2420

2521
unsafe impl Sync for Mutex {}
2622

2723
#[inline]
28-
pub unsafe fn raw(m: &Mutex) -> ffi::LPCRITICAL_SECTION {
29-
m.get()
24+
pub unsafe fn raw(m: &Mutex) -> ffi::PSRWLOCK {
25+
m.inner.get()
3026
}
3127

28+
// So you might be asking why we're using SRWLock instead of CriticalSection?
29+
//
30+
// 1. SRWLock is several times faster than CriticalSection according to benchmarks performed on both
31+
// Windows 8 and Windows 7.
32+
//
33+
// 2. CriticalSection allows recursive locking while SRWLock deadlocks. The Unix implementation
34+
// deadlocks so consistency is preferred. See #19962 for more details.
35+
//
36+
// 3. While CriticalSection is fair and SRWLock is not, the current Rust policy is that there are
37+
// no guarantees of fairness.
38+
3239
impl Mutex {
3340
#[inline]
34-
pub unsafe fn new() -> Mutex {
35-
Mutex { inner: AtomicUsize::new(init_lock() as uint) }
36-
}
41+
pub unsafe fn new() -> Mutex { MUTEX_INIT }
3742
#[inline]
3843
pub unsafe fn lock(&self) {
39-
ffi::EnterCriticalSection(self.get())
44+
ffi::AcquireSRWLockExclusive(self.inner.get())
4045
}
4146
#[inline]
4247
pub unsafe fn try_lock(&self) -> bool {
43-
ffi::TryEnterCriticalSection(self.get()) != 0
48+
ffi::TryAcquireSRWLockExclusive(self.inner.get()) != 0
4449
}
4550
#[inline]
4651
pub unsafe fn unlock(&self) {
47-
ffi::LeaveCriticalSection(self.get())
52+
ffi::ReleaseSRWLockExclusive(self.inner.get())
4853
}
54+
#[inline]
4955
pub unsafe fn destroy(&self) {
50-
let lock = self.inner.swap(0, Ordering::SeqCst);
51-
if lock != 0 { free_lock(lock as ffi::LPCRITICAL_SECTION) }
52-
}
53-
54-
unsafe fn get(&self) -> ffi::LPCRITICAL_SECTION {
55-
match self.inner.load(Ordering::SeqCst) {
56-
0 => {}
57-
n => return n as ffi::LPCRITICAL_SECTION
58-
}
59-
let lock = init_lock();
60-
match self.inner.compare_and_swap(0, lock as uint, Ordering::SeqCst) {
61-
0 => return lock as ffi::LPCRITICAL_SECTION,
62-
_ => {}
63-
}
64-
free_lock(lock);
65-
return self.inner.load(Ordering::SeqCst) as ffi::LPCRITICAL_SECTION;
56+
// ...
6657
}
6758
}
68-
69-
unsafe fn init_lock() -> ffi::LPCRITICAL_SECTION {
70-
let block = heap::allocate(ffi::CRITICAL_SECTION_SIZE, 8)
71-
as ffi::LPCRITICAL_SECTION;
72-
if block.is_null() { alloc::oom() }
73-
ffi::InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT);
74-
return block;
75-
}
76-
77-
unsafe fn free_lock(h: ffi::LPCRITICAL_SECTION) {
78-
ffi::DeleteCriticalSection(h);
79-
heap::deallocate(h as *mut _, ffi::CRITICAL_SECTION_SIZE, 8);
80-
}

‎src/libstd/sys/windows/sync.rs

+16-30
Original file line numberDiff line numberDiff line change
@@ -8,17 +8,12 @@
88
// option. This file may not be copied, modified, or distributed
99
// except according to those terms.
1010

11-
use libc::{BOOL, DWORD, c_void, LPVOID};
11+
use libc::{BOOL, DWORD, c_void, LPVOID, c_ulong};
1212
use libc::types::os::arch::extra::BOOLEAN;
1313

14-
pub type LPCRITICAL_SECTION = *mut c_void;
15-
pub type LPCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
16-
pub type LPSRWLOCK = *mut SRWLOCK;
17-
18-
#[cfg(target_arch = "x86")]
19-
pub const CRITICAL_SECTION_SIZE: uint = 24;
20-
#[cfg(target_arch = "x86_64")]
21-
pub const CRITICAL_SECTION_SIZE: uint = 40;
14+
pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
15+
pub type PSRWLOCK = *mut SRWLOCK;
16+
pub type ULONG = c_ulong;
2217

2318
#[repr(C)]
2419
pub struct CONDITION_VARIABLE { pub ptr: LPVOID }
@@ -31,28 +26,19 @@ pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE {
3126
pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: 0 as *mut _ };
3227

3328
extern "system" {
34-
// critical sections
35-
pub fn InitializeCriticalSectionAndSpinCount(
36-
lpCriticalSection: LPCRITICAL_SECTION,
37-
dwSpinCount: DWORD) -> BOOL;
38-
pub fn DeleteCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
39-
pub fn EnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
40-
pub fn LeaveCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
41-
pub fn TryEnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION) -> BOOL;
42-
4329
// condition variables
44-
pub fn SleepConditionVariableCS(ConditionVariable: LPCONDITION_VARIABLE,
45-
CriticalSection: LPCRITICAL_SECTION,
46-
dwMilliseconds: DWORD) -> BOOL;
47-
pub fn WakeConditionVariable(ConditionVariable: LPCONDITION_VARIABLE);
48-
pub fn WakeAllConditionVariable(ConditionVariable: LPCONDITION_VARIABLE);
30+
pub fn SleepConditionVariableSRW(ConditionVariable: PCONDITION_VARIABLE,
31+
SRWLock: PSRWLOCK,
32+
dwMilliseconds: DWORD,
33+
Flags: ULONG) -> BOOL;
34+
pub fn WakeConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
35+
pub fn WakeAllConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
4936

5037
// slim rwlocks
51-
pub fn AcquireSRWLockExclusive(SRWLock: LPSRWLOCK);
52-
pub fn AcquireSRWLockShared(SRWLock: LPSRWLOCK);
53-
pub fn ReleaseSRWLockExclusive(SRWLock: LPSRWLOCK);
54-
pub fn ReleaseSRWLockShared(SRWLock: LPSRWLOCK);
55-
pub fn TryAcquireSRWLockExclusive(SRWLock: LPSRWLOCK) -> BOOLEAN;
56-
pub fn TryAcquireSRWLockShared(SRWLock: LPSRWLOCK) -> BOOLEAN;
38+
pub fn AcquireSRWLockExclusive(SRWLock: PSRWLOCK);
39+
pub fn AcquireSRWLockShared(SRWLock: PSRWLOCK);
40+
pub fn ReleaseSRWLockExclusive(SRWLock: PSRWLOCK);
41+
pub fn ReleaseSRWLockShared(SRWLock: PSRWLOCK);
42+
pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN;
43+
pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN;
5744
}
58-

0 commit comments

Comments
 (0)
Please sign in to comment.