diff --git a/.gitmodules b/.gitmodules
index dd74d49828f75..ec73db88cfdb6 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -47,3 +47,7 @@
 [submodule "src/doc/embedded-book"]
 	path = src/doc/embedded-book
 	url = https://github.com/rust-embedded/book.git
+[submodule "src/parking_lot"]
+	path = src/parking_lot
+	url = https://github.com/faern/parking_lot
+
diff --git a/Cargo.toml b/Cargo.toml
index ccd7e8b7654a6..1ed2110ae7d31 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -28,6 +28,7 @@ exclude = [
   "build",
   # HACK(eddyb) This hardcodes the fact that our CI uses `/checkout/obj`.
   "obj",
+  "src/parking_lot/benchmark",
 ]
 
 # Curiously, LLVM 7.0 will segfault if compiled with opt-level=3
diff --git a/src/ci/docker/dist-various-2/Dockerfile b/src/ci/docker/dist-various-2/Dockerfile
index c0f3326524d5c..b9f81f6cd4e83 100644
--- a/src/ci/docker/dist-various-2/Dockerfile
+++ b/src/ci/docker/dist-various-2/Dockerfile
@@ -32,7 +32,7 @@ RUN /tmp/build-solaris-toolchain.sh sparcv9 sparcv9 solaris-sparc
 COPY dist-various-2/build-x86_64-fortanix-unknown-sgx-toolchain.sh /tmp/
 # We pass the commit id of the port of LLVM's libunwind to the build script.
 # Any update to the commit id here, should cause the container image to be re-built from this point on.
-RUN /tmp/build-x86_64-fortanix-unknown-sgx-toolchain.sh "53b586346f2c7870e20b170decdc30729d97c42b"
+RUN /tmp/build-x86_64-fortanix-unknown-sgx-toolchain.sh "a50a70f1394b2e62d6a5d2510330eb110e31dad4"
 COPY dist-various-2/build-wasi-toolchain.sh /tmp/
 RUN /tmp/build-wasi-toolchain.sh
diff --git a/src/libstd/Cargo.toml b/src/libstd/Cargo.toml
index ac1aff845d8c9..47cf495a22bcf 100644
--- a/src/libstd/Cargo.toml
+++ b/src/libstd/Cargo.toml
@@ -49,7 +49,7 @@ fortanix-sgx-abi = { version = "0.3.2", features = ['rustc-dep-of-std'] }
 cc = "1.0"
 
 [features]
-default = ["compiler_builtins_c", "std_detect_file_io", "std_detect_dlsym_getauxval"]
+default = ["compiler_builtins_c", "std_detect_file_io", "std_detect_dlsym_getauxval", "i-am-libstd"]
 backtrace = ["backtrace-sys"]
 panic-unwind = ["panic_unwind"]
 
@@ -75,6 +75,9 @@ wasm-bindgen-threads = []
 std_detect_file_io = []
 std_detect_dlsym_getauxval = []
 
+# Feature used by parking_lot
+i-am-libstd = []
+
 [package.metadata.fortanix-sgx]
 # Maximum possible number of threads when testing
 threads = 125
diff --git a/src/libstd/io/lazy.rs b/src/libstd/io/lazy.rs
index e864aa2c864bb..49799e35fd2c8 100644
--- a/src/libstd/io/lazy.rs
+++ b/src/libstd/io/lazy.rs
@@ -1,12 +1,10 @@
 use crate::cell::Cell;
 use crate::ptr;
-use crate::sync::Arc;
+use crate::sync::{Arc, RawMutex};
 use crate::sys_common;
-use crate::sys_common::mutex::Mutex;
 
 pub struct Lazy<T> {
-    // We never call `lock.init()`, so it is UB to attempt to acquire this mutex reentrantly!
-    lock: Mutex,
+    lock: RawMutex,
     ptr: Cell<*mut Arc<T>>,
 }
 
@@ -18,24 +16,24 @@ unsafe impl<T> Sync for Lazy<T> {}
 impl<T> Lazy<T> {
     pub const fn new() -> Lazy<T> {
         Lazy {
-            lock: Mutex::new(),
+            lock: RawMutex::new(),
             ptr: Cell::new(ptr::null_mut()),
         }
     }
 }
 
 impl<T: Send + Sync + 'static> Lazy<T> {
-    /// Safety: `init` must not call `get` on the variable that is being
-    /// initialized.
-    pub unsafe fn get(&'static self, init: fn() -> Arc<T>) -> Option<Arc<T>> {
+    /// Warning: `init` must not call `get` on the variable that is being
+    /// initialized. Doing so will cause a deadlock.
+    pub fn get(&'static self, init: fn() -> Arc<T>) -> Option<Arc<T>> {
         let _guard = self.lock.lock();
         let ptr = self.ptr.get();
         if ptr.is_null() {
-            Some(self.init(init))
+            Some(unsafe { self.init(init) })
         } else if ptr == done() {
             None
         } else {
-            Some((*ptr).clone())
+            Some(unsafe { (*ptr).clone() })
         }
     }
 
@@ -53,8 +51,7 @@ impl<T: Send + Sync + 'static> Lazy<T> {
             drop(Box::from_raw(ptr))
         });
         // This could reentrantly call `init` again, which is a problem
-        // because our `lock` allows reentrancy!
-        // That's why `get` is unsafe and requires the caller to ensure no reentrancy happens.
+        // because our `lock` will then deadlock!
         let ret = init();
         if registered.is_ok() {
             self.ptr.set(Box::into_raw(Box::new(ret.clone())));
diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs
index 990c0eb8955e4..9bd55012c91d2 100644
--- a/src/libstd/io/stdio.rs
+++ b/src/libstd/io/stdio.rs
@@ -6,9 +6,10 @@ use crate::cell::RefCell;
 use crate::fmt;
 use crate::io::lazy::Lazy;
 use crate::io::{self, Initializer, BufReader, LineWriter, IoSlice, IoSliceMut};
-use crate::sync::{Arc, Mutex, MutexGuard};
+use crate::sync::Arc;
 use crate::sys::stdio;
-use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
+use crate::panic::{UnwindSafe, RefUnwindSafe};
+use crate::parking_lot::{Mutex, MutexGuard, ReentrantMutex, ReentrantMutexGuard};
 use crate::thread::LocalKey;
 
 thread_local! {
@@ -242,9 +243,7 @@ pub struct StdinLock<'a> {
 pub fn stdin() -> Stdin {
     static INSTANCE: Lazy<Mutex<BufReader<Maybe<StdinRaw>>>> = Lazy::new();
     return Stdin {
-        inner: unsafe {
-            INSTANCE.get(stdin_init).expect("cannot access stdin during shutdown")
-        },
+        inner: INSTANCE.get(stdin_init).expect("cannot access stdin during shutdown"),
     };
 
     fn stdin_init() -> Arc<Mutex<BufReader<Maybe<StdinRaw>>>> {
@@ -285,7 +284,7 @@ impl Stdin {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn lock(&self) -> StdinLock<'_> {
-        StdinLock { inner: self.inner.lock().unwrap_or_else(|e| e.into_inner()) }
+        StdinLock { inner: self.inner.lock() }
     }
 
     /// Locks this handle and reads a line of input into the specified buffer.
@@ -466,9 +465,7 @@ pub struct StdoutLock<'a> {
 pub fn stdout() -> Stdout {
     static INSTANCE: Lazy<ReentrantMutex<RefCell<LineWriter<Maybe<StdoutRaw>>>>> = Lazy::new();
     return Stdout {
-        inner: unsafe {
-            INSTANCE.get(stdout_init).expect("cannot access stdout during shutdown")
-        },
+        inner: INSTANCE.get(stdout_init).expect("cannot access stdout during shutdown"),
    };
 
     fn stdout_init() -> Arc<ReentrantMutex<RefCell<LineWriter<Maybe<StdoutRaw>>>>> {
@@ -504,7 +501,7 @@ impl Stdout {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn lock(&self) -> StdoutLock<'_> {
-        StdoutLock { inner: self.inner.lock().unwrap_or_else(|e| e.into_inner()) }
+        StdoutLock { inner: self.inner.lock() }
     }
 }
 
@@ -533,6 +530,12 @@ impl Write for Stdout {
         self.lock().write_fmt(args)
     }
 }
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl UnwindSafe for Stdout {}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl RefUnwindSafe for Stdout {}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl Write for StdoutLock<'_> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
@@ -553,6 +556,11 @@ impl fmt::Debug for StdoutLock<'_> {
     }
 }
 
+#[stable(feature = "rust1", since = "1.0.0")]
+impl UnwindSafe for StdoutLock<'_> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl RefUnwindSafe for StdoutLock<'_> {}
+
 /// A handle to the standard error stream of a process.
 ///
 /// For more information, see the [`io::stderr`] method.
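Worth pausing on the stdio hunks above: because the parking_lot-backed locks cannot be poisoned, `Stdin::lock`/`Stdout::lock` no longer return a `LockResult` that needs unwrapping, and the new `UnwindSafe`/`RefUnwindSafe` impls let the handles cross `catch_unwind` boundaries. A minimal standalone sketch of what that permits — plain user code, not part of the patch, assuming a std with these impls:

```rust
use std::io::Write;
use std::panic::catch_unwind;

fn main() {
    let stdout = std::io::stdout();
    // Stdout: UnwindSafe, so the handle may be moved into the closure.
    let result = catch_unwind(move || {
        // No LockResult to unwrap: the parking_lot-backed lock
        // cannot be poisoned, so lock() returns the guard directly.
        let mut lock = stdout.lock();
        writeln!(lock, "hello from inside catch_unwind").unwrap();
    });
    assert!(result.is_ok());
}
```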
@@ -625,9 +633,7 @@ pub struct StderrLock<'a> {
 pub fn stderr() -> Stderr {
     static INSTANCE: Lazy<ReentrantMutex<RefCell<Maybe<StderrRaw>>>> = Lazy::new();
     return Stderr {
-        inner: unsafe {
-            INSTANCE.get(stderr_init).expect("cannot access stderr during shutdown")
-        },
+        inner: INSTANCE.get(stderr_init).expect("cannot access stderr during shutdown"),
     };
 
     fn stderr_init() -> Arc<ReentrantMutex<RefCell<Maybe<StderrRaw>>>> {
@@ -663,7 +669,7 @@ impl Stderr {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn lock(&self) -> StderrLock<'_> {
-        StderrLock { inner: self.inner.lock().unwrap_or_else(|e| e.into_inner()) }
+        StderrLock { inner: self.inner.lock() }
     }
 }
 
@@ -692,6 +698,12 @@ impl Write for Stderr {
         self.lock().write_fmt(args)
     }
 }
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl UnwindSafe for Stderr {}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl RefUnwindSafe for Stderr {}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl Write for StderrLock<'_> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
@@ -712,6 +724,11 @@ impl fmt::Debug for StderrLock<'_> {
     }
 }
 
+#[stable(feature = "rust1", since = "1.0.0")]
+impl UnwindSafe for StderrLock<'_> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl RefUnwindSafe for StderrLock<'_> {}
+
 /// Resets the thread-local stderr handle to the specified writer
 ///
 /// This will replace the current thread's stderr handle, returning the old
@@ -816,7 +833,6 @@ pub use realstd::io::{_eprint, _print};
 
 #[cfg(test)]
 mod tests {
-    use crate::panic::{UnwindSafe, RefUnwindSafe};
     use crate::thread;
     use super::*;
diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs
index 215f1bbc971af..fbc23a12a837b 100644
--- a/src/libstd/lib.rs
+++ b/src/libstd/lib.rs
@@ -495,6 +495,18 @@ pub mod rt;
 #[cfg(not(test))]
 mod std_detect;
 
+#[path = "../parking_lot/core/src/lib.rs"]
+mod parking_lot_core;
+
+#[path = "../parking_lot/lock_api/src/lib.rs"]
+#[allow(dead_code)]
+mod lock_api;
+
+#[path = "../parking_lot/src/lib.rs"]
+#[allow(dead_code)]
+mod parking_lot;
+
+
 #[doc(hidden)]
 #[unstable(feature = "stdsimd", issue = "48556")]
 #[cfg(not(test))]
diff --git a/src/libstd/panicking.rs b/src/libstd/panicking.rs
index 27b8a110ca71e..d965fb8607433 100644
--- a/src/libstd/panicking.rs
+++ b/src/libstd/panicking.rs
@@ -12,11 +12,12 @@ use core::panic::{BoxMeUp, PanicInfo, Location};
 use crate::any::Any;
 use crate::fmt;
 use crate::intrinsics;
+use crate::lock_api::RawRwLock as _;
 use crate::mem;
+use crate::parking_lot::RawRwLock;
 use crate::ptr;
 use crate::raw;
 use crate::sys::stdio::panic_output;
-use crate::sys_common::rwlock::RWLock;
 use crate::sys_common::thread_info;
 use crate::sys_common::util;
 use crate::thread;
@@ -54,7 +55,7 @@ enum Hook {
     Custom(*mut (dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send)),
 }
 
-static HOOK_LOCK: RWLock = RWLock::new();
+static HOOK_LOCK: RawRwLock = RawRwLock::INIT;
 static mut HOOK: Hook = Hook::Default;
 
 /// Registers a custom panic hook, replacing any that was previously registered.
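The `HOOK_LOCK` change just above swaps `sys_common::rwlock::RWLock` for `parking_lot::RawRwLock`, whose locking methods come from the `lock_api::RawRwLock` trait — hence the added `use crate::lock_api::RawRwLock as _;`. A rough standalone equivalent against the crates.io releases of these crates (note: in current `lock_api` the `unlock_*` methods are `unsafe`; the in-tree copy used by this PR may differ):

```rust
use lock_api::RawRwLock as _; // brings lock_shared/lock_exclusive into scope
use parking_lot::RawRwLock;

// Const-initializable: no lazy init() and no destroy() needed, unlike the
// sys_common::rwlock::RWLock it replaces.
static HOOK_LOCK: RawRwLock = RawRwLock::INIT;

fn with_exclusive<R>(f: impl FnOnce() -> R) -> R {
    HOOK_LOCK.lock_exclusive();
    let r = f();
    unsafe { HOOK_LOCK.unlock_exclusive() };
    r
}

fn with_shared<R>(f: impl FnOnce() -> R) -> R {
    HOOK_LOCK.lock_shared();
    let r = f();
    unsafe { HOOK_LOCK.unlock_shared() };
    r
}

fn main() {
    with_exclusive(|| println!("write side, like set_hook/take_hook"));
    with_shared(|| println!("read side, like rust_panic_with_hook"));
}
```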
@@ -97,10 +98,10 @@ pub fn set_hook(hook: Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>) {
     }
 
     unsafe {
-        HOOK_LOCK.write();
+        HOOK_LOCK.lock_exclusive();
         let old_hook = HOOK;
         HOOK = Hook::Custom(Box::into_raw(hook));
-        HOOK_LOCK.write_unlock();
+        HOOK_LOCK.unlock_exclusive();
 
         if let Hook::Custom(ptr) = old_hook {
             Box::from_raw(ptr);
@@ -142,10 +143,10 @@ pub fn take_hook() -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> {
     }
 
     unsafe {
-        HOOK_LOCK.write();
+        HOOK_LOCK.lock_exclusive();
         let hook = HOOK;
         HOOK = Hook::Default;
-        HOOK_LOCK.write_unlock();
+        HOOK_LOCK.unlock_exclusive();
 
         match hook {
             Hook::Default => Box::new(default_hook),
@@ -463,7 +464,7 @@ fn rust_panic_with_hook(payload: &mut dyn BoxMeUp,
         message,
         Location::internal_constructor(file, line, col),
     );
-    HOOK_LOCK.read();
+    HOOK_LOCK.lock_shared();
     match HOOK {
         // Some platforms know that printing to stderr won't ever actually
         // print anything, and if that's the case we can skip the default
@@ -478,7 +479,7 @@ fn rust_panic_with_hook(payload: &mut dyn BoxMeUp,
             (*ptr)(&info);
         }
     };
-    HOOK_LOCK.read_unlock();
+    HOOK_LOCK.unlock_shared();
 }
 
 if panics > 1 {
diff --git a/src/libstd/sync/condvar.rs b/src/libstd/sync/condvar.rs
index ffb9ce1c81a53..8ffc5256aa54b 100644
--- a/src/libstd/sync/condvar.rs
+++ b/src/libstd/sync/condvar.rs
@@ -1,8 +1,6 @@
 use crate::fmt;
-use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::parking_lot;
 use crate::sync::{mutex, MutexGuard, PoisonError};
-use crate::sys_common::condvar as sys;
-use crate::sys_common::mutex as sys_mutex;
 use crate::sys_common::poison::{self, LockResult};
 use crate::time::{Duration, Instant};
 
@@ -78,10 +76,11 @@ impl WaitTimeoutResult {
 /// Functions in this module will block the current **thread** of execution and
 /// are bindings to system-provided condition variables where possible. Note
 /// that this module places one additional restriction over the system condition
-/// variables: each condvar can be used with precisely one mutex at runtime. Any
-/// attempt to use multiple mutexes on the same condition variable will result
-/// in a runtime panic. If this is not desired, then the unsafe primitives in
-/// `sys` do not have this restriction but may result in undefined behavior.
+/// variables: each condvar can be used with only one mutex at a time. Any
+/// attempt to use multiple mutexes on the same condition variable
+/// simultaneously will result in a runtime panic. However, it is possible to
+/// switch to a different mutex if there are no threads currently waiting on
+/// the condition variable.
 ///
 /// # Examples
 ///
@@ -110,8 +109,7 @@ impl WaitTimeoutResult {
 /// ```
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Condvar {
-    inner: Box<sys::Condvar>,
-    mutex: AtomicUsize,
+    inner: parking_lot::Condvar,
 }
 
 impl Condvar {
@@ -127,14 +125,7 @@ impl Condvar {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn new() -> Condvar {
-        let mut c = Condvar {
-            inner: box sys::Condvar::new(),
-            mutex: AtomicUsize::new(0),
-        };
-        unsafe {
-            c.inner.init();
-        }
-        c
+        Condvar { inner: parking_lot::Condvar::new() }
     }
 
     /// Blocks the current thread until this condition variable receives a
@@ -160,9 +151,10 @@ impl Condvar {
     /// # Panics
     ///
     /// This function will [`panic!`] if it is used with more than one mutex
-    /// over time. Each condition variable is dynamically bound to exactly one
-    /// mutex to ensure defined behavior across platforms. If this functionality
-    /// is not desired, then unsafe primitives in `sys` are provided.
+    /// at a time. Any attempt to use multiple mutexes on the same condition
+    /// variable simultaneously will result in a runtime panic. However, it is
+    /// possible to switch to a different mutex if there are no threads
+    /// currently waiting on the condition variable.
     ///
     /// [`notify_one`]: #method.notify_one
     /// [`notify_all`]: #method.notify_all
@@ -196,12 +188,11 @@ impl Condvar {
     ///     }
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
-    pub fn wait<'a, T>(&self, guard: MutexGuard<'a, T>)
+    pub fn wait<'a, T>(&self, mut guard: MutexGuard<'a, T>)
                        -> LockResult<MutexGuard<'a, T>> {
-        let poisoned = unsafe {
-            let lock = mutex::guard_lock(&guard);
-            self.verify(lock);
-            self.inner.wait(lock);
+        let poisoned = {
+            let inner_guard = mutex::guard_lock(&mut guard);
+            self.inner.wait(inner_guard);
             mutex::guard_poison(&guard).get()
         };
         if poisoned {
@@ -396,14 +387,13 @@ impl Condvar {
     ///     }
     /// ```
     #[stable(feature = "wait_timeout", since = "1.5.0")]
-    pub fn wait_timeout<'a, T>(&self, guard: MutexGuard<'a, T>,
+    pub fn wait_timeout<'a, T>(&self, mut guard: MutexGuard<'a, T>,
                                dur: Duration)
                                -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
-        let (poisoned, result) = unsafe {
-            let lock = mutex::guard_lock(&guard);
-            self.verify(lock);
-            let success = self.inner.wait_timeout(lock, dur);
-            (mutex::guard_poison(&guard).get(), WaitTimeoutResult(!success))
+        let (poisoned, result) = {
+            let inner_guard = mutex::guard_lock(&mut guard);
+            let timed_out = self.inner.wait_for(inner_guard, dur).timed_out();
+            (mutex::guard_poison(&guard).get(), WaitTimeoutResult(timed_out))
         };
         if poisoned {
             Err(PoisonError::new((guard, result)))
@@ -525,7 +515,7 @@ impl Condvar {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn notify_one(&self) {
-        unsafe { self.inner.notify_one() }
+        self.inner.notify_one();
     }
 
     /// Wakes up all blocked threads on this condvar.
@@ -565,25 +555,7 @@ impl Condvar {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn notify_all(&self) {
-        unsafe { self.inner.notify_all() }
-    }
-
-    fn verify(&self, mutex: &sys_mutex::Mutex) {
-        let addr = mutex as *const _ as usize;
-        match self.mutex.compare_and_swap(0, addr, Ordering::SeqCst) {
-            // If we got out 0, then we have successfully bound the mutex to
-            // this cvar.
-            0 => {}
-
-            // If we get out a value that's the same as `addr`, then someone
-            // already beat us to the punch.
-            n if n == addr => {}
-
-            // Anything else and we're using more than one mutex on this cvar,
-            // which is currently disallowed.
- _ => panic!("attempted to use a condition variable with two \ - mutexes"), - } + self.inner.notify_all(); } } @@ -604,9 +576,7 @@ impl Default for Condvar { #[stable(feature = "rust1", since = "1.0.0")] impl Drop for Condvar { - fn drop(&mut self) { - unsafe { self.inner.destroy() } - } + fn drop(&mut self) {} } #[cfg(test)] @@ -811,23 +781,51 @@ mod tests { } #[test] - #[should_panic] #[cfg_attr(target_os = "emscripten", ignore)] fn two_mutexes() { - let m = Arc::new(Mutex::new(())); - let m2 = m.clone(); - let c = Arc::new(Condvar::new()); - let c2 = c.clone(); + let mutex1 = Arc::new(Mutex::new(())); + let mutex2 = Arc::new(Mutex::new(())); + let condvar = Arc::new(Condvar::new()); - let mut g = m.lock().unwrap(); - let _t = thread::spawn(move|| { - let _g = m2.lock().unwrap(); - c2.notify_one(); + wait_and_notify(mutex1, condvar.clone()); + wait_and_notify(mutex2, condvar); + + fn wait_and_notify(mutex: Arc>, condvar: Arc) { + let mutex_alias = mutex.clone(); + let condvar_alias = condvar.clone(); + + let g = mutex.lock().unwrap(); + let _t = thread::spawn(move || { + let _g = mutex_alias.lock().unwrap(); + condvar_alias.notify_one(); + }); + let _g = condvar.wait(g).unwrap(); + } + } + + #[test] + #[should_panic] + #[cfg_attr(target_os = "emscripten", ignore)] + fn two_mutexes_simultaneously() { + let mutex1 = Arc::new(Mutex::new(())); + let mutex1_alias = mutex1.clone(); + let condvar = Arc::new(Condvar::new()); + let condvar_alias = condvar.clone(); + + let (tx, rx) = channel(); + let _t = thread::spawn(move || { + let guard1 = mutex1.lock().unwrap(); + tx.send(()).unwrap(); + let _ = condvar.wait(guard1).unwrap(); }); - g = c.wait(g).unwrap(); - drop(g); + // Wait for second thread to aquire mutex. + rx.recv().unwrap(); + // Wait for second thread to sleep in Condvar::wait + let _g = mutex1_alias.lock().unwrap(); - let m = Mutex::new(()); - let _ = c.wait(m.lock().unwrap()).unwrap(); + // Wait on the same condvar with a second mutex + let mutex2 = Mutex::new(()); + let guard2 = mutex2.lock().unwrap(); + let _ = condvar_alias.wait(guard2).unwrap(); } } diff --git a/src/libstd/sync/mod.rs b/src/libstd/sync/mod.rs index 809ee8826981b..257a955240a0a 100644 --- a/src/libstd/sync/mod.rs +++ b/src/libstd/sync/mod.rs @@ -169,6 +169,9 @@ pub use crate::sys_common::poison::{PoisonError, TryLockError, TryLockResult, Lo #[stable(feature = "rust1", since = "1.0.0")] pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +#[allow(unused_imports)] +pub(crate) use self::mutex::{RawMutex, RawMutexGuard}; + pub mod mpsc; mod barrier; diff --git a/src/libstd/sync/mutex.rs b/src/libstd/sync/mutex.rs index 11ac34fcb24f6..4601ce341a2ba 100644 --- a/src/libstd/sync/mutex.rs +++ b/src/libstd/sync/mutex.rs @@ -1,9 +1,9 @@ -use crate::cell::UnsafeCell; use crate::fmt; +use crate::lock_api::RawMutex as _; use crate::mem; use crate::ops::{Deref, DerefMut}; +use crate::parking_lot; use crate::ptr; -use crate::sys_common::mutex as sys; use crate::sys_common::poison::{self, TryLockError, TryLockResult, LockResult}; /// A mutual exclusion primitive useful for protecting shared data @@ -109,14 +109,8 @@ use crate::sys_common::poison::{self, TryLockError, TryLockResult, LockResult}; /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub struct Mutex { - // Note that this mutex is in a *box*, not inlined into the struct itself. - // Once a native mutex has been used once, its address can never change (it - // can't be moved). 
This mutex type can be safely moved at any time, so to - // ensure that the native mutex is used correctly we box the inner mutex to - // give it a constant address. - inner: Box, poison: poison::Flag, - data: UnsafeCell, + inner: parking_lot::Mutex, } // these are the only places where `T: Send` matters; all other @@ -147,6 +141,7 @@ pub struct MutexGuard<'a, T: ?Sized + 'a> { // disregard field privacy). __lock: &'a Mutex, __poison: poison::Guard, + __inner: parking_lot::MutexGuard<'a, T>, } #[stable(feature = "rust1", since = "1.0.0")] @@ -166,15 +161,10 @@ impl Mutex { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn new(t: T) -> Mutex { - let mut m = Mutex { - inner: box sys::Mutex::new(), + Mutex { poison: poison::Flag::new(), - data: UnsafeCell::new(t), - }; - unsafe { - m.inner.init(); + inner: parking_lot::Mutex::new(t), } - m } } @@ -216,10 +206,8 @@ impl Mutex { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn lock(&self) -> LockResult> { - unsafe { - self.inner.raw_lock(); - MutexGuard::new(self) - } + let inner_guard = self.inner.lock(); + unsafe { MutexGuard::new(self, inner_guard) } } /// Attempts to acquire this lock. @@ -259,12 +247,10 @@ impl Mutex { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn try_lock(&self) -> TryLockResult> { - unsafe { - if self.inner.try_lock() { - Ok(MutexGuard::new(self)?) - } else { - Err(TryLockError::WouldBlock) - } + if let Some(inner_guard) = self.inner.try_lock() { + Ok(unsafe { MutexGuard::new(self, inner_guard)? }) + } else { + Err(TryLockError::WouldBlock) } } @@ -319,16 +305,14 @@ impl Mutex { // but because `Mutex` impl-s `Drop`, we can't move out of it, so // we'll have to destructure it manually instead. unsafe { - // Like `let Mutex { inner, poison, data } = self`. - let (inner, poison, data) = { - let Mutex { ref inner, ref poison, ref data } = self; - (ptr::read(inner), ptr::read(poison), ptr::read(data)) + // Like `let Mutex { inner, poison } = self`. + let (poison, inner) = { + let Mutex { ref poison, ref inner } = self; + (ptr::read(poison), ptr::read(inner)) }; mem::forget(self); - inner.destroy(); // Keep in sync with the `Drop` impl. - drop(inner); - poison::map_result(poison.borrow(), |_| data.into_inner()) + poison::map_result(poison.borrow(), |_| inner.into_inner()) } } @@ -355,21 +339,14 @@ impl Mutex { pub fn get_mut(&mut self) -> LockResult<&mut T> { // We know statically that there are no other references to `self`, so // there's no need to lock the inner mutex. - let data = unsafe { &mut *self.data.get() }; + let data = self.inner.get_mut(); poison::map_result(self.poison.borrow(), |_| data ) } } #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<#[may_dangle] T: ?Sized> Drop for Mutex { - fn drop(&mut self) { - // This is actually safe b/c we know that there is no further usage of - // this mutex (it's up to the user to arrange for a mutex to get - // dropped, that's not our job) - // - // IMPORTANT: This code must be kept in sync with `Mutex::into_inner`. 
-        unsafe { self.inner.destroy() }
-    }
+    fn drop(&mut self) {}
 }
 
 #[stable(feature = "mutex_from", since = "1.24.0")]
@@ -412,11 +389,13 @@ impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
 }
 
 impl<'mutex, T: ?Sized> MutexGuard<'mutex, T> {
-    unsafe fn new(lock: &'mutex Mutex<T>) -> LockResult<MutexGuard<'mutex, T>> {
+    unsafe fn new(lock: &'mutex Mutex<T>, inner: parking_lot::MutexGuard<'mutex, T>)
+        -> LockResult<MutexGuard<'mutex, T>> {
         poison::map_result(lock.poison.borrow(), |guard| {
             MutexGuard {
                 __lock: lock,
                 __poison: guard,
+                __inner: inner,
             }
         })
    }
@@ -427,14 +406,14 @@ impl<T: ?Sized> Deref for MutexGuard<'_, T> {
     type Target = T;
 
     fn deref(&self) -> &T {
-        unsafe { &*self.__lock.data.get() }
+        self.__inner.deref()
     }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
     fn deref_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.__lock.data.get() }
+        self.__inner.deref_mut()
     }
 }
 
@@ -442,10 +421,7 @@ impl<T: ?Sized> Drop for MutexGuard<'_, T> {
     #[inline]
     fn drop(&mut self) {
-        unsafe {
-            self.__lock.poison.done(&self.__poison);
-            self.__lock.inner.raw_unlock();
-        }
+        self.__lock.poison.done(&self.__poison);
     }
 }
 
@@ -463,14 +439,53 @@ impl<T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'_, T> {
     }
 }
 
-pub fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::Mutex {
-    &guard.__lock.inner
+pub fn guard_lock<'a, 'b, T: ?Sized>(guard: &'b mut MutexGuard<'a, T>)
+    -> &'b mut parking_lot::MutexGuard<'a, T>
+{
+    &mut guard.__inner
 }
 
 pub fn guard_poison<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a poison::Flag {
     &guard.__lock.poison
 }
+
+/// A thin wrapper on top of `parking_lot::RawMutex` that adds RAII guards,
+/// making it easier to use and harder to forget to unlock than the bare
+/// `parking_lot::RawMutex`.
+///
+/// This type is not exposed from the standard library, only used inside it.
+///
+/// This type exists because `parking_lot::Mutex` can't have a `const fn`
+/// constructor, since it has trait bounds. When that becomes possible, this
+/// type can be removed and `Mutex<()>` can be used instead.
+pub struct RawMutex(parking_lot::RawMutex);
+
+unsafe impl Sync for RawMutex {}
+
+impl RawMutex {
+    /// Creates a new mutex for use.
+    pub const fn new() -> Self { Self(parking_lot::RawMutex::INIT) }
+
+    /// Locks the mutex and returns a RAII guard that will unlock it again on drop.
+    #[inline]
+    pub fn lock(&self) -> RawMutexGuard<'_> {
+        self.0.lock();
+        RawMutexGuard(&self.0)
+    }
+}
+
+#[must_use]
+/// A simple RAII utility for the above Mutex without the poisoning semantics.
+pub struct RawMutexGuard<'a>(&'a parking_lot::RawMutex);
+
+impl Drop for RawMutexGuard<'_> {
+    #[inline]
+    fn drop(&mut self) {
+        self.0.unlock();
+    }
+}
+
 #[cfg(all(test, not(target_os = "emscripten")))]
 mod tests {
     use crate::sync::mpsc::channel;
diff --git a/src/libstd/sync/once.rs b/src/libstd/sync/once.rs
index 0c91249402417..9d45ffa50fbc1 100644
--- a/src/libstd/sync/once.rs
+++ b/src/libstd/sync/once.rs
@@ -10,53 +10,13 @@
 //
 // As a result, we end up implementing it ourselves in the standard library.
 // This also gives us the opportunity to optimize the implementation a bit which
-// should help the fast path on call sites. Consequently, let's explain how this
-// primitive works now!
+// should help the fast path on call sites.
 //
-// So to recap, the guarantees of a Once are that it will call the
-// initialization closure at most once, and it will never return until the one
-// that's running has finished running. This means that we need some form of
-// blocking here while the custom callback is running at the very least.
-// Additionally, we add on the restriction of **poisoning**. Whenever an
-// initialization closure panics, the Once enters a "poisoned" state which means
-// that all future calls will immediately panic as well.
-//
-// So to implement this, one might first reach for a `Mutex`, but those cannot
-// be put into a `static`. It also gets a lot harder with poisoning to figure
-// out when the mutex needs to be deallocated because it's not after the closure
-// finishes, but after the first successful closure finishes.
-//
-// All in all, this is instead implemented with atomics and lock-free
-// operations! Whee! Each `Once` has one word of atomic state, and this state is
-// CAS'd on to determine what to do. There are four possible state of a `Once`:
-//
-// * Incomplete - no initialization has run yet, and no thread is currently
-//                using the Once.
-// * Poisoned - some thread has previously attempted to initialize the Once, but
-//              it panicked, so the Once is now poisoned. There are no other
-//              threads currently accessing this Once.
-// * Running - some thread is currently attempting to run initialization. It may
-//             succeed, so all future threads need to wait for it to finish.
-//             Note that this state is accompanied with a payload, described
-//             below.
-// * Complete - initialization has completed and all future calls should finish
-//              immediately.
-//
-// With 4 states we need 2 bits to encode this, and we use the remaining bits
-// in the word we have allocated as a queue of threads waiting for the thread
-// responsible for entering the RUNNING state. This queue is just a linked list
-// of Waiter nodes which is monotonically increasing in size. Each node is
-// allocated on the stack, and whenever the running closure finishes it will
-// consume the entire queue and notify all waiters they should try again.
-//
-// You'll find a few more details in the implementation, but that's the gist of
-// it!
+// This primitive is now just a wrapper around `parking_lot::Once`, which is
+// faster, uses less memory, and has fewer constraints than the implementation
+// that was here before.
 
-use crate::fmt;
-use crate::marker;
-use crate::ptr;
-use crate::sync::atomic::{AtomicUsize, AtomicBool, Ordering};
-use crate::thread::{self, Thread};
+use crate::{fmt, parking_lot};
 
 /// A synchronization primitive which can be used to run a one-time global
 /// initialization. Useful for one-time initialization for FFI or related
@@ -79,19 +39,9 @@ use crate::thread::{self, Thread};
 /// ```
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Once {
-    // This `state` word is actually an encoded version of just a pointer to a
-    // `Waiter`, so we add the `PhantomData` appropriately.
-    state: AtomicUsize,
-    _marker: marker::PhantomData<*mut Waiter>,
+    inner: parking_lot::Once,
 }
 
-// The `PhantomData` of a raw pointer removes these two auto traits, but we
-// enforce both below in the implementation so this should be safe to add.
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl Sync for Once {}
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl Send for Once {}
-
 /// State yielded to [`call_once_force`]’s closure parameter. The state can be
 /// used to query the poison status of the [`Once`].
 ///
@@ -117,38 +67,12 @@ pub struct OnceState {
 #[stable(feature = "rust1", since = "1.0.0")]
 pub const ONCE_INIT: Once = Once::new();
 
-// Four states that a Once can be in, encoded into the lower bits of `state` in
-// the Once structure.
-const INCOMPLETE: usize = 0x0; -const POISONED: usize = 0x1; -const RUNNING: usize = 0x2; -const COMPLETE: usize = 0x3; - -// Mask to learn about the state. All other bits are the queue of waiters if -// this is in the RUNNING state. -const STATE_MASK: usize = 0x3; - -// Representation of a node in the linked list of waiters in the RUNNING state. -struct Waiter { - thread: Option, - signaled: AtomicBool, - next: *mut Waiter, -} - -// Helper struct used to clean up after a closure call with a `Drop` -// implementation to also run on panic. -struct Finish<'a> { - panicked: bool, - me: &'a Once, -} - impl Once { /// Creates a new `Once` value. #[stable(feature = "once_new", since = "1.2.0")] pub const fn new() -> Once { Once { - state: AtomicUsize::new(INCOMPLETE), - _marker: marker::PhantomData, + inner: parking_lot::Once::new(), } } @@ -167,8 +91,7 @@ impl Once { /// return). /// /// If the given closure recursively invokes `call_once` on the same `Once` - /// instance the exact behavior is not specified, allowed outcomes are - /// a panic or a deadlock. + /// instance, it will deadlock. /// /// # Examples /// @@ -210,14 +133,11 @@ impl Once { /// /// [poison]: struct.Mutex.html#poisoning #[stable(feature = "rust1", since = "1.0.0")] - pub fn call_once(&self, f: F) where F: FnOnce() { - // Fast path check - if self.is_completed() { - return; - } - - let mut f = Some(f); - self.call_inner(false, &mut |_| f.take().unwrap()()); + pub fn call_once(&self, f: F) + where + F: FnOnce(), + { + self.inner.call_once(f); } /// Performs the same function as [`call_once`] except ignores poisoning. @@ -267,16 +187,16 @@ impl Once { /// INIT.call_once(|| {}); /// ``` #[unstable(feature = "once_poison", issue = "33577")] - pub fn call_once_force(&self, f: F) where F: FnOnce(&OnceState) { - // Fast path check - if self.is_completed() { - return; - } - - let mut f = Some(f); - self.call_inner(true, &mut |p| { - f.take().unwrap()(&OnceState { poisoned: p }) - }); + pub fn call_once_force(&self, f: F) + where + F: FnOnce(&OnceState), + { + self.inner + .call_once_force(move |state: parking_lot::OnceState| { + f(&OnceState { + poisoned: state.poisoned(), + }); + }); } /// Returns `true` if some `call_once` call has completed @@ -322,110 +242,7 @@ impl Once { #[unstable(feature = "once_is_completed", issue = "54890")] #[inline] pub fn is_completed(&self) -> bool { - // An `Acquire` load is enough because that makes all the initialization - // operations visible to us, and, this being a fast path, weaker - // ordering helps with performance. This `Acquire` synchronizes with - // `SeqCst` operations on the slow path. - self.state.load(Ordering::Acquire) == COMPLETE - } - - // This is a non-generic function to reduce the monomorphization cost of - // using `call_once` (this isn't exactly a trivial or small implementation). - // - // Additionally, this is tagged with `#[cold]` as it should indeed be cold - // and it helps let LLVM know that calls to this function should be off the - // fast path. Essentially, this should help generate more straight line code - // in LLVM. - // - // Finally, this takes an `FnMut` instead of a `FnOnce` because there's - // currently no way to take an `FnOnce` and call it via virtual dispatch - // without some allocation overhead. 
- #[cold] - fn call_inner(&self, - ignore_poisoning: bool, - init: &mut dyn FnMut(bool)) { - - // This cold path uses SeqCst consistently because the - // performance difference really does not matter there, and - // SeqCst minimizes the chances of something going wrong. - let mut state = self.state.load(Ordering::SeqCst); - - 'outer: loop { - match state { - // If we're complete, then there's nothing to do, we just - // jettison out as we shouldn't run the closure. - COMPLETE => return, - - // If we're poisoned and we're not in a mode to ignore - // poisoning, then we panic here to propagate the poison. - POISONED if !ignore_poisoning => { - panic!("Once instance has previously been poisoned"); - } - - // Otherwise if we see a poisoned or otherwise incomplete state - // we will attempt to move ourselves into the RUNNING state. If - // we succeed, then the queue of waiters starts at null (all 0 - // bits). - POISONED | - INCOMPLETE => { - let old = self.state.compare_and_swap(state, RUNNING, - Ordering::SeqCst); - if old != state { - state = old; - continue - } - - // Run the initialization routine, letting it know if we're - // poisoned or not. The `Finish` struct is then dropped, and - // the `Drop` implementation here is responsible for waking - // up other waiters both in the normal return and panicking - // case. - let mut complete = Finish { - panicked: true, - me: self, - }; - init(state == POISONED); - complete.panicked = false; - return - } - - // All other values we find should correspond to the RUNNING - // state with an encoded waiter list in the more significant - // bits. We attempt to enqueue ourselves by moving us to the - // head of the list and bail out if we ever see a state that's - // not RUNNING. - _ => { - assert!(state & STATE_MASK == RUNNING); - let mut node = Waiter { - thread: Some(thread::current()), - signaled: AtomicBool::new(false), - next: ptr::null_mut(), - }; - let me = &mut node as *mut Waiter as usize; - assert!(me & STATE_MASK == 0); - - while state & STATE_MASK == RUNNING { - node.next = (state & !STATE_MASK) as *mut Waiter; - let old = self.state.compare_and_swap(state, - me | RUNNING, - Ordering::SeqCst); - if old != state { - state = old; - continue - } - - // Once we've enqueued ourselves, wait in a loop. - // Afterwards reload the state and continue with what we - // were doing from before. - while !node.signaled.load(Ordering::SeqCst) { - thread::park(); - } - state = self.state.load(Ordering::SeqCst); - continue 'outer - } - } - } - } + self.inner.state().done() } } @@ -436,34 +253,6 @@ impl fmt::Debug for Once { } } -impl Drop for Finish<'_> { - fn drop(&mut self) { - // Swap out our state with however we finished. We should only ever see - // an old state which was RUNNING. - let queue = if self.panicked { - self.me.state.swap(POISONED, Ordering::SeqCst) - } else { - self.me.state.swap(COMPLETE, Ordering::SeqCst) - }; - assert_eq!(queue & STATE_MASK, RUNNING); - - // Decode the RUNNING to a list of waiters, then walk that entire list - // and wake them up. Note that it is crucial that after we store `true` - // in the node it can be free'd! As a result we load the `thread` to - // signal ahead of time and then unpark it after the store. 
- unsafe { - let mut queue = (queue & !STATE_MASK) as *mut Waiter; - while !queue.is_null() { - let next = (*queue).next; - let thread = (*queue).thread.take().unwrap(); - (*queue).signaled.store(true, Ordering::SeqCst); - thread.unpark(); - queue = next; - } - } - } -} - impl OnceState { /// Returns `true` if the associated [`Once`] was poisoned prior to the /// invocation of the closure passed to [`call_once_force`]. @@ -514,10 +303,10 @@ impl OnceState { #[cfg(all(test, not(target_os = "emscripten")))] mod tests { + use super::Once; use crate::panic; use crate::sync::mpsc::channel; use crate::thread; - use super::Once; #[test] fn smoke_once() { @@ -537,8 +326,10 @@ mod tests { let (tx, rx) = channel(); for _ in 0..10 { let tx = tx.clone(); - thread::spawn(move|| { - for _ in 0..4 { thread::yield_now() } + thread::spawn(move || { + for _ in 0..4 { + thread::yield_now() + } unsafe { O.call_once(|| { assert!(!RUN); @@ -627,6 +418,5 @@ mod tests { assert!(t1.join().is_ok()); assert!(t2.join().is_ok()); - } } diff --git a/src/libstd/sync/rwlock.rs b/src/libstd/sync/rwlock.rs index 1299a74409560..f9f7f61fd9a8b 100644 --- a/src/libstd/sync/rwlock.rs +++ b/src/libstd/sync/rwlock.rs @@ -1,10 +1,9 @@ -use crate::cell::UnsafeCell; use crate::fmt; use crate::mem; use crate::ops::{Deref, DerefMut}; +use crate::parking_lot; use crate::ptr; use crate::sys_common::poison::{self, LockResult, TryLockError, TryLockResult}; -use crate::sys_common::rwlock as sys; /// A reader-writer lock /// @@ -65,9 +64,8 @@ use crate::sys_common::rwlock as sys; /// [`Mutex`]: struct.Mutex.html #[stable(feature = "rust1", since = "1.0.0")] pub struct RwLock { - inner: Box, poison: poison::Flag, - data: UnsafeCell, + inner: parking_lot::RwLock, } #[stable(feature = "rust1", since = "1.0.0")] @@ -87,7 +85,7 @@ unsafe impl Sync for RwLock {} #[must_use = "if unused the RwLock will immediately unlock"] #[stable(feature = "rust1", since = "1.0.0")] pub struct RwLockReadGuard<'a, T: ?Sized + 'a> { - __lock: &'a RwLock, + __inner: parking_lot::RwLockReadGuard<'a, T>, } #[stable(feature = "rust1", since = "1.0.0")] @@ -110,6 +108,7 @@ unsafe impl Sync for RwLockReadGuard<'_, T> {} pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> { __lock: &'a RwLock, __poison: poison::Guard, + __inner: parking_lot::RwLockWriteGuard<'a, T>, } #[stable(feature = "rust1", since = "1.0.0")] @@ -131,9 +130,8 @@ impl RwLock { #[stable(feature = "rust1", since = "1.0.0")] pub fn new(t: T) -> RwLock { RwLock { - inner: box sys::RWLock::new(), poison: poison::Flag::new(), - data: UnsafeCell::new(t), + inner: parking_lot::RwLock::new(t), } } } @@ -181,10 +179,8 @@ impl RwLock { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn read(&self) -> LockResult> { - unsafe { - self.inner.read(); - RwLockReadGuard::new(self) - } + let inner_guard = self.inner.read(); + unsafe { RwLockReadGuard::new(self, inner_guard) } } /// Attempts to acquire this rwlock with shared read access. @@ -220,12 +216,10 @@ impl RwLock { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn try_read(&self) -> TryLockResult> { - unsafe { - if self.inner.try_read() { - Ok(RwLockReadGuard::new(self)?) - } else { - Err(TryLockError::WouldBlock) - } + if let Some(inner_guard) = self.inner.try_read() { + Ok(unsafe { RwLockReadGuard::new(self, inner_guard)? 
}) + } else { + Err(TryLockError::WouldBlock) } } @@ -263,10 +257,8 @@ impl RwLock { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn write(&self) -> LockResult> { - unsafe { - self.inner.write(); - RwLockWriteGuard::new(self) - } + let inner_guard = self.inner.write(); + unsafe { RwLockWriteGuard::new(self, inner_guard) } } /// Attempts to lock this rwlock with exclusive write access. @@ -302,12 +294,10 @@ impl RwLock { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn try_write(&self) -> TryLockResult> { - unsafe { - if self.inner.try_write() { - Ok(RwLockWriteGuard::new(self)?) - } else { - Err(TryLockError::WouldBlock) - } + if let Some(inner_guard) = self.inner.try_write() { + Ok(unsafe { RwLockWriteGuard::new(self, inner_guard)? }) + } else { + Err(TryLockError::WouldBlock) } } @@ -369,15 +359,13 @@ impl RwLock { // we'll have to destructure it manually instead. unsafe { // Like `let RwLock { inner, poison, data } = self`. - let (inner, poison, data) = { - let RwLock { ref inner, ref poison, ref data } = self; - (ptr::read(inner), ptr::read(poison), ptr::read(data)) + let (poison, inner) = { + let RwLock { ref poison, ref inner } = self; + (ptr::read(poison), ptr::read(inner)) }; mem::forget(self); - inner.destroy(); // Keep in sync with the `Drop` impl. - drop(inner); - poison::map_result(poison.borrow(), |_| data.into_inner()) + poison::map_result(poison.borrow(), |_| inner.into_inner()) } } @@ -406,17 +394,14 @@ impl RwLock { pub fn get_mut(&mut self) -> LockResult<&mut T> { // We know statically that there are no other references to `self`, so // there's no need to lock the inner lock. - let data = unsafe { &mut *self.data.get() }; + let data = self.inner.get_mut(); poison::map_result(self.poison.borrow(), |_| data) } } #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<#[may_dangle] T: ?Sized> Drop for RwLock { - fn drop(&mut self) { - // IMPORTANT: This code needs to be kept in sync with `RwLock::into_inner`. 
- unsafe { self.inner.destroy() } - } + fn drop(&mut self) {} } #[stable(feature = "rust1", since = "1.0.0")] @@ -459,23 +444,24 @@ impl From for RwLock { } impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> { - unsafe fn new(lock: &'rwlock RwLock) + unsafe fn new(lock: &'rwlock RwLock, inner: parking_lot::RwLockReadGuard<'rwlock, T>) -> LockResult> { poison::map_result(lock.poison.borrow(), |_| { RwLockReadGuard { - __lock: lock, + __inner: inner, } }) } } impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> { - unsafe fn new(lock: &'rwlock RwLock) + unsafe fn new(lock: &'rwlock RwLock, inner: parking_lot::RwLockWriteGuard<'rwlock, T>) -> LockResult> { poison::map_result(lock.poison.borrow(), |guard| { RwLockWriteGuard { __lock: lock, __poison: guard, + __inner: inner, } }) } @@ -485,7 +471,7 @@ impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> { impl fmt::Debug for RwLockReadGuard<'_, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RwLockReadGuard") - .field("lock", &self.__lock) + .field("inner", &self.__inner) .finish() } } @@ -501,7 +487,7 @@ impl fmt::Display for RwLockReadGuard<'_, T> { impl fmt::Debug for RwLockWriteGuard<'_, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RwLockWriteGuard") - .field("lock", &self.__lock) + .field("inner", &self.__inner) .finish() } } @@ -518,7 +504,7 @@ impl Deref for RwLockReadGuard<'_, T> { type Target = T; fn deref(&self) -> &T { - unsafe { &*self.__lock.data.get() } + self.__inner.deref() } } @@ -527,29 +513,26 @@ impl Deref for RwLockWriteGuard<'_, T> { type Target = T; fn deref(&self) -> &T { - unsafe { &*self.__lock.data.get() } + self.__inner.deref() } } #[stable(feature = "rust1", since = "1.0.0")] impl DerefMut for RwLockWriteGuard<'_, T> { fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.__lock.data.get() } + self.__inner.deref_mut() } } #[stable(feature = "rust1", since = "1.0.0")] impl Drop for RwLockReadGuard<'_, T> { - fn drop(&mut self) { - unsafe { self.__lock.inner.read_unlock(); } - } + fn drop(&mut self) {} } #[stable(feature = "rust1", since = "1.0.0")] impl Drop for RwLockWriteGuard<'_, T> { fn drop(&mut self) { self.__lock.poison.done(&self.__poison); - unsafe { self.__lock.inner.write_unlock(); } } } diff --git a/src/libstd/sys/cloudabi/condvar.rs b/src/libstd/sys/cloudabi/condvar.rs deleted file mode 100644 index 7aa0b0b6f4912..0000000000000 --- a/src/libstd/sys/cloudabi/condvar.rs +++ /dev/null @@ -1,163 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::mem; -use crate::sync::atomic::{AtomicU32, Ordering}; -use crate::sys::cloudabi::abi; -use crate::sys::mutex::{self, Mutex}; -use crate::sys::time::checked_dur2intervals; -use crate::time::Duration; - -extern "C" { - #[thread_local] - static __pthread_thread_id: abi::tid; -} - -pub struct Condvar { - condvar: UnsafeCell, -} - -unsafe impl Send for Condvar {} -unsafe impl Sync for Condvar {} - -const NEW: Condvar = Condvar { - condvar: UnsafeCell::new(AtomicU32::new(abi::CONDVAR_HAS_NO_WAITERS.0)), -}; - -impl Condvar { - pub const fn new() -> Condvar { - NEW - } - - pub unsafe fn init(&mut self) {} - - pub unsafe fn notify_one(&self) { - let condvar = self.condvar.get(); - if (*condvar).load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 { - let ret = abi::condvar_signal(condvar as *mut abi::condvar, abi::scope::PRIVATE, 1); - assert_eq!( - ret, - abi::errno::SUCCESS, - "Failed to signal on condition variable" - ); - } - } - - pub unsafe fn notify_all(&self) { - let condvar = 
self.condvar.get(); - if (*condvar).load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 { - let ret = abi::condvar_signal( - condvar as *mut abi::condvar, - abi::scope::PRIVATE, - abi::nthreads::max_value(), - ); - assert_eq!( - ret, - abi::errno::SUCCESS, - "Failed to broadcast on condition variable" - ); - } - } - - pub unsafe fn wait(&self, mutex: &Mutex) { - let mutex = mutex::raw(mutex); - assert_eq!( - (*mutex).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0, - __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, - "This lock is not write-locked by this thread" - ); - - // Call into the kernel to wait on the condition variable. - let condvar = self.condvar.get(); - let subscription = abi::subscription { - type_: abi::eventtype::CONDVAR, - union: abi::subscription_union { - condvar: abi::subscription_condvar { - condvar: condvar as *mut abi::condvar, - condvar_scope: abi::scope::PRIVATE, - lock: mutex as *mut abi::lock, - lock_scope: abi::scope::PRIVATE, - }, - }, - ..mem::zeroed() - }; - let mut event: abi::event = mem::uninitialized(); - let mut nevents: usize = mem::uninitialized(); - let ret = abi::poll(&subscription, &mut event, 1, &mut nevents); - assert_eq!( - ret, - abi::errno::SUCCESS, - "Failed to wait on condition variable" - ); - assert_eq!( - event.error, - abi::errno::SUCCESS, - "Failed to wait on condition variable" - ); - } - - pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - let mutex = mutex::raw(mutex); - assert_eq!( - (*mutex).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0, - __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, - "This lock is not write-locked by this thread" - ); - - // Call into the kernel to wait on the condition variable. - let condvar = self.condvar.get(); - let timeout = checked_dur2intervals(&dur) - .expect("overflow converting duration to nanoseconds"); - let subscriptions = [ - abi::subscription { - type_: abi::eventtype::CONDVAR, - union: abi::subscription_union { - condvar: abi::subscription_condvar { - condvar: condvar as *mut abi::condvar, - condvar_scope: abi::scope::PRIVATE, - lock: mutex as *mut abi::lock, - lock_scope: abi::scope::PRIVATE, - }, - }, - ..mem::zeroed() - }, - abi::subscription { - type_: abi::eventtype::CLOCK, - union: abi::subscription_union { - clock: abi::subscription_clock { - clock_id: abi::clockid::MONOTONIC, - timeout, - ..mem::zeroed() - }, - }, - ..mem::zeroed() - }, - ]; - let mut events: [abi::event; 2] = mem::uninitialized(); - let mut nevents: usize = mem::uninitialized(); - let ret = abi::poll(subscriptions.as_ptr(), events.as_mut_ptr(), 2, &mut nevents); - assert_eq!( - ret, - abi::errno::SUCCESS, - "Failed to wait on condition variable" - ); - for i in 0..nevents { - assert_eq!( - events[i].error, - abi::errno::SUCCESS, - "Failed to wait on condition variable" - ); - if events[i].type_ == abi::eventtype::CONDVAR { - return true; - } - } - false - } - - pub unsafe fn destroy(&self) { - let condvar = self.condvar.get(); - assert_eq!( - (*condvar).load(Ordering::Relaxed), - abi::CONDVAR_HAS_NO_WAITERS.0, - "Attempted to destroy a condition variable with blocked threads" - ); - } -} diff --git a/src/libstd/sys/cloudabi/mod.rs b/src/libstd/sys/cloudabi/mod.rs index 3f8e67a7af85e..f45cfdec3f717 100644 --- a/src/libstd/sys/cloudabi/mod.rs +++ b/src/libstd/sys/cloudabi/mod.rs @@ -1,6 +1,7 @@ use crate::io::ErrorKind; use crate::mem; +pub mod abi; #[path = "../unix/alloc.rs"] pub mod alloc; pub mod args; @@ -8,13 +9,10 @@ pub mod args; pub mod backtrace; #[path = 
"../unix/cmath.rs"] pub mod cmath; -pub mod condvar; pub mod io; #[path = "../unix/memchr.rs"] pub mod memchr; -pub mod mutex; pub mod os; -pub mod rwlock; pub mod stack_overflow; pub mod stdio; pub mod thread; @@ -24,8 +22,6 @@ pub mod time; pub use crate::sys_common::os_str_bytes as os_str; -mod abi; - mod shims; pub use self::shims::*; diff --git a/src/libstd/sys/cloudabi/mutex.rs b/src/libstd/sys/cloudabi/mutex.rs deleted file mode 100644 index 5e191e31d5fc4..0000000000000 --- a/src/libstd/sys/cloudabi/mutex.rs +++ /dev/null @@ -1,148 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::mem; -use crate::sync::atomic::{AtomicU32, Ordering}; -use crate::sys::cloudabi::abi; -use crate::sys::rwlock::{self, RWLock}; - -extern "C" { - #[thread_local] - static __pthread_thread_id: abi::tid; -} - -// Implement Mutex using an RWLock. This doesn't introduce any -// performance overhead in this environment, as the operations would be -// implemented identically. -pub struct Mutex(RWLock); - -pub unsafe fn raw(m: &Mutex) -> *mut AtomicU32 { - rwlock::raw(&m.0) -} - -impl Mutex { - pub const fn new() -> Mutex { - Mutex(RWLock::new()) - } - - pub unsafe fn init(&mut self) { - // This function should normally reinitialize the mutex after - // moving it to a different memory address. This implementation - // does not require adjustments after moving. - } - - pub unsafe fn try_lock(&self) -> bool { - self.0.try_write() - } - - pub unsafe fn lock(&self) { - self.0.write() - } - - pub unsafe fn unlock(&self) { - self.0.write_unlock() - } - - pub unsafe fn destroy(&self) { - self.0.destroy() - } -} - -pub struct ReentrantMutex { - lock: UnsafeCell, - recursion: UnsafeCell, -} - -impl ReentrantMutex { - pub unsafe fn uninitialized() -> ReentrantMutex { - mem::uninitialized() - } - - pub unsafe fn init(&mut self) { - self.lock = UnsafeCell::new(AtomicU32::new(abi::LOCK_UNLOCKED.0)); - self.recursion = UnsafeCell::new(0); - } - - pub unsafe fn try_lock(&self) -> bool { - // Attempt to acquire the lock. - let lock = self.lock.get(); - let recursion = self.recursion.get(); - if let Err(old) = (*lock).compare_exchange( - abi::LOCK_UNLOCKED.0, - __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, - Ordering::Acquire, - Ordering::Relaxed, - ) { - // If we fail to acquire the lock, it may be the case - // that we've already acquired it and may need to recurse. - if old & !abi::LOCK_KERNEL_MANAGED.0 == __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 { - *recursion += 1; - true - } else { - false - } - } else { - // Success. - assert_eq!(*recursion, 0, "Mutex has invalid recursion count"); - true - } - } - - pub unsafe fn lock(&self) { - if !self.try_lock() { - // Call into the kernel to acquire a write lock. 
- let lock = self.lock.get(); - let subscription = abi::subscription { - type_: abi::eventtype::LOCK_WRLOCK, - union: abi::subscription_union { - lock: abi::subscription_lock { - lock: lock as *mut abi::lock, - lock_scope: abi::scope::PRIVATE, - }, - }, - ..mem::zeroed() - }; - let mut event: abi::event = mem::uninitialized(); - let mut nevents: usize = mem::uninitialized(); - let ret = abi::poll(&subscription, &mut event, 1, &mut nevents); - assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire mutex"); - assert_eq!(event.error, abi::errno::SUCCESS, "Failed to acquire mutex"); - } - } - - pub unsafe fn unlock(&self) { - let lock = self.lock.get(); - let recursion = self.recursion.get(); - assert_eq!( - (*lock).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0, - __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, - "This mutex is locked by a different thread" - ); - - if *recursion > 0 { - *recursion -= 1; - } else if !(*lock) - .compare_exchange( - __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, - abi::LOCK_UNLOCKED.0, - Ordering::Release, - Ordering::Relaxed, - ) - .is_ok() - { - // Lock is managed by kernelspace. Call into the kernel - // to unblock waiting threads. - let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE); - assert_eq!(ret, abi::errno::SUCCESS, "Failed to unlock a mutex"); - } - } - - pub unsafe fn destroy(&self) { - let lock = self.lock.get(); - let recursion = self.recursion.get(); - assert_eq!( - (*lock).load(Ordering::Relaxed), - abi::LOCK_UNLOCKED.0, - "Attempted to destroy locked mutex" - ); - assert_eq!(*recursion, 0, "Recursion counter invalid"); - } -} diff --git a/src/libstd/sys/cloudabi/rwlock.rs b/src/libstd/sys/cloudabi/rwlock.rs deleted file mode 100644 index 6da3f3841b6c6..0000000000000 --- a/src/libstd/sys/cloudabi/rwlock.rs +++ /dev/null @@ -1,229 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::mem; -use crate::sync::atomic::{AtomicU32, Ordering}; -use crate::sys::cloudabi::abi; - -extern "C" { - #[thread_local] - static __pthread_thread_id: abi::tid; -} - -#[thread_local] -static mut RDLOCKS_ACQUIRED: u32 = 0; - -pub struct RWLock { - lock: UnsafeCell, -} - -pub unsafe fn raw(r: &RWLock) -> *mut AtomicU32 { - r.lock.get() -} - -unsafe impl Send for RWLock {} -unsafe impl Sync for RWLock {} - -const NEW: RWLock = RWLock { - lock: UnsafeCell::new(AtomicU32::new(abi::LOCK_UNLOCKED.0)), -}; - -impl RWLock { - pub const fn new() -> RWLock { - NEW - } - - pub unsafe fn try_read(&self) -> bool { - let lock = self.lock.get(); - let mut old = abi::LOCK_UNLOCKED.0; - while let Err(cur) = - (*lock).compare_exchange_weak(old, old + 1, Ordering::Acquire, Ordering::Relaxed) - { - if (cur & abi::LOCK_WRLOCKED.0) != 0 { - // Another thread already has a write lock. - assert_ne!( - old & !abi::LOCK_KERNEL_MANAGED.0, - __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, - "Attempted to acquire a read lock while holding a write lock" - ); - return false; - } else if (old & abi::LOCK_KERNEL_MANAGED.0) != 0 && RDLOCKS_ACQUIRED == 0 { - // Lock has threads waiting for the lock. Only acquire - // the lock if we have already acquired read locks. In - // that case, it is justified to acquire this lock to - // prevent a deadlock. - return false; - } - old = cur; - } - - RDLOCKS_ACQUIRED += 1; - true - } - - pub unsafe fn read(&self) { - if !self.try_read() { - // Call into the kernel to acquire a read lock. 
- let lock = self.lock.get(); - let subscription = abi::subscription { - type_: abi::eventtype::LOCK_RDLOCK, - union: abi::subscription_union { - lock: abi::subscription_lock { - lock: lock as *mut abi::lock, - lock_scope: abi::scope::PRIVATE, - }, - }, - ..mem::zeroed() - }; - let mut event: abi::event = mem::uninitialized(); - let mut nevents: usize = mem::uninitialized(); - let ret = abi::poll(&subscription, &mut event, 1, &mut nevents); - assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire read lock"); - assert_eq!( - event.error, - abi::errno::SUCCESS, - "Failed to acquire read lock" - ); - - RDLOCKS_ACQUIRED += 1; - } - } - - pub unsafe fn read_unlock(&self) { - // Perform a read unlock. We can do this in userspace, except when - // other threads are blocked and we are performing the last unlock. - // In that case, call into the kernel. - // - // Other threads may attempt to increment the read lock count, - // meaning that the call into the kernel could be spurious. To - // prevent this from happening, upgrade to a write lock first. This - // allows us to call into the kernel, having the guarantee that the - // lock value will not change in the meantime. - assert!(RDLOCKS_ACQUIRED > 0, "Bad lock count"); - let mut old = 1; - loop { - let lock = self.lock.get(); - if old == 1 | abi::LOCK_KERNEL_MANAGED.0 { - // Last read lock while threads are waiting. Attempt to upgrade - // to a write lock before calling into the kernel to unlock. - if let Err(cur) = (*lock).compare_exchange_weak( - old, - __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 | abi::LOCK_KERNEL_MANAGED.0, - Ordering::Acquire, - Ordering::Relaxed, - ) { - old = cur; - } else { - // Call into the kernel to unlock. - let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE); - assert_eq!(ret, abi::errno::SUCCESS, "Failed to write unlock a rwlock"); - break; - } - } else { - // No threads waiting or not the last read lock. Just decrement - // the read lock count. - assert_ne!( - old & !abi::LOCK_KERNEL_MANAGED.0, - 0, - "This rwlock is not locked" - ); - assert_eq!( - old & abi::LOCK_WRLOCKED.0, - 0, - "Attempted to read-unlock a write-locked rwlock" - ); - if let Err(cur) = (*lock).compare_exchange_weak( - old, - old - 1, - Ordering::Acquire, - Ordering::Relaxed, - ) { - old = cur; - } else { - break; - } - } - } - - RDLOCKS_ACQUIRED -= 1; - } - - pub unsafe fn try_write(&self) -> bool { - // Attempt to acquire the lock. - let lock = self.lock.get(); - if let Err(old) = (*lock).compare_exchange( - abi::LOCK_UNLOCKED.0, - __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, - Ordering::Acquire, - Ordering::Relaxed, - ) { - // Failure. Crash upon recursive acquisition. - assert_ne!( - old & !abi::LOCK_KERNEL_MANAGED.0, - __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, - "Attempted to recursive write-lock a rwlock", - ); - false - } else { - // Success. - true - } - } - - pub unsafe fn write(&self) { - if !self.try_write() { - // Call into the kernel to acquire a write lock. 
- let lock = self.lock.get(); - let subscription = abi::subscription { - type_: abi::eventtype::LOCK_WRLOCK, - union: abi::subscription_union { - lock: abi::subscription_lock { - lock: lock as *mut abi::lock, - lock_scope: abi::scope::PRIVATE, - }, - }, - ..mem::zeroed() - }; - let mut event: abi::event = mem::uninitialized(); - let mut nevents: usize = mem::uninitialized(); - let ret = abi::poll(&subscription, &mut event, 1, &mut nevents); - assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire write lock"); - assert_eq!( - event.error, - abi::errno::SUCCESS, - "Failed to acquire write lock" - ); - } - } - - pub unsafe fn write_unlock(&self) { - let lock = self.lock.get(); - assert_eq!( - (*lock).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0, - __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, - "This rwlock is not write-locked by this thread" - ); - - if !(*lock) - .compare_exchange( - __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, - abi::LOCK_UNLOCKED.0, - Ordering::Release, - Ordering::Relaxed, - ) - .is_ok() - { - // Lock is managed by kernelspace. Call into the kernel - // to unblock waiting threads. - let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE); - assert_eq!(ret, abi::errno::SUCCESS, "Failed to write unlock a rwlock"); - } - } - - pub unsafe fn destroy(&self) { - let lock = self.lock.get(); - assert_eq!( - (*lock).load(Ordering::Relaxed), - abi::LOCK_UNLOCKED.0, - "Attempted to destroy locked rwlock" - ); - } -} diff --git a/src/libstd/sys/redox/args.rs b/src/libstd/sys/redox/args.rs index f9e2f5ba311a0..eaa3a9708193d 100644 --- a/src/libstd/sys/redox/args.rs +++ b/src/libstd/sys/redox/args.rs @@ -46,16 +46,15 @@ impl DoubleEndedIterator for Args { } mod imp { - use crate::os::unix::prelude::*; - use crate::mem; use crate::ffi::{CStr, OsString}; use crate::marker::PhantomData; + use crate::mem; + use crate::os::unix::prelude::*; + use crate::sync::RawMutex; use super::Args; - use crate::sys_common::mutex::Mutex; - static mut GLOBAL_ARGS_PTR: usize = 0; - static LOCK: Mutex = Mutex::new(); + static LOCK: RawMutex = RawMutex::new(); pub unsafe fn init(argc: isize, argv: *const *const u8) { let args = (0..argc).map(|i| { @@ -82,8 +81,8 @@ mod imp { } fn clone() -> Option>> { + let _guard = LOCK.lock(); unsafe { - let _guard = LOCK.lock(); let ptr = get_global_ptr(); (*ptr).as_ref().map(|s| (**s).clone()) } diff --git a/src/libstd/sys/redox/condvar.rs b/src/libstd/sys/redox/condvar.rs deleted file mode 100644 index a6365cac23ea7..0000000000000 --- a/src/libstd/sys/redox/condvar.rs +++ /dev/null @@ -1,111 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::intrinsics::{atomic_cxchg, atomic_load, atomic_xadd, atomic_xchg}; -use crate::ptr; -use crate::time::Duration; - -use crate::sys::mutex::{mutex_unlock, Mutex}; -use crate::sys::syscall::{futex, TimeSpec, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE}; - -pub struct Condvar { - lock: UnsafeCell<*mut i32>, - seq: UnsafeCell -} - -impl Condvar { - pub const fn new() -> Condvar { - Condvar { - lock: UnsafeCell::new(ptr::null_mut()), - seq: UnsafeCell::new(0) - } - } - - #[inline] - pub unsafe fn init(&self) { - *self.lock.get() = ptr::null_mut(); - *self.seq.get() = 0; - } - - #[inline] - pub fn notify_one(&self) { - unsafe { - let seq = self.seq.get(); - - atomic_xadd(seq, 1); - - let _ = futex(seq, FUTEX_WAKE, 1, 0, ptr::null_mut()); - } - } - - #[inline] - pub fn notify_all(&self) { - unsafe { - let lock = self.lock.get(); - let seq = self.seq.get(); - - if *lock == ptr::null_mut() { - return; - } - - 
atomic_xadd(seq, 1); - - let _ = futex(seq, FUTEX_REQUEUE, 1, crate::usize::MAX, *lock); - } - } - - #[inline] - unsafe fn wait_inner(&self, mutex: &Mutex, timeout_ptr: *const TimeSpec) -> bool { - let lock = self.lock.get(); - let seq = self.seq.get(); - - if *lock != mutex.lock.get() { - if *lock != ptr::null_mut() { - panic!("Condvar used with more than one Mutex"); - } - - atomic_cxchg(lock as *mut usize, 0, mutex.lock.get() as usize); - } - - mutex_unlock(*lock); - - let seq_before = atomic_load(seq); - - let _ = futex(seq, FUTEX_WAIT, seq_before, timeout_ptr as usize, ptr::null_mut()); - - let seq_after = atomic_load(seq); - - while atomic_xchg(*lock, 2) != 0 { - let _ = futex(*lock, FUTEX_WAIT, 2, 0, ptr::null_mut()); - } - - seq_before != seq_after - } - - #[inline] - pub fn wait(&self, mutex: &Mutex) { - unsafe { - assert!(self.wait_inner(mutex, ptr::null())); - } - } - - #[inline] - pub fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - unsafe { - let timeout = TimeSpec { - tv_sec: dur.as_secs() as i64, - tv_nsec: dur.subsec_nanos() as i32 - }; - - self.wait_inner(mutex, &timeout as *const TimeSpec) - } - } - - #[inline] - pub unsafe fn destroy(&self) { - *self.lock.get() = ptr::null_mut(); - *self.seq.get() = 0; - } -} - -unsafe impl Send for Condvar {} - -unsafe impl Sync for Condvar {} diff --git a/src/libstd/sys/redox/mod.rs b/src/libstd/sys/redox/mod.rs index 0e8ed8e303d43..4265a4641c108 100644 --- a/src/libstd/sys/redox/mod.rs +++ b/src/libstd/sys/redox/mod.rs @@ -11,7 +11,6 @@ pub mod args; #[cfg(feature = "backtrace")] pub mod backtrace; pub mod cmath; -pub mod condvar; pub mod env; pub mod ext; pub mod fast_thread_local; @@ -19,14 +18,12 @@ pub mod fd; pub mod fs; pub mod io; pub mod memchr; -pub mod mutex; pub mod net; pub mod os; pub mod path; pub mod pipe; pub mod process; pub mod rand; -pub mod rwlock; pub mod stack_overflow; pub mod stdio; pub mod syscall; diff --git a/src/libstd/sys/redox/mutex.rs b/src/libstd/sys/redox/mutex.rs deleted file mode 100644 index 59399df0294c8..0000000000000 --- a/src/libstd/sys/redox/mutex.rs +++ /dev/null @@ -1,169 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::intrinsics::{atomic_cxchg, atomic_xchg}; -use crate::ptr; - -use crate::sys::syscall::{futex, getpid, FUTEX_WAIT, FUTEX_WAKE}; - -pub unsafe fn mutex_try_lock(m: *mut i32) -> bool { - atomic_cxchg(m, 0, 1).0 == 0 -} - -pub unsafe fn mutex_lock(m: *mut i32) { - let mut c = 0; - //Set to larger value for longer spin test - for _i in 0..100 { - c = atomic_cxchg(m, 0, 1).0; - if c == 0 { - break; - } - //cpu_relax() - } - if c == 1 { - c = atomic_xchg(m, 2); - } - while c != 0 { - let _ = futex(m, FUTEX_WAIT, 2, 0, ptr::null_mut()); - c = atomic_xchg(m, 2); - } -} - -pub unsafe fn mutex_unlock(m: *mut i32) { - if *m == 2 { - *m = 0; - } else if atomic_xchg(m, 0) == 1 { - return; - } - //Set to larger value for longer spin test - for _i in 0..100 { - if *m != 0 { - if atomic_cxchg(m, 1, 2).0 != 0 { - return; - } - } - //cpu_relax() - } - let _ = futex(m, FUTEX_WAKE, 1, 0, ptr::null_mut()); -} - -pub struct Mutex { - pub lock: UnsafeCell, -} - -impl Mutex { - /// Creates a new mutex. 
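// The mutex_lock/mutex_unlock pair above is the classic three-state futex
// mutex from Drepper's "Futexes Are Tricky": 0 = unlocked, 1 = locked with no
// waiters, 2 = locked with possible waiters. lock() escalates 1 -> 2 before
// sleeping, and unlock() only pays for a FUTEX_WAKE syscall when it observes
// state 2. A compact model with the syscalls stubbed out:

use std::sync::atomic::{AtomicI32, Ordering};

fn futex_wait(_addr: &AtomicI32, _expected: i32) { /* stub for futex(FUTEX_WAIT) */ }
fn futex_wake(_addr: &AtomicI32, _count: i32) { /* stub for futex(FUTEX_WAKE) */ }

fn lock(m: &AtomicI32) {
    // Uncontended fast path: 0 -> 1.
    let mut c = match m.compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed) {
        Ok(_) => return,
        Err(c) => c,
    };
    loop {
        // Mark the lock contended (2) before sleeping so that unlock() knows
        // somebody may need waking.
        if c == 2 || m.compare_exchange(1, 2, Ordering::Acquire, Ordering::Relaxed).is_err() {
            futex_wait(m, 2);
        }
        c = match m.compare_exchange(0, 2, Ordering::Acquire, Ordering::Relaxed) {
            Ok(_) => return,
            Err(c) => c,
        };
    }
}

fn unlock(m: &AtomicI32) {
    if m.swap(0, Ordering::Release) == 2 {
        futex_wake(m, 1); // only syscall when a waiter may exist
    }
}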
- pub const fn new() -> Self { - Mutex { - lock: UnsafeCell::new(0), - } - } - - #[inline] - pub unsafe fn init(&self) { - *self.lock.get() = 0; - } - - /// Try to lock the mutex - #[inline] - pub unsafe fn try_lock(&self) -> bool { - mutex_try_lock(self.lock.get()) - } - - /// Lock the mutex - #[inline] - pub unsafe fn lock(&self) { - mutex_lock(self.lock.get()); - } - - /// Unlock the mutex - #[inline] - pub unsafe fn unlock(&self) { - mutex_unlock(self.lock.get()); - } - - #[inline] - pub unsafe fn destroy(&self) { - *self.lock.get() = 0; - } -} - -unsafe impl Send for Mutex {} - -unsafe impl Sync for Mutex {} - -pub struct ReentrantMutex { - pub lock: UnsafeCell, - pub owner: UnsafeCell, - pub own_count: UnsafeCell, -} - -impl ReentrantMutex { - pub const fn uninitialized() -> Self { - ReentrantMutex { - lock: UnsafeCell::new(0), - owner: UnsafeCell::new(0), - own_count: UnsafeCell::new(0), - } - } - - #[inline] - pub unsafe fn init(&mut self) { - *self.lock.get() = 0; - *self.owner.get() = 0; - *self.own_count.get() = 0; - } - - /// Try to lock the mutex - #[inline] - pub unsafe fn try_lock(&self) -> bool { - let pid = getpid().unwrap(); - if *self.own_count.get() > 0 && *self.owner.get() == pid { - *self.own_count.get() += 1; - true - } else { - if mutex_try_lock(self.lock.get()) { - *self.owner.get() = pid; - *self.own_count.get() = 1; - true - } else { - false - } - } - } - - /// Lock the mutex - #[inline] - pub unsafe fn lock(&self) { - let pid = getpid().unwrap(); - if *self.own_count.get() > 0 && *self.owner.get() == pid { - *self.own_count.get() += 1; - } else { - mutex_lock(self.lock.get()); - *self.owner.get() = pid; - *self.own_count.get() = 1; - } - } - - /// Unlock the mutex - #[inline] - pub unsafe fn unlock(&self) { - let pid = getpid().unwrap(); - if *self.own_count.get() > 0 && *self.owner.get() == pid { - *self.own_count.get() -= 1; - if *self.own_count.get() == 0 { - *self.owner.get() = 0; - mutex_unlock(self.lock.get()); - } - } - } - - #[inline] - pub unsafe fn destroy(&self) { - *self.lock.get() = 0; - *self.owner.get() = 0; - *self.own_count.get() = 0; - } -} - -unsafe impl Send for ReentrantMutex {} - -unsafe impl Sync for ReentrantMutex {} diff --git a/src/libstd/sys/redox/os.rs b/src/libstd/sys/redox/os.rs index 3ae201f698c2b..020aea20bf6e7 100644 --- a/src/libstd/sys/redox/os.rs +++ b/src/libstd/sys/redox/os.rs @@ -18,7 +18,7 @@ use crate::path::{self, PathBuf}; use crate::ptr; use crate::slice; use crate::str; -use crate::sys_common::mutex::Mutex; +use crate::sync::RawMutex; use crate::sys::{cvt, cvt_libc, fd, syscall}; use crate::vec; @@ -121,7 +121,7 @@ pub fn current_exe() -> io::Result { Ok(PathBuf::from(path)) } -pub static ENV_LOCK: Mutex = Mutex::new(); +pub static ENV_LOCK: RawMutex = RawMutex::new(); pub struct Env { iter: vec::IntoIter<(OsString, OsString)>, @@ -142,8 +142,8 @@ pub unsafe fn environ() -> *mut *const *const c_char { /// Returns a vector of (variable, value) byte-vector pairs for all the /// environment variables of the current process. 
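// A pattern repeated throughout this diff: with the old sys_common Mutex,
// lock() was unsafe (reentrant acquisition was UB because init() was never
// called), so the guard had to live inside the unsafe block. The parking_lot
// backed RawMutex's lock() is a safe fn whose worst case is a deadlock, which
// lets each unsafe block shrink to the operations that are genuinely unsafe.
// The shape, as a sketch using std's Mutex as a stand-in for the RawMutex
// these hunks import from crate::sync:

use std::sync::Mutex;

static mut GLOBAL: Option<String> = None;

fn read_global(lock: &Mutex<()>) -> Option<String> {
    let _guard = lock.lock().unwrap(); // safe: acquired outside the unsafe block
    unsafe {
        // only the access to the mutable static still needs unsafe
        GLOBAL.clone()
    }
}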
pub fn env() -> Env { + let _guard = ENV_LOCK.lock(); unsafe { - let _guard = ENV_LOCK.lock(); let mut environ = *environ(); if environ == ptr::null() { panic!("os::env() failure getting env string from OS: {}", @@ -182,8 +182,8 @@ pub fn getenv(k: &OsStr) -> io::Result> { // environment variables with a nul byte can't be set, so their value is // always None as well let k = CString::new(k.as_bytes())?; + let _guard = ENV_LOCK.lock(); unsafe { - let _guard = ENV_LOCK.lock(); let s = libc::getenv(k.as_ptr()) as *const libc::c_char; let ret = if s.is_null() { None @@ -198,8 +198,8 @@ pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { let k = CString::new(k.as_bytes())?; let v = CString::new(v.as_bytes())?; + let _guard = ENV_LOCK.lock(); unsafe { - let _guard = ENV_LOCK.lock(); cvt_libc(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(|_| ()) } } @@ -207,8 +207,8 @@ pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { pub fn unsetenv(n: &OsStr) -> io::Result<()> { let nbuf = CString::new(n.as_bytes())?; + let _guard = ENV_LOCK.lock(); unsafe { - let _guard = ENV_LOCK.lock(); cvt_libc(libc::unsetenv(nbuf.as_ptr())).map(|_| ()) } } diff --git a/src/libstd/sys/redox/rwlock.rs b/src/libstd/sys/redox/rwlock.rs deleted file mode 100644 index 990e7551114bb..0000000000000 --- a/src/libstd/sys/redox/rwlock.rs +++ /dev/null @@ -1,51 +0,0 @@ -use super::mutex::Mutex; - -pub struct RWLock { - mutex: Mutex -} - -unsafe impl Send for RWLock {} -unsafe impl Sync for RWLock {} - -impl RWLock { - pub const fn new() -> RWLock { - RWLock { - mutex: Mutex::new() - } - } - - #[inline] - pub unsafe fn read(&self) { - self.mutex.lock(); - } - - #[inline] - pub unsafe fn try_read(&self) -> bool { - self.mutex.try_lock() - } - - #[inline] - pub unsafe fn write(&self) { - self.mutex.lock(); - } - - #[inline] - pub unsafe fn try_write(&self) -> bool { - self.mutex.try_lock() - } - - #[inline] - pub unsafe fn read_unlock(&self) { - self.mutex.unlock(); - } - - #[inline] - pub unsafe fn write_unlock(&self) { - self.mutex.unlock(); - } - - #[inline] - pub unsafe fn destroy(&self) { - self.mutex.destroy(); - } -} diff --git a/src/libstd/sys/sgx/alloc.rs b/src/libstd/sys/sgx/alloc.rs index 40daec758a9fc..23eaa9e1c426f 100644 --- a/src/libstd/sys/sgx/alloc.rs +++ b/src/libstd/sys/sgx/alloc.rs @@ -1,6 +1,6 @@ use crate::alloc::{GlobalAlloc, Layout, System}; -use super::waitqueue::SpinMutex; +use super::spinmutex::SpinMutex; // Using a SpinMutex because we never want to exit the enclave waiting for the // allocator. 
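// Context for the hunk above: in SGX, blocking means exiting the enclave via
// a usercall, and the allocator must work even during enclave setup, so a
// spinlock is the right tool despite its usual drawbacks. A sketch of the
// shape (the real code wraps dlmalloc in the SpinMutex defined in the new
// spinmutex.rs below; the bump allocator here is a made-up placeholder):

use std::alloc::{GlobalAlloc, Layout};
use std::cell::UnsafeCell;
use std::ptr;
use std::sync::atomic::{AtomicBool, Ordering};

struct SpinAlloc {
    locked: AtomicBool,
    heap: UnsafeCell<(usize, usize)>, // (next, end) of a fixed arena
}

unsafe impl Sync for SpinAlloc {}

unsafe impl GlobalAlloc for SpinAlloc {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // Spin instead of parking: never leave the enclave for the allocator.
        while self.locked.compare_and_swap(false, true, Ordering::Acquire) {}
        let (next, end) = *self.heap.get();
        let start = (next + layout.align() - 1) & !(layout.align() - 1);
        let ret = if start.checked_add(layout.size()).map_or(true, |e| e > end) {
            ptr::null_mut()
        } else {
            *self.heap.get() = (start + layout.size(), end);
            start as *mut u8
        };
        self.locked.store(false, Ordering::Release);
        ret
    }

    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
}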
diff --git a/src/libstd/sys/sgx/condvar.rs b/src/libstd/sys/sgx/condvar.rs deleted file mode 100644 index 000bb19f2692a..0000000000000 --- a/src/libstd/sys/sgx/condvar.rs +++ /dev/null @@ -1,41 +0,0 @@ -use crate::sys::mutex::Mutex; -use crate::time::Duration; - -use super::waitqueue::{WaitVariable, WaitQueue, SpinMutex}; - -pub struct Condvar { - inner: SpinMutex>, -} - -impl Condvar { - pub const fn new() -> Condvar { - Condvar { inner: SpinMutex::new(WaitVariable::new(())) } - } - - #[inline] - pub unsafe fn init(&mut self) {} - - #[inline] - pub unsafe fn notify_one(&self) { - let _ = WaitQueue::notify_one(self.inner.lock()); - } - - #[inline] - pub unsafe fn notify_all(&self) { - let _ = WaitQueue::notify_all(self.inner.lock()); - } - - pub unsafe fn wait(&self, mutex: &Mutex) { - let guard = self.inner.lock(); - mutex.unlock(); - WaitQueue::wait(guard); - mutex.lock() - } - - pub unsafe fn wait_timeout(&self, _mutex: &Mutex, _dur: Duration) -> bool { - rtabort!("timeout not supported in SGX"); - } - - #[inline] - pub unsafe fn destroy(&self) {} -} diff --git a/src/libstd/sys/sgx/mod.rs b/src/libstd/sys/sgx/mod.rs index a99a534f41e3c..7a6264a1360dc 100644 --- a/src/libstd/sys/sgx/mod.rs +++ b/src/libstd/sys/sgx/mod.rs @@ -8,27 +8,24 @@ use crate::os::raw::c_char; use crate::sync::atomic::{AtomicBool, Ordering}; pub mod abi; -mod waitqueue; +mod spinmutex; pub mod alloc; pub mod args; #[cfg(feature = "backtrace")] pub mod backtrace; pub mod cmath; -pub mod condvar; pub mod env; pub mod ext; pub mod fd; pub mod fs; pub mod io; pub mod memchr; -pub mod mutex; pub mod net; pub mod os; pub mod path; pub mod pipe; pub mod process; -pub mod rwlock; pub mod stack_overflow; pub mod thread; pub mod thread_local; diff --git a/src/libstd/sys/sgx/mutex.rs b/src/libstd/sys/sgx/mutex.rs deleted file mode 100644 index f325fb1dd582f..0000000000000 --- a/src/libstd/sys/sgx/mutex.rs +++ /dev/null @@ -1,140 +0,0 @@ -use fortanix_sgx_abi::Tcs; - -use super::abi::thread; - -use super::waitqueue::{WaitVariable, WaitQueue, SpinMutex, NotifiedTcs, try_lock_or_false}; - -pub struct Mutex { - inner: SpinMutex>, -} - -// Implementation according to “Operating Systems: Three Easy Pieces”, chapter 28 -impl Mutex { - pub const fn new() -> Mutex { - Mutex { inner: SpinMutex::new(WaitVariable::new(false)) } - } - - #[inline] - pub unsafe fn init(&mut self) {} - - #[inline] - pub unsafe fn lock(&self) { - let mut guard = self.inner.lock(); - if *guard.lock_var() { - // Another thread has the lock, wait - WaitQueue::wait(guard) - // Another thread has passed the lock to us - } else { - // We are just now obtaining the lock - *guard.lock_var_mut() = true; - } - } - - #[inline] - pub unsafe fn unlock(&self) { - let guard = self.inner.lock(); - if let Err(mut guard) = WaitQueue::notify_one(guard) { - // No other waiters, unlock - *guard.lock_var_mut() = false; - } else { - // There was a thread waiting, just pass the lock - } - } - - #[inline] - pub unsafe fn try_lock(&self) -> bool { - let mut guard = try_lock_or_false!(self.inner); - if *guard.lock_var() { - // Another thread has the lock - false - } else { - // We are just now obtaining the lock - *guard.lock_var_mut() = true; - true - } - } - - #[inline] - pub unsafe fn destroy(&self) {} -} - -struct ReentrantLock { - owner: Option, - count: usize -} - -pub struct ReentrantMutex { - inner: SpinMutex>, -} - -impl ReentrantMutex { - pub const fn uninitialized() -> ReentrantMutex { - ReentrantMutex { - inner: SpinMutex::new(WaitVariable::new(ReentrantLock { owner: None, 
count: 0 })) - } - } - - #[inline] - pub unsafe fn init(&mut self) {} - - #[inline] - pub unsafe fn lock(&self) { - let mut guard = self.inner.lock(); - match guard.lock_var().owner { - Some(tcs) if tcs != thread::current() => { - // Another thread has the lock, wait - WaitQueue::wait(guard); - // Another thread has passed the lock to us - }, - _ => { - // We are just now obtaining the lock - guard.lock_var_mut().owner = Some(thread::current()); - guard.lock_var_mut().count += 1; - }, - } - } - - #[inline] - pub unsafe fn unlock(&self) { - let mut guard = self.inner.lock(); - if guard.lock_var().count > 1 { - guard.lock_var_mut().count -= 1; - } else { - match WaitQueue::notify_one(guard) { - Err(mut guard) => { - // No other waiters, unlock - guard.lock_var_mut().count = 0; - guard.lock_var_mut().owner = None; - }, - Ok(mut guard) => { - // There was a thread waiting, just pass the lock - if let NotifiedTcs::Single(tcs) = guard.notified_tcs() { - guard.lock_var_mut().owner = Some(tcs) - } else { - unreachable!() // called notify_one - } - } - } - } - } - - #[inline] - pub unsafe fn try_lock(&self) -> bool { - let mut guard = try_lock_or_false!(self.inner); - match guard.lock_var().owner { - Some(tcs) if tcs != thread::current() => { - // Another thread has the lock - false - }, - _ => { - // We are just now obtaining the lock - guard.lock_var_mut().owner = Some(thread::current()); - guard.lock_var_mut().count += 1; - true - }, - } - } - - #[inline] - pub unsafe fn destroy(&self) {} -} diff --git a/src/libstd/sys/sgx/rwlock.rs b/src/libstd/sys/sgx/rwlock.rs index 30c47e44eef8e..2c53e68f0cbef 100644 --- a/src/libstd/sys/sgx/rwlock.rs +++ b/src/libstd/sys/sgx/rwlock.rs @@ -1,195 +1,59 @@ -use crate::num::NonZeroUsize; +// The following functions are needed by libunwind. These symbols are named +// in pre-link args for the target specification, so keep that in sync. -use super::waitqueue::{ - try_lock_or_false, NotifiedTcs, SpinMutex, SpinMutexGuard, WaitQueue, WaitVariable, +#[cfg(not(test))] +use crate::{ + alloc::{self, Layout}, + lock_api::RawRwLock as _, + slice, str, + sync::atomic::Ordering, }; -use crate::mem; - -pub struct RWLock { - readers: SpinMutex>>, - writer: SpinMutex>, -} - -// Below is to check at compile time, that RWLock has size of 128 bytes. 
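// (The transmute in the deleted helper that follows is the whole check:
// mem::transmute only type-checks when source and target sizes are equal, so
// an RWLock that drifts from 128 bytes fails to compile. The same idiom in
// isolation, plus a const-based variant that needs no unsafe; `Example` is a
// placeholder for the type under test:)

use std::mem;

struct Example { _pad: [u8; 128] }

#[allow(dead_code)]
unsafe fn size_assert(x: Example) {
    mem::transmute::<Example, [u8; 128]>(x); // compile error if sizes differ
}

// Alternative without unsafe: the array length must evaluate to exactly 128.
const _: [(); 128] = [(); mem::size_of::<Example>()];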
-#[allow(dead_code)] -unsafe fn rw_lock_size_assert(r: RWLock) { - mem::transmute::(r); -} - -impl RWLock { - pub const fn new() -> RWLock { - RWLock { - readers: SpinMutex::new(WaitVariable::new(None)), - writer: SpinMutex::new(WaitVariable::new(false)), - } - } - - #[inline] - pub unsafe fn read(&self) { - let mut rguard = self.readers.lock(); - let wguard = self.writer.lock(); - if *wguard.lock_var() || !wguard.queue_empty() { - // Another thread has or is waiting for the write lock, wait - drop(wguard); - WaitQueue::wait(rguard); - // Another thread has passed the lock to us - } else { - // No waiting writers, acquire the read lock - *rguard.lock_var_mut() = - NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1); - } - } - - #[inline] - pub unsafe fn try_read(&self) -> bool { - let mut rguard = try_lock_or_false!(self.readers); - let wguard = try_lock_or_false!(self.writer); - if *wguard.lock_var() || !wguard.queue_empty() { - // Another thread has or is waiting for the write lock - false - } else { - // No waiting writers, acquire the read lock - *rguard.lock_var_mut() = - NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1); - true - } - } - - #[inline] - pub unsafe fn write(&self) { - let rguard = self.readers.lock(); - let mut wguard = self.writer.lock(); - if *wguard.lock_var() || rguard.lock_var().is_some() { - // Another thread has the lock, wait - drop(rguard); - WaitQueue::wait(wguard); - // Another thread has passed the lock to us - } else { - // We are just now obtaining the lock - *wguard.lock_var_mut() = true; - } - } - - #[inline] - pub unsafe fn try_write(&self) -> bool { - let rguard = try_lock_or_false!(self.readers); - let mut wguard = try_lock_or_false!(self.writer); - if *wguard.lock_var() || rguard.lock_var().is_some() { - // Another thread has the lock - false - } else { - // We are just now obtaining the lock - *wguard.lock_var_mut() = true; - true - } - } - - #[inline] - unsafe fn __read_unlock( - &self, - mut rguard: SpinMutexGuard<'_, WaitVariable>>, - wguard: SpinMutexGuard<'_, WaitVariable>, - ) { - *rguard.lock_var_mut() = NonZeroUsize::new(rguard.lock_var().unwrap().get() - 1); - if rguard.lock_var().is_some() { - // There are other active readers - } else { - if let Ok(mut wguard) = WaitQueue::notify_one(wguard) { - // A writer was waiting, pass the lock - *wguard.lock_var_mut() = true; - } else { - // No writers were waiting, the lock is released - rtassert!(rguard.queue_empty()); - } - } - } - - #[inline] - pub unsafe fn read_unlock(&self) { - let rguard = self.readers.lock(); - let wguard = self.writer.lock(); - self.__read_unlock(rguard, wguard); - } - - #[inline] - unsafe fn __write_unlock( - &self, - rguard: SpinMutexGuard<'_, WaitVariable>>, - wguard: SpinMutexGuard<'_, WaitVariable>, - ) { - if let Err(mut wguard) = WaitQueue::notify_one(wguard) { - // No writers waiting, release the write lock - *wguard.lock_var_mut() = false; - if let Ok(mut rguard) = WaitQueue::notify_all(rguard) { - // One or more readers were waiting, pass the lock to them - if let NotifiedTcs::All { count } = rguard.notified_tcs() { - *rguard.lock_var_mut() = Some(count) - } else { - unreachable!() // called notify_all - } - } else { - // No readers waiting, the lock is released - } - } else { - // There was a thread waiting for write, just pass the lock - } - } - - #[inline] - pub unsafe fn write_unlock(&self) { - let rguard = self.readers.lock(); - let wguard = self.writer.lock(); - self.__write_unlock(rguard, wguard); - } - - // only used by 
__rust_rwlock_unlock below - #[inline] - #[cfg_attr(test, allow(dead_code))] - unsafe fn unlock(&self) { - let rguard = self.readers.lock(); - let wguard = self.writer.lock(); - if *wguard.lock_var() == true { - self.__write_unlock(rguard, wguard); - } else { - self.__read_unlock(rguard, wguard); - } - } - - #[inline] - pub unsafe fn destroy(&self) {} -} +use crate::{parking_lot::RawRwLock, sync::atomic::AtomicBool}; -// The following functions are needed by libunwind. These symbols are named -// in pre-link args for the target specification, so keep that in sync. #[cfg(not(test))] const EINVAL: i32 = 22; +#[repr(C)] +pub struct RwLock { + lock: RawRwLock, + is_write_locked: AtomicBool, +} + #[cfg(not(test))] #[no_mangle] -pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RWLock) -> i32 { +pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RwLock) -> i32 { if p.is_null() { return EINVAL; } - (*p).read(); + (*p).lock.lock_shared(); return 0; } #[cfg(not(test))] #[no_mangle] -pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RWLock) -> i32 { +pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RwLock) -> i32 { if p.is_null() { return EINVAL; } - (*p).write(); + (*p).lock.lock_exclusive(); + (*p).is_write_locked.store(true, Ordering::Relaxed); return 0; } #[cfg(not(test))] #[no_mangle] -pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RWLock) -> i32 { +pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RwLock) -> i32 { if p.is_null() { return EINVAL; } - (*p).unlock(); + if (*p) + .is_write_locked + .compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed) + .is_ok() + { + (*p).lock.unlock_exclusive() + } else { + (*p).lock.unlock_shared(); + } return 0; } @@ -199,52 +63,54 @@ mod tests { use core::array::FixedSizeArray; use crate::mem::{self, MaybeUninit}; - // Verify that the bytes of initialized RWLock are the same as in + // Verify that the bytes of an initialized RwLock are the same as in // libunwind. If they change, `src/UnwindRustSgx.h` in libunwind needs to // be changed too. #[test] fn test_c_rwlock_initializer() { - const RWLOCK_INIT: &[u8] = &[ - 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - ]; + /// The value of a newly initialized `RwLock`. Which happens to be + /// `RawRwLock::INIT` (a zeroed `usize`), a false boolean (zero) + /// and then padding. 
+ const RWLOCK_INIT: &[u8] = &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; #[inline(never)] fn zero_stack() { - test::black_box(MaybeUninit::<[RWLock; 16]>::zeroed()); + test::black_box(MaybeUninit::<[RwLock; 16]>::zeroed()); } #[inline(never)] - unsafe fn rwlock_new(init: &mut MaybeUninit) { - init.write(RWLock::new()); + unsafe fn rwlock_new(init: &mut MaybeUninit) { + use crate::lock_api::RawRwLock as _; + init.write(RwLock { + lock: RawRwLock::INIT, + is_write_locked: AtomicBool::new(false), + }); } unsafe { - // try hard to make sure that the padding/unused bytes in RWLock + // try hard to make sure that the padding/unused bytes in RwLock // get initialized as 0. If the assertion below fails, that might // just be an issue with the test code and not with the value of // RWLOCK_INIT. zero_stack(); - let mut init = MaybeUninit::::zeroed(); + let mut init = MaybeUninit::::zeroed(); rwlock_new(&mut init); assert_eq!( - mem::transmute::<_, [u8; 128]>(init.assume_init()).as_slice(), + mem::transmute::<_, [u8; 16]>(init.assume_init()).as_slice(), RWLOCK_INIT ) }; } + + #[test] + fn test_rwlock_memory_layout() { + assert_eq!(mem::size_of::(), mem::size_of::() * 2); + assert_eq!(mem::align_of::(), mem::align_of::()); + } + + #[test] + fn test_sgx_on_64bit() { + #[cfg(target_pointer_width = "32")] + panic!("The RwLock implementation for SGX only works on 64 bit architectures for now"); + } } diff --git a/src/libstd/sys/sgx/spinmutex.rs b/src/libstd/sys/sgx/spinmutex.rs new file mode 100644 index 0000000000000..ef3642b77ca31 --- /dev/null +++ b/src/libstd/sys/sgx/spinmutex.rs @@ -0,0 +1,119 @@ +use crate::cell::UnsafeCell; +use crate::sync::atomic::{AtomicBool, Ordering, spin_loop_hint}; +use crate::ops::{Deref, DerefMut}; + +/// Trivial spinlock-based implementation of `sync::Mutex`. +// FIXME: Perhaps use Intel TSX to avoid locking? +#[derive(Default)] +pub struct SpinMutex { + value: UnsafeCell, + lock: AtomicBool, +} + +unsafe impl Send for SpinMutex {} +unsafe impl Sync for SpinMutex {} + +pub struct SpinMutexGuard<'a, T: 'a> { + mutex: &'a SpinMutex, +} + +impl<'a, T> !Send for SpinMutexGuard<'a, T> {} +unsafe impl<'a, T: Sync> Sync for SpinMutexGuard<'a, T> {} + +impl SpinMutex { + pub const fn new(value: T) -> Self { + SpinMutex { + value: UnsafeCell::new(value), + lock: AtomicBool::new(false) + } + } + + #[inline(always)] + pub fn lock(&self) -> SpinMutexGuard<'_, T> { + loop { + match self.try_lock() { + None => while self.lock.load(Ordering::Relaxed) { + spin_loop_hint() + }, + Some(guard) => return guard + } + } + } + + #[inline(always)] + pub fn try_lock(&self) -> Option> { + if !self.lock.compare_and_swap(false, true, Ordering::Acquire) { + Some(SpinMutexGuard { + mutex: self, + }) + } else { + None + } + } +} + +/// Lock the Mutex or return false. 
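// (A note on the lock() loop above: it is a test-and-test-and-set spinlock.
// The compare_and_swap in try_lock() is the only write; between failed
// attempts the thread spins on a plain Relaxed load, so contended cores
// re-read a shared cache line instead of bouncing it with writes. Typical
// use, assuming the SpinMutex defined above is in scope:)

fn demo() {
    let m = SpinMutex::new(0u32);
    {
        let mut g = m.lock(); // spins until the flag is acquired
        *g += 1;              // the guard derefs to the protected value
    } // dropping the guard releases the flag with a Release store
    assert_eq!(*m.lock(), 1);
}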
+pub macro try_lock_or_false { + ($e:expr) => { + if let Some(v) = $e.try_lock() { + v + } else { + return false + } + } +} + +impl<'a, T> Deref for SpinMutexGuard<'a, T> { + type Target = T; + + fn deref(&self) -> &T { + unsafe { + &*self.mutex.value.get() + } + } +} + +impl<'a, T> DerefMut for SpinMutexGuard<'a, T> { + fn deref_mut(&mut self) -> &mut T { + unsafe { + &mut*self.mutex.value.get() + } + } +} + +impl<'a, T> Drop for SpinMutexGuard<'a, T> { + fn drop(&mut self) { + self.mutex.lock.store(false, Ordering::Release) + } +} + +#[cfg(test)] +mod tests { + #![allow(deprecated)] + + use super::*; + use crate::sync::Arc; + use crate::thread; + use crate::time::{SystemTime, Duration}; + + #[test] + fn sleep() { + let mutex = Arc::new(SpinMutex::::default()); + let mutex2 = mutex.clone(); + let guard = mutex.lock(); + let t1 = thread::spawn(move || { + *mutex2.lock() = 1; + }); + + // "sleep" for 50ms + // FIXME: https://github.com/fortanix/rust-sgx/issues/31 + let start = SystemTime::now(); + let max = Duration::from_millis(50); + while start.elapsed().unwrap() < max {} + + assert_eq!(*guard, 0); + drop(guard); + t1.join().unwrap(); + assert_eq!(*mutex.lock(), 1); + } +} diff --git a/src/libstd/sys/sgx/waitqueue.rs b/src/libstd/sys/sgx/waitqueue.rs deleted file mode 100644 index d542f9b410127..0000000000000 --- a/src/libstd/sys/sgx/waitqueue.rs +++ /dev/null @@ -1,552 +0,0 @@ -/// A simple queue implementation for synchronization primitives. -/// -/// This queue is used to implement condition variable and mutexes. -/// -/// Users of this API are expected to use the `WaitVariable` type. Since -/// that type is not `Sync`, it needs to be protected by e.g., a `SpinMutex` to -/// allow shared access. -/// -/// Since userspace may send spurious wake-ups, the wakeup event state is -/// recorded in the enclave. The wakeup event state is protected by a spinlock. -/// The queue and associated wait state are stored in a `WaitVariable`. - -use crate::ops::{Deref, DerefMut}; -use crate::num::NonZeroUsize; - -use fortanix_sgx_abi::{Tcs, EV_UNPARK, WAIT_INDEFINITE}; -use super::abi::usercalls; -use super::abi::thread; - -use self::unsafe_list::{UnsafeList, UnsafeListEntry}; -pub use self::spin_mutex::{SpinMutex, SpinMutexGuard, try_lock_or_false}; - -/// An queue entry in a `WaitQueue`. -struct WaitEntry { - /// TCS address of the thread that is waiting - tcs: Tcs, - /// Whether this thread has been notified to be awoken - wake: bool -} - -/// Data stored with a `WaitQueue` alongside it. This ensures accesses to the -/// queue and the data are synchronized, since the type itself is not `Sync`. -/// -/// Consumers of this API should use a synchronization primitive for shared -/// access, such as `SpinMutex`. -#[derive(Default)] -pub struct WaitVariable { - queue: WaitQueue, - lock: T -} - -impl WaitVariable { - pub const fn new(var: T) -> Self { - WaitVariable { - queue: WaitQueue::new(), - lock: var - } - } - - pub fn queue_empty(&self) -> bool { - self.queue.is_empty() - } - - pub fn lock_var(&self) -> &T { - &self.lock - } - - pub fn lock_var_mut(&mut self) -> &mut T { - &mut self.lock - } -} - -#[derive(Copy, Clone)] -pub enum NotifiedTcs { - Single(Tcs), - All { count: NonZeroUsize } -} - -/// An RAII guard that will notify a set of target threads as well as unlock -/// a mutex on drop. -pub struct WaitGuard<'a, T: 'a> { - mutex_guard: Option>>, - notified_tcs: NotifiedTcs -} - -/// A queue of threads that are waiting on some synchronization primitive. 
-/// -/// `UnsafeList` entries are allocated on the waiting thread's stack. This -/// avoids any global locking that might happen in the heap allocator. This is -/// safe because the waiting thread will not return from that stack frame until -/// after it is notified. The notifying thread ensures to clean up any -/// references to the list entries before sending the wakeup event. -pub struct WaitQueue { - // We use an inner Mutex here to protect the data in the face of spurious - // wakeups. - inner: UnsafeList>, -} -unsafe impl Send for WaitQueue {} - -impl Default for WaitQueue { - fn default() -> Self { - Self::new() - } -} - -impl<'a, T> WaitGuard<'a, T> { - /// Returns which TCSes will be notified when this guard drops. - pub fn notified_tcs(&self) -> NotifiedTcs { - self.notified_tcs - } -} - -impl<'a, T> Deref for WaitGuard<'a, T> { - type Target = SpinMutexGuard<'a, WaitVariable>; - - fn deref(&self) -> &Self::Target { - self.mutex_guard.as_ref().unwrap() - } -} - -impl<'a, T> DerefMut for WaitGuard<'a, T> { - fn deref_mut(&mut self) -> &mut Self::Target { - self.mutex_guard.as_mut().unwrap() - } -} - -impl<'a, T> Drop for WaitGuard<'a, T> { - fn drop(&mut self) { - drop(self.mutex_guard.take()); - let target_tcs = match self.notified_tcs { - NotifiedTcs::Single(tcs) => Some(tcs), - NotifiedTcs::All { .. } => None - }; - rtunwrap!(Ok, usercalls::send(EV_UNPARK, target_tcs)); - } -} - -impl WaitQueue { - pub const fn new() -> Self { - WaitQueue { - inner: UnsafeList::new() - } - } - - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } - - /// Adds the calling thread to the `WaitVariable`'s wait queue, then wait - /// until a wakeup event. - /// - /// This function does not return until this thread has been awoken. - pub fn wait(mut guard: SpinMutexGuard<'_, WaitVariable>) { - // very unsafe: check requirements of UnsafeList::push - unsafe { - let mut entry = UnsafeListEntry::new(SpinMutex::new(WaitEntry { - tcs: thread::current(), - wake: false - })); - let entry = guard.queue.inner.push(&mut entry); - drop(guard); - while !entry.lock().wake { - // don't panic, this would invalidate `entry` during unwinding - let eventset = rtunwrap!(Ok, usercalls::wait(EV_UNPARK, WAIT_INDEFINITE)); - rtassert!(eventset & EV_UNPARK == EV_UNPARK); - } - } - } - - /// Either find the next waiter on the wait queue, or return the mutex - /// guard unchanged. - /// - /// If a waiter is found, a `WaitGuard` is returned which will notify the - /// waiter when it is dropped. - pub fn notify_one(mut guard: SpinMutexGuard<'_, WaitVariable>) - -> Result, SpinMutexGuard<'_, WaitVariable>> - { - unsafe { - if let Some(entry) = guard.queue.inner.pop() { - let mut entry_guard = entry.lock(); - let tcs = entry_guard.tcs; - entry_guard.wake = true; - drop(entry); - Ok(WaitGuard { - mutex_guard: Some(guard), - notified_tcs: NotifiedTcs::Single(tcs) - }) - } else { - Err(guard) - } - } - } - - /// Either find any and all waiters on the wait queue, or return the mutex - /// guard unchanged. - /// - /// If at least one waiter is found, a `WaitGuard` is returned which will - /// notify all waiters when it is dropped. 
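// (The deleted design note above deserves an illustration: each waiter's list
// entry lives in the waiting thread's own stack frame. That is sound only
// because wait() cannot return, and the frame cannot be popped, until a
// notifier has already unlinked the entry and set its wake flag. Reduced to
// its essentials, with hypothetical names:)

use std::sync::atomic::{AtomicBool, Ordering, spin_loop_hint};

struct Entry { wake: AtomicBool }

fn wait(publish: impl FnOnce(*const Entry)) {
    let entry = Entry { wake: AtomicBool::new(false) }; // lives on this frame
    publish(&entry as *const Entry); // push the pointer onto the shared queue
    while !entry.wake.load(Ordering::Acquire) {
        spin_loop_hint(); // the real code sleeps in usercalls::wait() instead
    }
    // Only now may the frame be popped: the notifier removed the pointer from
    // the queue before setting `wake`, so no dangling reference survives.
}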
- pub fn notify_all(mut guard: SpinMutexGuard<'_, WaitVariable>) - -> Result, SpinMutexGuard<'_, WaitVariable>> - { - unsafe { - let mut count = 0; - while let Some(entry) = guard.queue.inner.pop() { - count += 1; - let mut entry_guard = entry.lock(); - entry_guard.wake = true; - } - if let Some(count) = NonZeroUsize::new(count) { - Ok(WaitGuard { - mutex_guard: Some(guard), - notified_tcs: NotifiedTcs::All { count } - }) - } else { - Err(guard) - } - } - } -} - -/// A doubly-linked list where callers are in charge of memory allocation -/// of the nodes in the list. -mod unsafe_list { - use crate::ptr::NonNull; - use crate::mem; - - pub struct UnsafeListEntry { - next: NonNull>, - prev: NonNull>, - value: Option - } - - impl UnsafeListEntry { - fn dummy() -> Self { - UnsafeListEntry { - next: NonNull::dangling(), - prev: NonNull::dangling(), - value: None - } - } - - pub fn new(value: T) -> Self { - UnsafeListEntry { - value: Some(value), - ..Self::dummy() - } - } - } - - pub struct UnsafeList { - head_tail: NonNull>, - head_tail_entry: Option>, - } - - impl UnsafeList { - pub const fn new() -> Self { - unsafe { - UnsafeList { - head_tail: NonNull::new_unchecked(1 as _), - head_tail_entry: None - } - } - } - - unsafe fn init(&mut self) { - if self.head_tail_entry.is_none() { - self.head_tail_entry = Some(UnsafeListEntry::dummy()); - self.head_tail = NonNull::new_unchecked(self.head_tail_entry.as_mut().unwrap()); - self.head_tail.as_mut().next = self.head_tail; - self.head_tail.as_mut().prev = self.head_tail; - } - } - - pub fn is_empty(&self) -> bool { - unsafe { - if self.head_tail_entry.is_some() { - let first = self.head_tail.as_ref().next; - if first == self.head_tail { - // ,-------> /---------\ next ---, - // | |head_tail| | - // `--- prev \---------/ <-------` - rtassert!(self.head_tail.as_ref().prev == first); - true - } else { - false - } - } else { - true - } - } - } - - /// Pushes an entry onto the back of the list. - /// - /// # Safety - /// - /// The entry must remain allocated until the entry is removed from the - /// list AND the caller who popped is done using the entry. Special - /// care must be taken in the caller of `push` to ensure unwinding does - /// not destroy the stack frame containing the entry. - pub unsafe fn push<'a>(&mut self, entry: &'a mut UnsafeListEntry) -> &'a T { - self.init(); - - // BEFORE: - // /---------\ next ---> /---------\ - // ... |prev_tail| |head_tail| ... - // \---------/ <--- prev \---------/ - // - // AFTER: - // /---------\ next ---> /-----\ next ---> /---------\ - // ... |prev_tail| |entry| |head_tail| ... - // \---------/ <--- prev \-----/ <--- prev \---------/ - let mut entry = NonNull::new_unchecked(entry); - let mut prev_tail = mem::replace(&mut self.head_tail.as_mut().prev, entry); - entry.as_mut().prev = prev_tail; - entry.as_mut().next = self.head_tail; - prev_tail.as_mut().next = entry; - // unwrap ok: always `Some` on non-dummy entries - (*entry.as_ptr()).value.as_ref().unwrap() - } - - /// Pops an entry from the front of the list. - /// - /// # Safety - /// - /// The caller must make sure to synchronize ending the borrow of the - /// return value and deallocation of the containing entry. - pub unsafe fn pop<'a>(&mut self) -> Option<&'a T> { - self.init(); - - if self.is_empty() { - None - } else { - // BEFORE: - // /---------\ next ---> /-----\ next ---> /------\ - // ... |head_tail| |first| |second| ... - // \---------/ <--- prev \-----/ <--- prev \------/ - // - // AFTER: - // /---------\ next ---> /------\ - // ... 
|head_tail| |second| ... - // \---------/ <--- prev \------/ - let mut first = self.head_tail.as_mut().next; - let mut second = first.as_mut().next; - self.head_tail.as_mut().next = second; - second.as_mut().prev = self.head_tail; - first.as_mut().next = NonNull::dangling(); - first.as_mut().prev = NonNull::dangling(); - // unwrap ok: always `Some` on non-dummy entries - Some((*first.as_ptr()).value.as_ref().unwrap()) - } - } - } - - #[cfg(test)] - mod tests { - use super::*; - use crate::cell::Cell; - - unsafe fn assert_empty(list: &mut UnsafeList) { - assert!(list.pop().is_none(), "assertion failed: list is not empty"); - } - - #[test] - fn init_empty() { - unsafe { - assert_empty(&mut UnsafeList::::new()); - } - } - - #[test] - fn push_pop() { - unsafe { - let mut node = UnsafeListEntry::new(1234); - let mut list = UnsafeList::new(); - assert_eq!(list.push(&mut node), &1234); - assert_eq!(list.pop().unwrap(), &1234); - assert_empty(&mut list); - } - } - - #[test] - fn complex_pushes_pops() { - unsafe { - let mut node1 = UnsafeListEntry::new(1234); - let mut node2 = UnsafeListEntry::new(4567); - let mut node3 = UnsafeListEntry::new(9999); - let mut node4 = UnsafeListEntry::new(8642); - let mut list = UnsafeList::new(); - list.push(&mut node1); - list.push(&mut node2); - assert_eq!(list.pop().unwrap(), &1234); - list.push(&mut node3); - assert_eq!(list.pop().unwrap(), &4567); - assert_eq!(list.pop().unwrap(), &9999); - assert_empty(&mut list); - list.push(&mut node4); - assert_eq!(list.pop().unwrap(), &8642); - assert_empty(&mut list); - } - } - - #[test] - fn cell() { - unsafe { - let mut node = UnsafeListEntry::new(Cell::new(0)); - let mut list = UnsafeList::new(); - let noderef = list.push(&mut node); - assert_eq!(noderef.get(), 0); - list.pop().unwrap().set(1); - assert_empty(&mut list); - assert_eq!(noderef.get(), 1); - } - } - } -} - -/// Trivial spinlock-based implementation of `sync::Mutex`. -// FIXME: Perhaps use Intel TSX to avoid locking? -mod spin_mutex { - use crate::cell::UnsafeCell; - use crate::sync::atomic::{AtomicBool, Ordering, spin_loop_hint}; - use crate::ops::{Deref, DerefMut}; - - #[derive(Default)] - pub struct SpinMutex { - value: UnsafeCell, - lock: AtomicBool, - } - - unsafe impl Send for SpinMutex {} - unsafe impl Sync for SpinMutex {} - - pub struct SpinMutexGuard<'a, T: 'a> { - mutex: &'a SpinMutex, - } - - impl<'a, T> !Send for SpinMutexGuard<'a, T> {} - unsafe impl<'a, T: Sync> Sync for SpinMutexGuard<'a, T> {} - - impl SpinMutex { - pub const fn new(value: T) -> Self { - SpinMutex { - value: UnsafeCell::new(value), - lock: AtomicBool::new(false) - } - } - - #[inline(always)] - pub fn lock(&self) -> SpinMutexGuard<'_, T> { - loop { - match self.try_lock() { - None => while self.lock.load(Ordering::Relaxed) { - spin_loop_hint() - }, - Some(guard) => return guard - } - } - } - - #[inline(always)] - pub fn try_lock(&self) -> Option> { - if !self.lock.compare_and_swap(false, true, Ordering::Acquire) { - Some(SpinMutexGuard { - mutex: self, - }) - } else { - None - } - } - } - - /// Lock the Mutex or return false. 
- pub macro try_lock_or_false { - ($e:expr) => { - if let Some(v) = $e.try_lock() { - v - } else { - return false - } - } - } - - impl<'a, T> Deref for SpinMutexGuard<'a, T> { - type Target = T; - - fn deref(&self) -> &T { - unsafe { - &*self.mutex.value.get() - } - } - } - - impl<'a, T> DerefMut for SpinMutexGuard<'a, T> { - fn deref_mut(&mut self) -> &mut T { - unsafe { - &mut*self.mutex.value.get() - } - } - } - - impl<'a, T> Drop for SpinMutexGuard<'a, T> { - fn drop(&mut self) { - self.mutex.lock.store(false, Ordering::Release) - } - } - - #[cfg(test)] - mod tests { - #![allow(deprecated)] - - use super::*; - use crate::sync::Arc; - use crate::thread; - use crate::time::{SystemTime, Duration}; - - #[test] - fn sleep() { - let mutex = Arc::new(SpinMutex::::default()); - let mutex2 = mutex.clone(); - let guard = mutex.lock(); - let t1 = thread::spawn(move || { - *mutex2.lock() = 1; - }); - - // "sleep" for 50ms - // FIXME: https://github.com/fortanix/rust-sgx/issues/31 - let start = SystemTime::now(); - let max = Duration::from_millis(50); - while start.elapsed().unwrap() < max {} - - assert_eq!(*guard, 0); - drop(guard); - t1.join().unwrap(); - assert_eq!(*mutex.lock(), 1); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::sync::Arc; - use crate::thread; - - #[test] - fn queue() { - let wq = Arc::new(SpinMutex::>::default()); - let wq2 = wq.clone(); - - let locked = wq.lock(); - - let t1 = thread::spawn(move || { - // if we obtain the lock, the main thread should be waiting - assert!(WaitQueue::notify_one(wq2.lock()).is_ok()); - }); - - WaitQueue::wait(locked); - - t1.join().unwrap(); - } -} diff --git a/src/libstd/sys/unix/args.rs b/src/libstd/sys/unix/args.rs index 18de1096df2a2..fb3702588d188 100644 --- a/src/libstd/sys/unix/args.rs +++ b/src/libstd/sys/unix/args.rs @@ -65,13 +65,11 @@ mod imp { use crate::marker::PhantomData; use super::Args; - use crate::sys_common::mutex::Mutex; + use crate::sync::RawMutex; static mut ARGC: isize = 0; static mut ARGV: *const *const u8 = ptr::null(); - // We never call `ENV_LOCK.init()`, so it is UB to attempt to - // acquire this mutex reentrantly! 
- static LOCK: Mutex = Mutex::new(); + static LOCK: RawMutex = RawMutex::new(); pub unsafe fn init(argc: isize, argv: *const *const u8) { let _guard = LOCK.lock(); @@ -93,8 +91,8 @@ mod imp { } fn clone() -> Vec { + let _guard = LOCK.lock(); unsafe { - let _guard = LOCK.lock(); (0..ARGC).map(|i| { let cstr = CStr::from_ptr(*ARGV.offset(i) as *const libc::c_char); OsStringExt::from_vec(cstr.to_bytes().to_vec()) diff --git a/src/libstd/sys/unix/condvar.rs b/src/libstd/sys/unix/condvar.rs deleted file mode 100644 index 47fb6792f08ae..0000000000000 --- a/src/libstd/sys/unix/condvar.rs +++ /dev/null @@ -1,181 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::sys::mutex::{self, Mutex}; -use crate::time::Duration; - -pub struct Condvar { inner: UnsafeCell } - -unsafe impl Send for Condvar {} -unsafe impl Sync for Condvar {} - -const TIMESPEC_MAX: libc::timespec = libc::timespec { - tv_sec: ::max_value(), - tv_nsec: 1_000_000_000 - 1, -}; - -fn saturating_cast_to_time_t(value: u64) -> libc::time_t { - if value > ::max_value() as u64 { - ::max_value() - } else { - value as libc::time_t - } -} - -impl Condvar { - pub const fn new() -> Condvar { - // Might be moved and address is changing it is better to avoid - // initialization of potentially opaque OS data before it landed - Condvar { inner: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER) } - } - - #[cfg(any(target_os = "macos", - target_os = "ios", - target_os = "l4re", - target_os = "android", - target_os = "hermit"))] - pub unsafe fn init(&mut self) {} - - #[cfg(not(any(target_os = "macos", - target_os = "ios", - target_os = "l4re", - target_os = "android", - target_os = "hermit")))] - pub unsafe fn init(&mut self) { - use crate::mem; - let mut attr: libc::pthread_condattr_t = mem::uninitialized(); - let r = libc::pthread_condattr_init(&mut attr); - assert_eq!(r, 0); - let r = libc::pthread_condattr_setclock(&mut attr, libc::CLOCK_MONOTONIC); - assert_eq!(r, 0); - let r = libc::pthread_cond_init(self.inner.get(), &attr); - assert_eq!(r, 0); - let r = libc::pthread_condattr_destroy(&mut attr); - assert_eq!(r, 0); - } - - #[inline] - pub unsafe fn notify_one(&self) { - let r = libc::pthread_cond_signal(self.inner.get()); - debug_assert_eq!(r, 0); - } - - #[inline] - pub unsafe fn notify_all(&self) { - let r = libc::pthread_cond_broadcast(self.inner.get()); - debug_assert_eq!(r, 0); - } - - #[inline] - pub unsafe fn wait(&self, mutex: &Mutex) { - let r = libc::pthread_cond_wait(self.inner.get(), mutex::raw(mutex)); - debug_assert_eq!(r, 0); - } - - // This implementation is used on systems that support pthread_condattr_setclock - // where we configure condition variable to use monotonic clock (instead of - // default system clock). This approach avoids all problems that result - // from changes made to the system time. - #[cfg(not(any(target_os = "macos", - target_os = "ios", - target_os = "android", - target_os = "hermit")))] - pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - use crate::mem; - - let mut now: libc::timespec = mem::zeroed(); - let r = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now); - assert_eq!(r, 0); - - // Nanosecond calculations can't overflow because both values are below 1e9. 
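// (To make that overflow argument concrete: both operands are < 1_000_000_000,
// so their u32 sum is < 2e9 < u32::MAX; the seconds, by contrast, are combined
// with checked arithmetic and saturate to TIMESPEC_MAX, which pthread treats
// as "practically never". The same computation as a standalone sketch, where
// None stands for "saturate to TIMESPEC_MAX":)

use std::convert::TryFrom;
use std::time::Duration;

const NSEC_PER_SEC: u32 = 1_000_000_000;

// now + dur as a (seconds, nanoseconds) deadline.
fn deadline(now_sec: i64, now_nsec: u32, dur: Duration) -> Option<(i64, u32)> {
    let nsec = dur.subsec_nanos() + now_nsec; // cannot overflow: both < 1e9
    let sec = i64::try_from(dur.as_secs())
        .ok()? // plays the role of saturating_cast_to_time_t above
        .checked_add(i64::from(nsec / NSEC_PER_SEC))?
        .checked_add(now_sec)?;
    Some((sec, nsec % NSEC_PER_SEC))
}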
- let nsec = dur.subsec_nanos() + now.tv_nsec as u32; - - let sec = saturating_cast_to_time_t(dur.as_secs()) - .checked_add((nsec / 1_000_000_000) as libc::time_t) - .and_then(|s| s.checked_add(now.tv_sec)); - let nsec = nsec % 1_000_000_000; - - let timeout = sec.map(|s| { - libc::timespec { tv_sec: s, tv_nsec: nsec as _} - }).unwrap_or(TIMESPEC_MAX); - - let r = libc::pthread_cond_timedwait(self.inner.get(), mutex::raw(mutex), - &timeout); - assert!(r == libc::ETIMEDOUT || r == 0); - r == 0 - } - - - // This implementation is modeled after libcxx's condition_variable - // https://github.com/llvm-mirror/libcxx/blob/release_35/src/condition_variable.cpp#L46 - // https://github.com/llvm-mirror/libcxx/blob/release_35/include/__mutex_base#L367 - #[cfg(any(target_os = "macos", target_os = "ios", target_os = "android", target_os = "hermit"))] - pub unsafe fn wait_timeout(&self, mutex: &Mutex, mut dur: Duration) -> bool { - use crate::ptr; - use crate::time::Instant; - - // 1000 years - let max_dur = Duration::from_secs(1000 * 365 * 86400); - - if dur > max_dur { - // OSX implementation of `pthread_cond_timedwait` is buggy - // with super long durations. When duration is greater than - // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait` - // in macOS Sierra return error 316. - // - // This program demonstrates the issue: - // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c - // - // To work around this issue, and possible bugs of other OSes, timeout - // is clamped to 1000 years, which is allowable per the API of `wait_timeout` - // because of spurious wakeups. - - dur = max_dur; - } - - // First, figure out what time it currently is, in both system and - // stable time. pthread_cond_timedwait uses system time, but we want to - // report timeout based on stable time. - let mut sys_now = libc::timeval { tv_sec: 0, tv_usec: 0 }; - let stable_now = Instant::now(); - let r = libc::gettimeofday(&mut sys_now, ptr::null_mut()); - debug_assert_eq!(r, 0); - - let nsec = dur.subsec_nanos() as libc::c_long + - (sys_now.tv_usec * 1000) as libc::c_long; - let extra = (nsec / 1_000_000_000) as libc::time_t; - let nsec = nsec % 1_000_000_000; - let seconds = saturating_cast_to_time_t(dur.as_secs()); - - let timeout = sys_now.tv_sec.checked_add(extra).and_then(|s| { - s.checked_add(seconds) - }).map(|s| { - libc::timespec { tv_sec: s, tv_nsec: nsec } - }).unwrap_or(TIMESPEC_MAX); - - // And wait! - let r = libc::pthread_cond_timedwait(self.inner.get(), mutex::raw(mutex), - &timeout); - debug_assert!(r == libc::ETIMEDOUT || r == 0); - - // ETIMEDOUT is not a totally reliable method of determining timeout due - // to clock shifts, so do the check ourselves - stable_now.elapsed() < dur - } - - #[inline] - #[cfg(not(target_os = "dragonfly"))] - pub unsafe fn destroy(&self) { - let r = libc::pthread_cond_destroy(self.inner.get()); - debug_assert_eq!(r, 0); - } - - #[inline] - #[cfg(target_os = "dragonfly")] - pub unsafe fn destroy(&self) { - let r = libc::pthread_cond_destroy(self.inner.get()); - // On DragonFly pthread_cond_destroy() returns EINVAL if called on - // a condvar that was just initialized with - // libc::PTHREAD_COND_INITIALIZER. Once it is used or - // pthread_cond_init() is called, this behaviour no longer occurs. 
- debug_assert!(r == 0 || r == libc::EINVAL); - } -} diff --git a/src/libstd/sys/unix/mod.rs b/src/libstd/sys/unix/mod.rs index 90e26449ae280..50d5287a8538d 100644 --- a/src/libstd/sys/unix/mod.rs +++ b/src/libstd/sys/unix/mod.rs @@ -31,7 +31,6 @@ pub mod android; #[cfg(feature = "backtrace")] pub mod backtrace; pub mod cmath; -pub mod condvar; pub mod env; pub mod ext; pub mod fast_thread_local; @@ -39,7 +38,6 @@ pub mod fd; pub mod fs; pub mod memchr; pub mod io; -pub mod mutex; #[cfg(not(target_os = "l4re"))] pub mod net; #[cfg(target_os = "l4re")] @@ -51,7 +49,6 @@ pub mod path; pub mod pipe; pub mod process; pub mod rand; -pub mod rwlock; pub mod stack_overflow; pub mod thread; pub mod thread_local; diff --git a/src/libstd/sys/unix/mutex.rs b/src/libstd/sys/unix/mutex.rs deleted file mode 100644 index b6a22e1962ab8..0000000000000 --- a/src/libstd/sys/unix/mutex.rs +++ /dev/null @@ -1,127 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::mem; - -pub struct Mutex { inner: UnsafeCell } - -#[inline] -pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t { - m.inner.get() -} - -unsafe impl Send for Mutex {} -unsafe impl Sync for Mutex {} - -#[allow(dead_code)] // sys isn't exported yet -impl Mutex { - pub const fn new() -> Mutex { - // Might be moved to a different address, so it is better to avoid - // initialization of potentially opaque OS data before it landed. - // Be very careful using this newly constructed `Mutex`, reentrant - // locking is undefined behavior until `init` is called! - Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) } - } - #[inline] - pub unsafe fn init(&mut self) { - // Issue #33770 - // - // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have - // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you - // try to re-lock it from the same thread when you already hold a lock. - // - // In practice, glibc takes advantage of this undefined behavior to - // implement hardware lock elision, which uses hardware transactional - // memory to avoid acquiring the lock. While a transaction is in - // progress, the lock appears to be unlocked. This isn't a problem for - // other threads since the transactional memory will abort if a conflict - // is detected, however no abort is generated if re-locking from the - // same thread. - // - // Since locking the same mutex twice will result in two aliasing &mut - // references, we instead create the mutex with type - // PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to - // re-lock it from the same thread, thus avoiding undefined behavior. 
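// (Concretely: if the same thread could re-lock a mutex and "succeed", safe
// code could hold two live &mut to the protected data at once, which is
// undefined behavior, so a guaranteed deadlock is the safer failure mode.
// The shape that must never be allowed to succeed:)

use std::sync::Mutex;

fn relock(m: &Mutex<u32>) {
    let _a = m.lock().unwrap(); // first (and only legal) mutable borrow
    // A second m.lock() here on the same thread must not hand out another
    // guard; with PTHREAD_MUTEX_NORMAL semantics it deadlocks instead:
    // let _b = m.lock().unwrap(); // would block this thread forever
}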
- let mut attr: libc::pthread_mutexattr_t = mem::uninitialized(); - let r = libc::pthread_mutexattr_init(&mut attr); - debug_assert_eq!(r, 0); - let r = libc::pthread_mutexattr_settype(&mut attr, libc::PTHREAD_MUTEX_NORMAL); - debug_assert_eq!(r, 0); - let r = libc::pthread_mutex_init(self.inner.get(), &attr); - debug_assert_eq!(r, 0); - let r = libc::pthread_mutexattr_destroy(&mut attr); - debug_assert_eq!(r, 0); - } - #[inline] - pub unsafe fn lock(&self) { - let r = libc::pthread_mutex_lock(self.inner.get()); - debug_assert_eq!(r, 0); - } - #[inline] - pub unsafe fn unlock(&self) { - let r = libc::pthread_mutex_unlock(self.inner.get()); - debug_assert_eq!(r, 0); - } - #[inline] - pub unsafe fn try_lock(&self) -> bool { - libc::pthread_mutex_trylock(self.inner.get()) == 0 - } - #[inline] - #[cfg(not(target_os = "dragonfly"))] - pub unsafe fn destroy(&self) { - let r = libc::pthread_mutex_destroy(self.inner.get()); - debug_assert_eq!(r, 0); - } - #[inline] - #[cfg(target_os = "dragonfly")] - pub unsafe fn destroy(&self) { - let r = libc::pthread_mutex_destroy(self.inner.get()); - // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a - // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER. - // Once it is used (locked/unlocked) or pthread_mutex_init() is called, - // this behaviour no longer occurs. - debug_assert!(r == 0 || r == libc::EINVAL); - } -} - -pub struct ReentrantMutex { inner: UnsafeCell } - -unsafe impl Send for ReentrantMutex {} -unsafe impl Sync for ReentrantMutex {} - -impl ReentrantMutex { - pub unsafe fn uninitialized() -> ReentrantMutex { - ReentrantMutex { inner: mem::uninitialized() } - } - - pub unsafe fn init(&mut self) { - let mut attr: libc::pthread_mutexattr_t = mem::uninitialized(); - let result = libc::pthread_mutexattr_init(&mut attr as *mut _); - debug_assert_eq!(result, 0); - let result = libc::pthread_mutexattr_settype(&mut attr as *mut _, - libc::PTHREAD_MUTEX_RECURSIVE); - debug_assert_eq!(result, 0); - let result = libc::pthread_mutex_init(self.inner.get(), &attr as *const _); - debug_assert_eq!(result, 0); - let result = libc::pthread_mutexattr_destroy(&mut attr as *mut _); - debug_assert_eq!(result, 0); - } - - pub unsafe fn lock(&self) { - let result = libc::pthread_mutex_lock(self.inner.get()); - debug_assert_eq!(result, 0); - } - - #[inline] - pub unsafe fn try_lock(&self) -> bool { - libc::pthread_mutex_trylock(self.inner.get()) == 0 - } - - pub unsafe fn unlock(&self) { - let result = libc::pthread_mutex_unlock(self.inner.get()); - debug_assert_eq!(result, 0); - } - - pub unsafe fn destroy(&self) { - let result = libc::pthread_mutex_destroy(self.inner.get()); - debug_assert_eq!(result, 0); - } -} diff --git a/src/libstd/sys/unix/os.rs b/src/libstd/sys/unix/os.rs index 726b17969b7c3..b9b2053a32dba 100644 --- a/src/libstd/sys/unix/os.rs +++ b/src/libstd/sys/unix/os.rs @@ -16,7 +16,7 @@ use crate::path::{self, PathBuf}; use crate::ptr; use crate::slice; use crate::str; -use crate::sys_common::mutex::{Mutex, MutexGuard}; +use crate::sync::{RawMutex, RawMutexGuard}; use crate::sys::cvt; use crate::sys::fd; use crate::vec; @@ -412,10 +412,8 @@ pub unsafe fn environ() -> *mut *const *const c_char { &mut environ } -pub unsafe fn env_lock() -> MutexGuard<'static> { - // We never call `ENV_LOCK.init()`, so it is UB to attempt to - // acquire this mutex reentrantly! 
- static ENV_LOCK: Mutex = Mutex::new(); +pub fn env_lock() -> RawMutexGuard<'static> { + static ENV_LOCK: RawMutex = RawMutex::new(); ENV_LOCK.lock() } diff --git a/src/libstd/sys/unix/process/process_unix.rs b/src/libstd/sys/unix/process/process_unix.rs index 220b1fd453131..5d860844e7e0f 100644 --- a/src/libstd/sys/unix/process/process_unix.rs +++ b/src/libstd/sys/unix/process/process_unix.rs @@ -1,4 +1,5 @@ use crate::io::{self, Error, ErrorKind}; +use crate::mem; use crate::ptr; use crate::sys::cvt; use crate::sys::process::process_common::*; @@ -35,17 +36,16 @@ impl Command { // accessing the `environ` pointer ourselves). Make sure no other thread // is accessing the environment when we do the fork itself. // - // Note that as soon as we're done with the fork there's no need to hold - // a lock any more because the parent won't do anything and the child is - // in its own process. - let result = unsafe { - let _env_lock = sys::os::env_lock(); - cvt(libc::fork())? - }; - + // Note that as soon as we're done with the fork the parent can stop + // holding the lock, because it won't do anything. And the child must + // forget it, keeping it locked. This is because parking_lot is + // not fork safe and the child process must not lock or unlock any + // locks before it gets to libc::execvp. let pid = unsafe { - match result { + let _env_lock = sys::os::env_lock(); + match cvt(libc::fork())? { 0 => { + mem::forget(_env_lock); drop(input); let err = self.do_exec(theirs, envp.as_ref()); let errno = err.raw_os_error().unwrap_or(libc::EINVAL) as u32; diff --git a/src/libstd/sys/unix/rwlock.rs b/src/libstd/sys/unix/rwlock.rs deleted file mode 100644 index e48bfdae61047..0000000000000 --- a/src/libstd/sys/unix/rwlock.rs +++ /dev/null @@ -1,131 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::sync::atomic::{AtomicUsize, Ordering}; - -pub struct RWLock { - inner: UnsafeCell, - write_locked: UnsafeCell, // guarded by the `inner` RwLock - num_readers: AtomicUsize, -} - -unsafe impl Send for RWLock {} -unsafe impl Sync for RWLock {} - -impl RWLock { - pub const fn new() -> RWLock { - RWLock { - inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER), - write_locked: UnsafeCell::new(false), - num_readers: AtomicUsize::new(0), - } - } - #[inline] - pub unsafe fn read(&self) { - let r = libc::pthread_rwlock_rdlock(self.inner.get()); - - // According to the pthread_rwlock_rdlock spec, this function **may** - // fail with EDEADLK if a deadlock is detected. On the other hand - // pthread mutexes will *never* return EDEADLK if they are initialized - // as the "fast" kind (which ours always are). As a result, a deadlock - // situation may actually return from the call to pthread_rwlock_rdlock - // instead of blocking forever (as mutexes and Windows rwlocks do). Note - // that not all unix implementations, however, will return EDEADLK for - // their rwlocks. - // - // We roughly maintain the deadlocking behavior by panicking to ensure - // that this lock acquisition does not succeed. - // - // We also check whether this lock is already write locked. This - // is only possible if it was write locked by the current thread and - // the implementation allows recursive locking. The POSIX standard - // doesn't require recursively locking a rwlock to deadlock, but we can't - // allow that because it could lead to aliasing issues. 
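// (Background for the bookkeeping above: POSIX offers no portable way to ask
// "is this rwlock already write-locked, and by me?", so the deleted code
// shadows the opaque OS lock with its own flag and reader count. The bool is
// only touched while the OS lock is held, which is why an UnsafeCell
// suffices; the reader count is bumped outside any exclusive section, so it
// is atomic. A reduced sketch with a hypothetical helper:)

use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicUsize, Ordering};

struct Shadow {
    write_locked: UnsafeCell<bool>, // guarded by the OS rwlock itself
    num_readers: AtomicUsize,
}

impl Shadow {
    // Called with the OS read lock already held: detect a read acquired
    // recursively on top of our own write lock.
    unsafe fn note_read_acquired(&self) -> bool {
        if *self.write_locked.get() {
            false // caller must release the OS lock and panic
        } else {
            self.num_readers.fetch_add(1, Ordering::Relaxed);
            true
        }
    }
}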
- if r == libc::EAGAIN { - panic!("rwlock maximum reader count exceeded"); - } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) { - if r == 0 { - self.raw_unlock(); - } - panic!("rwlock read lock would result in deadlock"); - } else { - assert_eq!(r, 0); - self.num_readers.fetch_add(1, Ordering::Relaxed); - } - } - #[inline] - pub unsafe fn try_read(&self) -> bool { - let r = libc::pthread_rwlock_tryrdlock(self.inner.get()); - if r == 0 { - if *self.write_locked.get() { - self.raw_unlock(); - false - } else { - self.num_readers.fetch_add(1, Ordering::Relaxed); - true - } - } else { - false - } - } - #[inline] - pub unsafe fn write(&self) { - let r = libc::pthread_rwlock_wrlock(self.inner.get()); - // See comments above for why we check for EDEADLK and write_locked. We - // also need to check that num_readers is 0. - if r == libc::EDEADLK || *self.write_locked.get() || - self.num_readers.load(Ordering::Relaxed) != 0 { - if r == 0 { - self.raw_unlock(); - } - panic!("rwlock write lock would result in deadlock"); - } else { - debug_assert_eq!(r, 0); - } - *self.write_locked.get() = true; - } - #[inline] - pub unsafe fn try_write(&self) -> bool { - let r = libc::pthread_rwlock_trywrlock(self.inner.get()); - if r == 0 { - if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 { - self.raw_unlock(); - false - } else { - *self.write_locked.get() = true; - true - } - } else { - false - } - } - #[inline] - unsafe fn raw_unlock(&self) { - let r = libc::pthread_rwlock_unlock(self.inner.get()); - debug_assert_eq!(r, 0); - } - #[inline] - pub unsafe fn read_unlock(&self) { - debug_assert!(!*self.write_locked.get()); - self.num_readers.fetch_sub(1, Ordering::Relaxed); - self.raw_unlock(); - } - #[inline] - pub unsafe fn write_unlock(&self) { - debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0); - debug_assert!(*self.write_locked.get()); - *self.write_locked.get() = false; - self.raw_unlock(); - } - #[inline] - pub unsafe fn destroy(&self) { - let r = libc::pthread_rwlock_destroy(self.inner.get()); - // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a - // rwlock that was just initialized with - // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked) - // or pthread_rwlock_init() is called, this behaviour no longer occurs. 
-        if cfg!(target_os = "dragonfly") {
-            debug_assert!(r == 0 || r == libc::EINVAL);
-        } else {
-            debug_assert_eq!(r, 0);
-        }
-    }
-}
diff --git a/src/libstd/sys/wasi/mod.rs b/src/libstd/sys/wasi/mod.rs
index a9bb0151d0556..4a9c54c5298ba 100644
--- a/src/libstd/sys/wasi/mod.rs
+++ b/src/libstd/sys/wasi/mod.rs
@@ -25,15 +25,11 @@ pub mod args;
 pub mod backtrace;
 #[path = "../wasm/cmath.rs"]
 pub mod cmath;
-#[path = "../wasm/condvar.rs"]
-pub mod condvar;
 pub mod env;
 pub mod fd;
 pub mod fs;
 #[path = "../wasm/memchr.rs"]
 pub mod memchr;
-#[path = "../wasm/mutex.rs"]
-pub mod mutex;
 pub mod net;
 pub mod io;
 pub mod os;
@@ -41,8 +37,6 @@ pub use crate::sys_common::os_str_bytes as os_str;
 pub mod path;
 pub mod pipe;
 pub mod process;
-#[path = "../wasm/rwlock.rs"]
-pub mod rwlock;
 #[path = "../wasm/stack_overflow.rs"]
 pub mod stack_overflow;
 pub mod stdio;
diff --git a/src/libstd/sys/wasm/condvar.rs b/src/libstd/sys/wasm/condvar.rs
deleted file mode 100644
index 9c7cc3c63b15f..0000000000000
--- a/src/libstd/sys/wasm/condvar.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-use crate::sys::mutex::Mutex;
-use crate::time::Duration;
-
-pub struct Condvar { }
-
-impl Condvar {
-    pub const fn new() -> Condvar {
-        Condvar { }
-    }
-
-    #[inline]
-    pub unsafe fn init(&mut self) {}
-
-    #[inline]
-    pub unsafe fn notify_one(&self) {
-    }
-
-    #[inline]
-    pub unsafe fn notify_all(&self) {
-    }
-
-    pub unsafe fn wait(&self, _mutex: &Mutex) {
-        panic!("can't block with web assembly")
-    }
-
-    pub unsafe fn wait_timeout(&self, _mutex: &Mutex, _dur: Duration) -> bool {
-        panic!("can't block with web assembly");
-    }
-
-    #[inline]
-    pub unsafe fn destroy(&self) {
-    }
-}
diff --git a/src/libstd/sys/wasm/condvar_atomics.rs b/src/libstd/sys/wasm/condvar_atomics.rs
deleted file mode 100644
index 580d21218445f..0000000000000
--- a/src/libstd/sys/wasm/condvar_atomics.rs
+++ /dev/null
@@ -1,94 +0,0 @@
-use crate::arch::wasm32;
-use crate::cmp;
-use crate::mem;
-use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst};
-use crate::sys::mutex::Mutex;
-use crate::time::Duration;
-
-pub struct Condvar {
-    cnt: AtomicUsize,
-}
-
-// Condition variables are implemented with a simple counter internally that is
-// likely to cause spurious wakeups. Blocking on a condition variable will first
-// read the value of the internal counter, unlock the given mutex, and then
-// block if and only if the counter's value is still the same. Notifying a
-// condition variable will modify the counter (add one for now) and then wake up
-// a thread waiting on the address of the counter.
-//
-// A thread waiting on the condition variable will as a result avoid going to
-// sleep if it's notified after the lock is unlocked but before it fully goes to
-// sleep. A sleeping thread is guaranteed to be woken up at some point as it can
-// only be woken up with a call to `wake`.
-//
-// Note that it's possible for 2 or more threads to be woken up by a call to
-// `notify_one` with this implementation. That can happen where the modification
-// of `cnt` causes any threads in the middle of `wait` to avoid going to sleep,
-// and the subsequent `wake` may wake up a thread that's actually blocking. We
-// consider this a spurious wakeup, though, which all users of condition
-// variables must already be prepared to handle. As a result, this source of
-// spurious wakeups is currently thought to be ok, although it may be problematic
-// later on if it causes too many spurious wakeups.
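Since this whole scheme leans on callers tolerating spurious wakeups, it is worth spelling out the contract from the user's side. A sketch against the stable `std::sync` API (illustration only, not part of the patch):

```rust
use std::sync::{Condvar, Mutex};

// Callers must re-check their predicate in a loop: a return from `wait` only
// means "maybe notified", which is exactly what the counter scheme above
// produces when several waiters race on a single notification.
fn wait_until_ready(lock: &Mutex<bool>, cvar: &Condvar) {
    let mut ready = lock.lock().unwrap();
    while !*ready {
        ready = cvar.wait(ready).unwrap();
    }
}
```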
- -impl Condvar { - pub const fn new() -> Condvar { - Condvar { cnt: AtomicUsize::new(0) } - } - - #[inline] - pub unsafe fn init(&mut self) { - // nothing to do - } - - pub unsafe fn notify_one(&self) { - self.cnt.fetch_add(1, SeqCst); - wasm32::atomic_notify(self.ptr(), 1); - } - - #[inline] - pub unsafe fn notify_all(&self) { - self.cnt.fetch_add(1, SeqCst); - wasm32::atomic_notify(self.ptr(), u32::max_value()); // -1 == "wake everyone" - } - - pub unsafe fn wait(&self, mutex: &Mutex) { - // "atomically block and unlock" implemented by loading our current - // counter's value, unlocking the mutex, and blocking if the counter - // still has the same value. - // - // Notifications happen by incrementing the counter and then waking a - // thread. Incrementing the counter after we unlock the mutex will - // prevent us from sleeping and otherwise the call to `wake` will - // wake us up once we're asleep. - let ticket = self.cnt.load(SeqCst) as i32; - mutex.unlock(); - let val = wasm32::i32_atomic_wait(self.ptr(), ticket, -1); - // 0 == woken, 1 == not equal to `ticket`, 2 == timeout (shouldn't happen) - debug_assert!(val == 0 || val == 1); - mutex.lock(); - } - - pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - let ticket = self.cnt.load(SeqCst) as i32; - mutex.unlock(); - let nanos = dur.as_nanos(); - let nanos = cmp::min(i64::max_value() as u128, nanos); - - // If the return value is 2 then a timeout happened, so we return - // `false` as we weren't actually notified. - let ret = wasm32::i32_atomic_wait(self.ptr(), ticket, nanos as i64) != 2; - mutex.lock(); - return ret - } - - #[inline] - pub unsafe fn destroy(&self) { - // nothing to do - } - - #[inline] - fn ptr(&self) -> *mut i32 { - assert_eq!(mem::size_of::(), mem::size_of::()); - &self.cnt as *const AtomicUsize as *mut i32 - } -} diff --git a/src/libstd/sys/wasm/mod.rs b/src/libstd/sys/wasm/mod.rs index 670d07de1d1e0..cad8cde3b5bc9 100644 --- a/src/libstd/sys/wasm/mod.rs +++ b/src/libstd/sys/wasm/mod.rs @@ -44,18 +44,9 @@ pub use crate::sys_common::os_str_bytes as os_str; cfg_if! { if #[cfg(target_feature = "atomics")] { - #[path = "condvar_atomics.rs"] - pub mod condvar; - #[path = "mutex_atomics.rs"] - pub mod mutex; - #[path = "rwlock_atomics.rs"] - pub mod rwlock; #[path = "thread_local_atomics.rs"] pub mod thread_local; } else { - pub mod condvar; - pub mod mutex; - pub mod rwlock; pub mod thread_local; } } diff --git a/src/libstd/sys/wasm/mutex.rs b/src/libstd/sys/wasm/mutex.rs deleted file mode 100644 index 9d713e9b43903..0000000000000 --- a/src/libstd/sys/wasm/mutex.rs +++ /dev/null @@ -1,69 +0,0 @@ -use crate::cell::UnsafeCell; - -pub struct Mutex { - locked: UnsafeCell, -} - -unsafe impl Send for Mutex {} -unsafe impl Sync for Mutex {} // no threads on wasm - -impl Mutex { - pub const fn new() -> Mutex { - Mutex { locked: UnsafeCell::new(false) } - } - - #[inline] - pub unsafe fn init(&mut self) { - } - - #[inline] - pub unsafe fn lock(&self) { - let locked = self.locked.get(); - assert!(!*locked, "cannot recursively acquire mutex"); - *locked = true; - } - - #[inline] - pub unsafe fn unlock(&self) { - *self.locked.get() = false; - } - - #[inline] - pub unsafe fn try_lock(&self) -> bool { - let locked = self.locked.get(); - if *locked { - false - } else { - *locked = true; - true - } - } - - #[inline] - pub unsafe fn destroy(&self) { - } -} - -// All empty stubs because wasm has no threads yet, so lock acquisition always -// succeeds. 
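The single-threaded fallback deleted above boils down to a boolean flag. An equivalent safe sketch (illustration only, not part of the patch; `Cell` stands in for the `UnsafeCell`, which is sound only because the target has no threads):

```rust
use std::cell::Cell;

struct FlagLock {
    locked: Cell<bool>,
}

impl FlagLock {
    const fn new() -> FlagLock {
        FlagLock { locked: Cell::new(false) }
    }
    fn lock(&self) {
        // With no threads, a set flag can only mean a reentrant acquisition.
        assert!(!self.locked.get(), "cannot recursively acquire mutex");
        self.locked.set(true);
    }
    fn try_lock(&self) -> bool {
        if self.locked.get() { false } else { self.locked.set(true); true }
    }
    fn unlock(&self) {
        self.locked.set(false);
    }
}
```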
-pub struct ReentrantMutex { -} - -impl ReentrantMutex { - pub unsafe fn uninitialized() -> ReentrantMutex { - ReentrantMutex { } - } - - pub unsafe fn init(&mut self) {} - - pub unsafe fn lock(&self) {} - - #[inline] - pub unsafe fn try_lock(&self) -> bool { - true - } - - pub unsafe fn unlock(&self) {} - - pub unsafe fn destroy(&self) {} -} diff --git a/src/libstd/sys/wasm/mutex_atomics.rs b/src/libstd/sys/wasm/mutex_atomics.rs deleted file mode 100644 index 0e4f3d80aa938..0000000000000 --- a/src/libstd/sys/wasm/mutex_atomics.rs +++ /dev/null @@ -1,150 +0,0 @@ -use crate::arch::wasm32; -use crate::cell::UnsafeCell; -use crate::mem; -use crate::sync::atomic::{AtomicUsize, AtomicU32, Ordering::SeqCst}; -use crate::sys::thread; - -pub struct Mutex { - locked: AtomicUsize, -} - -// Mutexes have a pretty simple implementation where they contain an `i32` -// internally that is 0 when unlocked and 1 when the mutex is locked. -// Acquisition has a fast path where it attempts to cmpxchg the 0 to a 1, and -// if it fails it then waits for a notification. Releasing a lock is then done -// by swapping in 0 and then notifying any waiters, if present. - -impl Mutex { - pub const fn new() -> Mutex { - Mutex { locked: AtomicUsize::new(0) } - } - - #[inline] - pub unsafe fn init(&mut self) { - // nothing to do - } - - pub unsafe fn lock(&self) { - while !self.try_lock() { - let val = wasm32::i32_atomic_wait( - self.ptr(), - 1, // we expect our mutex is locked - -1, // wait infinitely - ); - // we should have either woke up (0) or got a not-equal due to a - // race (1). We should never time out (2) - debug_assert!(val == 0 || val == 1); - } - } - - pub unsafe fn unlock(&self) { - let prev = self.locked.swap(0, SeqCst); - debug_assert_eq!(prev, 1); - wasm32::atomic_notify(self.ptr(), 1); // wake up one waiter, if any - } - - #[inline] - pub unsafe fn try_lock(&self) -> bool { - self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok() - } - - #[inline] - pub unsafe fn destroy(&self) { - // nothing to do - } - - #[inline] - fn ptr(&self) -> *mut i32 { - assert_eq!(mem::size_of::(), mem::size_of::()); - &self.locked as *const AtomicUsize as *mut isize as *mut i32 - } -} - -pub struct ReentrantMutex { - owner: AtomicU32, - recursions: UnsafeCell, -} - -unsafe impl Send for ReentrantMutex {} -unsafe impl Sync for ReentrantMutex {} - -// Reentrant mutexes are similarly implemented to mutexs above except that -// instead of "1" meaning unlocked we use the id of a thread to represent -// whether it has locked a mutex. That way we have an atomic counter which -// always holds the id of the thread that currently holds the lock (or 0 if the -// lock is unlocked). -// -// Once a thread acquires a lock recursively, which it detects by looking at -// the value that's already there, it will update a local `recursions` counter -// in a nonatomic fashion (as we hold the lock). The lock is then fully -// released when this recursion counter reaches 0. - -impl ReentrantMutex { - pub unsafe fn uninitialized() -> ReentrantMutex { - ReentrantMutex { - owner: AtomicU32::new(0), - recursions: UnsafeCell::new(0), - } - } - - pub unsafe fn init(&mut self) { - // nothing to do... 
- } - - pub unsafe fn lock(&self) { - let me = thread::my_id(); - while let Err(owner) = self._try_lock(me) { - let val = wasm32::i32_atomic_wait(self.ptr(), owner as i32, -1); - debug_assert!(val == 0 || val == 1); - } - } - - #[inline] - pub unsafe fn try_lock(&self) -> bool { - self._try_lock(thread::my_id()).is_ok() - } - - #[inline] - unsafe fn _try_lock(&self, id: u32) -> Result<(), u32> { - let id = id.checked_add(1).unwrap(); // make sure `id` isn't 0 - match self.owner.compare_exchange(0, id, SeqCst, SeqCst) { - // we transitioned from unlocked to locked - Ok(_) => { - debug_assert_eq!(*self.recursions.get(), 0); - Ok(()) - } - - // we currently own this lock, so let's update our count and return - // true. - Err(n) if n == id => { - *self.recursions.get() += 1; - Ok(()) - } - - // Someone else owns the lock, let our caller take care of it - Err(other) => Err(other), - } - } - - pub unsafe fn unlock(&self) { - // If we didn't ever recursively lock the lock then we fully unlock the - // mutex and wake up a waiter, if any. Otherwise we decrement our - // recursive counter and let some one else take care of the zero. - match *self.recursions.get() { - 0 => { - self.owner.swap(0, SeqCst); - wasm32::atomic_notify(self.ptr() as *mut i32, 1); // wake up one waiter, if any - } - ref mut n => *n -= 1, - } - } - - pub unsafe fn destroy(&self) { - // nothing to do... - } - - #[inline] - fn ptr(&self) -> *mut i32 { - &self.owner as *const AtomicU32 as *mut i32 - } -} diff --git a/src/libstd/sys/wasm/rwlock.rs b/src/libstd/sys/wasm/rwlock.rs deleted file mode 100644 index a2b07c7fa1fc0..0000000000000 --- a/src/libstd/sys/wasm/rwlock.rs +++ /dev/null @@ -1,72 +0,0 @@ -use crate::cell::UnsafeCell; - -pub struct RWLock { - mode: UnsafeCell, -} - -unsafe impl Send for RWLock {} -unsafe impl Sync for RWLock {} // no threads on wasm - -impl RWLock { - pub const fn new() -> RWLock { - RWLock { - mode: UnsafeCell::new(0), - } - } - - #[inline] - pub unsafe fn read(&self) { - let mode = self.mode.get(); - if *mode >= 0 { - *mode += 1; - } else { - rtabort!("rwlock locked for writing"); - } - } - - #[inline] - pub unsafe fn try_read(&self) -> bool { - let mode = self.mode.get(); - if *mode >= 0 { - *mode += 1; - true - } else { - false - } - } - - #[inline] - pub unsafe fn write(&self) { - let mode = self.mode.get(); - if *mode == 0 { - *mode = -1; - } else { - rtabort!("rwlock locked for reading") - } - } - - #[inline] - pub unsafe fn try_write(&self) -> bool { - let mode = self.mode.get(); - if *mode == 0 { - *mode = -1; - true - } else { - false - } - } - - #[inline] - pub unsafe fn read_unlock(&self) { - *self.mode.get() -= 1; - } - - #[inline] - pub unsafe fn write_unlock(&self) { - *self.mode.get() += 1; - } - - #[inline] - pub unsafe fn destroy(&self) { - } -} diff --git a/src/libstd/sys/wasm/rwlock_atomics.rs b/src/libstd/sys/wasm/rwlock_atomics.rs deleted file mode 100644 index c705568cec992..0000000000000 --- a/src/libstd/sys/wasm/rwlock_atomics.rs +++ /dev/null @@ -1,151 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::sys::mutex::Mutex; -use crate::sys::condvar::Condvar; - -pub struct RWLock { - lock: Mutex, - cond: Condvar, - state: UnsafeCell, -} - -enum State { - Unlocked, - Reading(usize), - Writing, -} - -unsafe impl Send for RWLock {} -unsafe impl Sync for RWLock {} - -// This rwlock implementation is a relatively simple implementation which has a -// condition variable for readers/writers as well as a mutex protecting the -// internal state of the lock. 
A current downside of the implementation is that -// unlocking the lock will notify *all* waiters rather than just readers or just -// writers. This can cause lots of "thundering stampede" problems. While -// hopefully correct this implementation is very likely to want to be changed in -// the future. - -impl RWLock { - pub const fn new() -> RWLock { - RWLock { - lock: Mutex::new(), - cond: Condvar::new(), - state: UnsafeCell::new(State::Unlocked), - } - } - - #[inline] - pub unsafe fn read(&self) { - self.lock.lock(); - while !(*self.state.get()).inc_readers() { - self.cond.wait(&self.lock); - } - self.lock.unlock(); - } - - #[inline] - pub unsafe fn try_read(&self) -> bool { - self.lock.lock(); - let ok = (*self.state.get()).inc_readers(); - self.lock.unlock(); - return ok - } - - #[inline] - pub unsafe fn write(&self) { - self.lock.lock(); - while !(*self.state.get()).inc_writers() { - self.cond.wait(&self.lock); - } - self.lock.unlock(); - } - - #[inline] - pub unsafe fn try_write(&self) -> bool { - self.lock.lock(); - let ok = (*self.state.get()).inc_writers(); - self.lock.unlock(); - return ok - } - - #[inline] - pub unsafe fn read_unlock(&self) { - self.lock.lock(); - let notify = (*self.state.get()).dec_readers(); - self.lock.unlock(); - if notify { - // FIXME: should only wake up one of these some of the time - self.cond.notify_all(); - } - } - - #[inline] - pub unsafe fn write_unlock(&self) { - self.lock.lock(); - (*self.state.get()).dec_writers(); - self.lock.unlock(); - // FIXME: should only wake up one of these some of the time - self.cond.notify_all(); - } - - #[inline] - pub unsafe fn destroy(&self) { - self.lock.destroy(); - self.cond.destroy(); - } -} - -impl State { - fn inc_readers(&mut self) -> bool { - match *self { - State::Unlocked => { - *self = State::Reading(1); - true - } - State::Reading(ref mut cnt) => { - *cnt += 1; - true - } - State::Writing => false - } - } - - fn inc_writers(&mut self) -> bool { - match *self { - State::Unlocked => { - *self = State::Writing; - true - } - State::Reading(_) | - State::Writing => false - } - } - - fn dec_readers(&mut self) -> bool { - let zero = match *self { - State::Reading(ref mut cnt) => { - *cnt -= 1; - *cnt == 0 - } - State::Unlocked | - State::Writing => invalid(), - }; - if zero { - *self = State::Unlocked; - } - zero - } - - fn dec_writers(&mut self) { - match *self { - State::Writing => {} - State::Unlocked | - State::Reading(_) => invalid(), - } - *self = State::Unlocked; - } -} - -fn invalid() -> ! 
{ - panic!("inconsistent rwlock"); -} diff --git a/src/libstd/sys/windows/c.rs b/src/libstd/sys/windows/c.rs index 518eccf754cff..a7e3835871db0 100644 --- a/src/libstd/sys/windows/c.rs +++ b/src/libstd/sys/windows/c.rs @@ -7,7 +7,6 @@ use crate::os::raw::{c_int, c_uint, c_ulong, c_long, c_longlong, c_ushort, c_char}; #[cfg(target_arch = "x86_64")] use crate::os::raw::c_ulonglong; -use crate::ptr; use libc::{wchar_t, size_t, c_void}; @@ -31,12 +30,12 @@ pub type USHORT = c_ushort; pub type SIZE_T = usize; pub type WORD = u16; pub type CHAR = c_char; -pub type ULONG_PTR = usize; pub type ULONG = c_ulong; #[cfg(target_arch = "x86_64")] pub type ULONGLONG = u64; #[cfg(target_arch = "x86_64")] pub type DWORDLONG = ULONGLONG; +pub type NTSTATUS = LONG; pub type LPBOOL = *mut BOOL; pub type LPBYTE = *mut BYTE; @@ -62,14 +61,14 @@ pub type LPWSABUF = *mut WSABUF; pub type LPWSAOVERLAPPED = *mut c_void; pub type LPWSAOVERLAPPED_COMPLETION_ROUTINE = *mut c_void; -pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE; pub type PLARGE_INTEGER = *mut c_longlong; -pub type PSRWLOCK = *mut SRWLOCK; pub type SOCKET = crate::os::windows::raw::SOCKET; pub type socklen_t = c_int; pub type ADDRESS_FAMILY = USHORT; +pub type ACCESS_MASK = DWORD; + pub const TRUE: BOOL = 1; pub const FALSE: BOOL = 0; @@ -108,6 +107,9 @@ pub const SECURITY_SQOS_PRESENT: DWORD = 0x00100000; pub const FIONBIO: c_ulong = 0x8004667e; +pub const STATUS_SUCCESS: NTSTATUS = 0x00000000; +pub const STATUS_TIMEOUT: NTSTATUS = 0x00000102; + #[cfg(target_arch = "arm")] const ARM_MAX_BREAKPOINTS: usize = 8; #[cfg(target_arch = "arm")] @@ -209,11 +211,6 @@ pub const INFINITE: DWORD = !0; pub const DUPLICATE_SAME_ACCESS: DWORD = 0x00000002; -pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE { - ptr: ptr::null_mut(), -}; -pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: ptr::null_mut() }; - pub const DETACHED_PROCESS: DWORD = 0x00000008; pub const CREATE_NEW_PROCESS_GROUP: DWORD = 0x00000200; pub const CREATE_UNICODE_ENVIRONMENT: DWORD = 0x00000400; @@ -463,20 +460,6 @@ pub type LPPROGRESS_ROUTINE = crate::option::Option DWORD>; -#[repr(C)] -pub struct CONDITION_VARIABLE { pub ptr: LPVOID } -#[repr(C)] -pub struct SRWLOCK { pub ptr: LPVOID } -#[repr(C)] -pub struct CRITICAL_SECTION { - CriticalSectionDebug: LPVOID, - LockCount: LONG, - RecursionCount: LONG, - OwningThread: HANDLE, - LockSemaphore: HANDLE, - SpinCount: ULONG_PTR -} - #[repr(C)] pub struct REPARSE_MOUNTPOINT_DATA_BUFFER { pub ReparseTag: DWORD, @@ -1022,11 +1005,6 @@ extern "system" { g: GROUP, dwFlags: DWORD) -> SOCKET; pub fn ioctlsocket(s: SOCKET, cmd: c_long, argp: *mut c_ulong) -> c_int; - pub fn InitializeCriticalSection(CriticalSection: *mut CRITICAL_SECTION); - pub fn EnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION); - pub fn TryEnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION) -> BOOLEAN; - pub fn LeaveCriticalSection(CriticalSection: *mut CRITICAL_SECTION); - pub fn DeleteCriticalSection(CriticalSection: *mut CRITICAL_SECTION); pub fn ReadConsoleW(hConsoleInput: HANDLE, lpBuffer: LPVOID, @@ -1131,6 +1109,7 @@ extern "system" { lpFilename: LPWSTR, nSize: DWORD) -> DWORD; + pub fn GetModuleHandleA(lpModuleName: LPCSTR) -> HMODULE; pub fn CreateDirectoryW(lpPathName: LPCWSTR, lpSecurityAttributes: LPSECURITY_ATTRIBUTES) -> BOOL; @@ -1328,38 +1307,6 @@ compat_fn! 
{ _dwBufferSize: DWORD) -> BOOL { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0 } - pub fn SleepConditionVariableSRW(ConditionVariable: PCONDITION_VARIABLE, - SRWLock: PSRWLOCK, - dwMilliseconds: DWORD, - Flags: ULONG) -> BOOL { - panic!("condition variables not available") - } - pub fn WakeConditionVariable(ConditionVariable: PCONDITION_VARIABLE) - -> () { - panic!("condition variables not available") - } - pub fn WakeAllConditionVariable(ConditionVariable: PCONDITION_VARIABLE) - -> () { - panic!("condition variables not available") - } - pub fn AcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> () { - panic!("rwlocks not available") - } - pub fn AcquireSRWLockShared(SRWLock: PSRWLOCK) -> () { - panic!("rwlocks not available") - } - pub fn ReleaseSRWLockExclusive(SRWLock: PSRWLOCK) -> () { - panic!("rwlocks not available") - } - pub fn ReleaseSRWLockShared(SRWLock: PSRWLOCK) -> () { - panic!("rwlocks not available") - } - pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN { - panic!("rwlocks not available") - } - pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN { - panic!("rwlocks not available") - } } #[cfg(all(target_env = "gnu", feature = "backtrace"))] diff --git a/src/libstd/sys/windows/condvar.rs b/src/libstd/sys/windows/condvar.rs deleted file mode 100644 index 62835ea7c94f3..0000000000000 --- a/src/libstd/sys/windows/condvar.rs +++ /dev/null @@ -1,55 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::sys::c; -use crate::sys::mutex::{self, Mutex}; -use crate::sys::os; -use crate::time::Duration; - -pub struct Condvar { inner: UnsafeCell } - -unsafe impl Send for Condvar {} -unsafe impl Sync for Condvar {} - -impl Condvar { - pub const fn new() -> Condvar { - Condvar { inner: UnsafeCell::new(c::CONDITION_VARIABLE_INIT) } - } - - #[inline] - pub unsafe fn init(&mut self) {} - - #[inline] - pub unsafe fn wait(&self, mutex: &Mutex) { - let r = c::SleepConditionVariableSRW(self.inner.get(), - mutex::raw(mutex), - c::INFINITE, - 0); - debug_assert!(r != 0); - } - - pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - let r = c::SleepConditionVariableSRW(self.inner.get(), - mutex::raw(mutex), - super::dur2timeout(dur), - 0); - if r == 0 { - debug_assert_eq!(os::errno() as usize, c::ERROR_TIMEOUT as usize); - false - } else { - true - } - } - - #[inline] - pub unsafe fn notify_one(&self) { - c::WakeConditionVariable(self.inner.get()) - } - - #[inline] - pub unsafe fn notify_all(&self) { - c::WakeAllConditionVariable(self.inner.get()) - } - - pub unsafe fn destroy(&self) { - // ... - } -} diff --git a/src/libstd/sys/windows/mod.rs b/src/libstd/sys/windows/mod.rs index 1425254a2e126..9f42244cd2907 100644 --- a/src/libstd/sys/windows/mod.rs +++ b/src/libstd/sys/windows/mod.rs @@ -18,7 +18,6 @@ pub mod args; pub mod backtrace; pub mod c; pub mod cmath; -pub mod condvar; #[cfg(feature = "backtrace")] pub mod dynamic_lib; pub mod env; @@ -28,7 +27,6 @@ pub mod fs; pub mod handle; pub mod io; pub mod memchr; -pub mod mutex; pub mod net; pub mod os; pub mod os_str; @@ -36,7 +34,6 @@ pub mod path; pub mod pipe; pub mod process; pub mod rand; -pub mod rwlock; pub mod stack_overflow; pub mod thread; pub mod thread_local; diff --git a/src/libstd/sys/windows/mutex.rs b/src/libstd/sys/windows/mutex.rs deleted file mode 100644 index 37cbdcefcedcc..0000000000000 --- a/src/libstd/sys/windows/mutex.rs +++ /dev/null @@ -1,180 +0,0 @@ -//! System Mutexes -//! -//! The Windows implementation of mutexes is a little odd and it may not be -//! 
immediately obvious what's going on. The primary oddness is that SRWLock is -//! used instead of CriticalSection, and this is done because: -//! -//! 1. SRWLock is several times faster than CriticalSection according to -//! benchmarks performed on both Windows 8 and Windows 7. -//! -//! 2. CriticalSection allows recursive locking while SRWLock deadlocks. The -//! Unix implementation deadlocks so consistency is preferred. See #19962 for -//! more details. -//! -//! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy -//! is that there are no guarantees of fairness. -//! -//! The downside of this approach, however, is that SRWLock is not available on -//! Windows XP, so we continue to have a fallback implementation where -//! CriticalSection is used and we keep track of who's holding the mutex to -//! detect recursive locks. - -use crate::cell::UnsafeCell; -use crate::mem::{self, MaybeUninit}; -use crate::sync::atomic::{AtomicUsize, Ordering}; -use crate::sys::c; -use crate::sys::compat; - -pub struct Mutex { - lock: AtomicUsize, - held: UnsafeCell, -} - -unsafe impl Send for Mutex {} -unsafe impl Sync for Mutex {} - -#[derive(Clone, Copy)] -enum Kind { - SRWLock = 1, - CriticalSection = 2, -} - -#[inline] -pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK { - debug_assert!(mem::size_of::() <= mem::size_of_val(&m.lock)); - &m.lock as *const _ as *mut _ -} - -impl Mutex { - pub const fn new() -> Mutex { - Mutex { - // This works because SRWLOCK_INIT is 0 (wrapped in a struct), so we are also properly - // initializing an SRWLOCK here. - lock: AtomicUsize::new(0), - held: UnsafeCell::new(false), - } - } - #[inline] - pub unsafe fn init(&mut self) {} - pub unsafe fn lock(&self) { - match kind() { - Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)), - Kind::CriticalSection => { - let re = self.remutex(); - (*re).lock(); - if !self.flag_locked() { - (*re).unlock(); - panic!("cannot recursively lock a mutex"); - } - } - } - } - pub unsafe fn try_lock(&self) -> bool { - match kind() { - Kind::SRWLock => c::TryAcquireSRWLockExclusive(raw(self)) != 0, - Kind::CriticalSection => { - let re = self.remutex(); - if !(*re).try_lock() { - false - } else if self.flag_locked() { - true - } else { - (*re).unlock(); - false - } - } - } - } - pub unsafe fn unlock(&self) { - *self.held.get() = false; - match kind() { - Kind::SRWLock => c::ReleaseSRWLockExclusive(raw(self)), - Kind::CriticalSection => (*self.remutex()).unlock(), - } - } - pub unsafe fn destroy(&self) { - match kind() { - Kind::SRWLock => {} - Kind::CriticalSection => { - match self.lock.load(Ordering::SeqCst) { - 0 => {} - n => { Box::from_raw(n as *mut ReentrantMutex).destroy(); } - } - } - } - } - - unsafe fn remutex(&self) -> *mut ReentrantMutex { - match self.lock.load(Ordering::SeqCst) { - 0 => {} - n => return n as *mut _, - } - let mut re = box ReentrantMutex::uninitialized(); - re.init(); - let re = Box::into_raw(re); - match self.lock.compare_and_swap(0, re as usize, Ordering::SeqCst) { - 0 => re, - n => { Box::from_raw(re).destroy(); n as *mut _ } - } - } - - unsafe fn flag_locked(&self) -> bool { - if *self.held.get() { - false - } else { - *self.held.get() = true; - true - } - - } -} - -fn kind() -> Kind { - static KIND: AtomicUsize = AtomicUsize::new(0); - - let val = KIND.load(Ordering::SeqCst); - if val == Kind::SRWLock as usize { - return Kind::SRWLock - } else if val == Kind::CriticalSection as usize { - return Kind::CriticalSection - } - - let ret = match compat::lookup("kernel32", 
"AcquireSRWLockExclusive") { - None => Kind::CriticalSection, - Some(..) => Kind::SRWLock, - }; - KIND.store(ret as usize, Ordering::SeqCst); - return ret; -} - -pub struct ReentrantMutex { inner: UnsafeCell> } - -unsafe impl Send for ReentrantMutex {} -unsafe impl Sync for ReentrantMutex {} - -impl ReentrantMutex { - pub fn uninitialized() -> ReentrantMutex { - ReentrantMutex { inner: UnsafeCell::new(MaybeUninit::uninit()) } - } - - pub unsafe fn init(&mut self) { - c::InitializeCriticalSection((&mut *self.inner.get()).as_mut_ptr()); - } - - pub unsafe fn lock(&self) { - c::EnterCriticalSection((&mut *self.inner.get()).as_mut_ptr()); - } - - #[inline] - pub unsafe fn try_lock(&self) -> bool { - c::TryEnterCriticalSection((&mut *self.inner.get()).as_mut_ptr()) != 0 - } - - pub unsafe fn unlock(&self) { - c::LeaveCriticalSection((&mut *self.inner.get()).as_mut_ptr()); - } - - pub unsafe fn destroy(&self) { - c::DeleteCriticalSection((&mut *self.inner.get()).as_mut_ptr()); - } -} diff --git a/src/libstd/sys/windows/process.rs b/src/libstd/sys/windows/process.rs index e39b7ae889025..fa9c75b54ca69 100644 --- a/src/libstd/sys/windows/process.rs +++ b/src/libstd/sys/windows/process.rs @@ -11,7 +11,7 @@ use crate::mem; use crate::os::windows::ffi::OsStrExt; use crate::path::Path; use crate::ptr; -use crate::sys::mutex::Mutex; +use crate::sync::RawMutex; use crate::sys::c; use crate::sys::fs::{OpenOptions, File}; use crate::sys::handle::Handle; @@ -88,10 +88,6 @@ pub struct StdioPipes { pub stderr: Option, } -struct DropGuard<'a> { - lock: &'a Mutex, -} - impl Command { pub fn new(program: &OsStr) -> Command { Command { @@ -177,8 +173,8 @@ impl Command { // // For more information, msdn also has an article about this race: // http://support.microsoft.com/kb/315939 - static CREATE_PROCESS_LOCK: Mutex = Mutex::new(); - let _guard = DropGuard::new(&CREATE_PROCESS_LOCK); + static CREATE_PROCESS_LOCK: RawMutex = RawMutex::new(); + let _guard = CREATE_PROCESS_LOCK.lock(); let mut pipes = StdioPipes { stdin: None, @@ -228,23 +224,6 @@ impl fmt::Debug for Command { } } -impl<'a> DropGuard<'a> { - fn new(lock: &'a Mutex) -> DropGuard<'a> { - unsafe { - lock.lock(); - DropGuard { lock } - } - } -} - -impl<'a> Drop for DropGuard<'a> { - fn drop(&mut self) { - unsafe { - self.lock.unlock(); - } - } -} - impl Stdio { fn to_handle(&self, stdio_id: c::DWORD, pipe: &mut Option) -> io::Result { diff --git a/src/libstd/sys/windows/rwlock.rs b/src/libstd/sys/windows/rwlock.rs deleted file mode 100644 index ef57562fc3a0b..0000000000000 --- a/src/libstd/sys/windows/rwlock.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::cell::UnsafeCell; -use crate::sys::c; - -pub struct RWLock { inner: UnsafeCell } - -unsafe impl Send for RWLock {} -unsafe impl Sync for RWLock {} - -impl RWLock { - pub const fn new() -> RWLock { - RWLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) } - } - #[inline] - pub unsafe fn read(&self) { - c::AcquireSRWLockShared(self.inner.get()) - } - #[inline] - pub unsafe fn try_read(&self) -> bool { - c::TryAcquireSRWLockShared(self.inner.get()) != 0 - } - #[inline] - pub unsafe fn write(&self) { - c::AcquireSRWLockExclusive(self.inner.get()) - } - #[inline] - pub unsafe fn try_write(&self) -> bool { - c::TryAcquireSRWLockExclusive(self.inner.get()) != 0 - } - #[inline] - pub unsafe fn read_unlock(&self) { - c::ReleaseSRWLockShared(self.inner.get()) - } - #[inline] - pub unsafe fn write_unlock(&self) { - c::ReleaseSRWLockExclusive(self.inner.get()) - } - - #[inline] - pub unsafe fn destroy(&self) { - // ... 
- } -} diff --git a/src/libstd/sys_common/at_exit_imp.rs b/src/libstd/sys_common/at_exit_imp.rs index cdb72ee872e04..9085531ccd265 100644 --- a/src/libstd/sys_common/at_exit_imp.rs +++ b/src/libstd/sys_common/at_exit_imp.rs @@ -4,7 +4,7 @@ use crate::ptr; use crate::mem; -use crate::sys_common::mutex::Mutex; +use crate::sync::RawMutex; type Queue = Vec>; @@ -12,9 +12,7 @@ type Queue = Vec>; // on poisoning and this module needs to operate at a lower level than requiring // the thread infrastructure to be in place (useful on the borders of // initialization/destruction). -// We never call `LOCK.init()`, so it is UB to attempt to -// acquire this mutex reentrantly! -static LOCK: Mutex = Mutex::new(); +static LOCK: RawMutex = RawMutex::new(); static mut QUEUE: *mut Queue = ptr::null_mut(); const DONE: *mut Queue = 1_usize as *mut _; @@ -61,8 +59,8 @@ pub fn cleanup() { } pub fn push(f: Box) -> bool { + let _guard = LOCK.lock(); unsafe { - let _guard = LOCK.lock(); if init() { // We are just moving `f` around, not calling it. // There is no possibility of reentrancy here. diff --git a/src/libstd/sys_common/backtrace.rs b/src/libstd/sys_common/backtrace.rs index 8d8d8169b4383..9626057c3892c 100644 --- a/src/libstd/sys_common/backtrace.rs +++ b/src/libstd/sys_common/backtrace.rs @@ -7,8 +7,8 @@ use crate::io; use crate::path::{self, Path}; use crate::ptr; use crate::str; +use crate::sync::RawMutex; use crate::sync::atomic::{self, Ordering}; -use crate::sys::mutex::Mutex; use rustc_demangle::demangle; @@ -42,7 +42,7 @@ const MAX_NB_FRAMES: usize = 100; /// Prints the current backtrace. pub fn print(w: &mut dyn Write, format: PrintFormat) -> io::Result<()> { - static LOCK: Mutex = Mutex::new(); + static LOCK: RawMutex = RawMutex::new(); // There are issues currently linking libbacktrace into tests, and in // general during libstd's own unit tests we're not testing this path. In @@ -54,12 +54,8 @@ pub fn print(w: &mut dyn Write, format: PrintFormat) -> io::Result<()> { // Use a lock to prevent mixed output in multithreading context. // Some platforms also requires it, like `SymFromAddr` on Windows. - unsafe { - LOCK.lock(); - let res = _print(w, format); - LOCK.unlock(); - res - } + let _guard = LOCK.lock(); + _print(w, format) } fn _print(w: &mut dyn Write, format: PrintFormat) -> io::Result<()> { diff --git a/src/libstd/sys_common/condvar.rs b/src/libstd/sys_common/condvar.rs deleted file mode 100644 index fc59c8356f46a..0000000000000 --- a/src/libstd/sys_common/condvar.rs +++ /dev/null @@ -1,60 +0,0 @@ -use crate::time::Duration; -use crate::sys_common::mutex::{self, Mutex}; -use crate::sys::condvar as imp; - -/// An OS-based condition variable. -/// -/// This structure is the lowest layer possible on top of the OS-provided -/// condition variables. It is consequently entirely unsafe to use. It is -/// recommended to use the safer types at the top level of this crate instead of -/// this type. -pub struct Condvar(imp::Condvar); - -impl Condvar { - /// Creates a new condition variable for use. - /// - /// Behavior is undefined if the condition variable is moved after it is - /// first used with any of the functions below. - pub const fn new() -> Condvar { Condvar(imp::Condvar::new()) } - - /// Prepares the condition variable for use. - /// - /// This should be called once the condition variable is at a stable memory - /// address. - #[inline] - pub unsafe fn init(&mut self) { self.0.init() } - - /// Signals one waiter on this condition variable to wake up. 
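For contrast with the raw wrapper being removed here, the timed-wait pattern through the safe API looks like this (sketch, not part of the patch):

```rust
use std::sync::{Condvar, Mutex};
use std::time::Duration;

// Returns `true` if the flag was set before the deadline, `false` on timeout.
fn wait_with_deadline(lock: &Mutex<bool>, cvar: &Condvar) -> bool {
    let mut done = lock.lock().unwrap();
    while !*done {
        let (guard, result) = cvar.wait_timeout(done, Duration::from_millis(100)).unwrap();
        done = guard;
        if result.timed_out() {
            return false;
        }
    }
    true
}
```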
- #[inline] - pub unsafe fn notify_one(&self) { self.0.notify_one() } - - /// Awakens all current waiters on this condition variable. - #[inline] - pub unsafe fn notify_all(&self) { self.0.notify_all() } - - /// Waits for a signal on the specified mutex. - /// - /// Behavior is undefined if the mutex is not locked by the current thread. - /// Behavior is also undefined if more than one mutex is used concurrently - /// on this condition variable. - #[inline] - pub unsafe fn wait(&self, mutex: &Mutex) { self.0.wait(mutex::raw(mutex)) } - - /// Waits for a signal on the specified mutex with a timeout duration - /// specified by `dur` (a relative time into the future). - /// - /// Behavior is undefined if the mutex is not locked by the current thread. - /// Behavior is also undefined if more than one mutex is used concurrently - /// on this condition variable. - #[inline] - pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - self.0.wait_timeout(mutex::raw(mutex), dur) - } - - /// Deallocates all resources associated with this condition variable. - /// - /// Behavior is undefined if there are current or will be future users of - /// this condition variable. - #[inline] - pub unsafe fn destroy(&self) { self.0.destroy() } -} diff --git a/src/libstd/sys_common/mod.rs b/src/libstd/sys_common/mod.rs index 78e159942643f..d8b76e04f4dbb 100644 --- a/src/libstd/sys_common/mod.rs +++ b/src/libstd/sys_common/mod.rs @@ -43,9 +43,7 @@ pub mod alloc; pub mod at_exit_imp; #[cfg(feature = "backtrace")] pub mod backtrace; -pub mod condvar; pub mod io; -pub mod mutex; #[cfg(any(rustdoc, // see `mod os`, docs are generated for multiple platforms unix, target_os = "redox", @@ -54,8 +52,6 @@ pub mod mutex; all(target_vendor = "fortanix", target_env = "sgx")))] pub mod os_str_bytes; pub mod poison; -pub mod remutex; -pub mod rwlock; pub mod thread; pub mod thread_info; pub mod thread_local; diff --git a/src/libstd/sys_common/mutex.rs b/src/libstd/sys_common/mutex.rs deleted file mode 100644 index 28d85949ffa3c..0000000000000 --- a/src/libstd/sys_common/mutex.rs +++ /dev/null @@ -1,84 +0,0 @@ -use crate::sys::mutex as imp; - -/// An OS-based mutual exclusion lock. -/// -/// This is the thinnest cross-platform wrapper around OS mutexes. All usage of -/// this mutex is unsafe and it is recommended to instead use the safe wrapper -/// at the top level of the crate instead of this type. -pub struct Mutex(imp::Mutex); - -unsafe impl Sync for Mutex {} - -impl Mutex { - /// Creates a new mutex for use. - /// - /// Behavior is undefined if the mutex is moved after it is - /// first used with any of the functions below. - /// Also, until `init` is called, behavior is undefined if this - /// mutex is ever used reentrantly, i.e., `raw_lock` or `try_lock` - /// are called by the thread currently holding the lock. - pub const fn new() -> Mutex { Mutex(imp::Mutex::new()) } - - /// Prepare the mutex for use. - /// - /// This should be called once the mutex is at a stable memory address. - /// If called, this must be the very first thing that happens to the mutex. - /// Calling it in parallel with or after any operation (including another - /// `init()`) is undefined behavior. - #[inline] - pub unsafe fn init(&mut self) { self.0.init() } - - /// Locks the mutex blocking the current thread until it is available. - /// - /// Behavior is undefined if the mutex has been moved between this and any - /// previous function call. 
- #[inline] - pub unsafe fn raw_lock(&self) { self.0.lock() } - - /// Calls raw_lock() and then returns an RAII guard to guarantee the mutex - /// will be unlocked. - #[inline] - pub unsafe fn lock(&self) -> MutexGuard<'_> { - self.raw_lock(); - MutexGuard(&self.0) - } - - /// Attempts to lock the mutex without blocking, returning whether it was - /// successfully acquired or not. - /// - /// Behavior is undefined if the mutex has been moved between this and any - /// previous function call. - #[inline] - pub unsafe fn try_lock(&self) -> bool { self.0.try_lock() } - - /// Unlocks the mutex. - /// - /// Behavior is undefined if the current thread does not actually hold the - /// mutex. - /// - /// Consider switching from the pair of raw_lock() and raw_unlock() to - /// lock() whenever possible. - #[inline] - pub unsafe fn raw_unlock(&self) { self.0.unlock() } - - /// Deallocates all resources associated with this mutex. - /// - /// Behavior is undefined if there are current or will be future users of - /// this mutex. - #[inline] - pub unsafe fn destroy(&self) { self.0.destroy() } -} - -// not meant to be exported to the outside world, just the containing module -pub fn raw(mutex: &Mutex) -> &imp::Mutex { &mutex.0 } - -#[must_use] -/// A simple RAII utility for the above Mutex without the poisoning semantics. -pub struct MutexGuard<'a>(&'a imp::Mutex); - -impl Drop for MutexGuard<'_> { - #[inline] - fn drop(&mut self) { - unsafe { self.0.unlock(); } - } -} diff --git a/src/libstd/sys_common/remutex.rs b/src/libstd/sys_common/remutex.rs deleted file mode 100644 index f08b13c4aa274..0000000000000 --- a/src/libstd/sys_common/remutex.rs +++ /dev/null @@ -1,239 +0,0 @@ -use crate::fmt; -use crate::marker; -use crate::ops::Deref; -use crate::sys_common::poison::{self, TryLockError, TryLockResult, LockResult}; -use crate::sys::mutex as sys; -use crate::panic::{UnwindSafe, RefUnwindSafe}; - -/// A re-entrant mutual exclusion -/// -/// This mutex will block *other* threads waiting for the lock to become -/// available. The thread which has already locked the mutex can lock it -/// multiple times without blocking, preventing a common source of deadlocks. -pub struct ReentrantMutex { - inner: Box, - poison: poison::Flag, - data: T, -} - -unsafe impl Send for ReentrantMutex {} -unsafe impl Sync for ReentrantMutex {} - -impl UnwindSafe for ReentrantMutex {} -impl RefUnwindSafe for ReentrantMutex {} - - -/// An RAII implementation of a "scoped lock" of a mutex. When this structure is -/// dropped (falls out of scope), the lock will be unlocked. -/// -/// The data protected by the mutex can be accessed through this guard via its -/// Deref implementation. -/// -/// # Mutability -/// -/// Unlike `MutexGuard`, `ReentrantMutexGuard` does not implement `DerefMut`, -/// because implementation of the trait would violate Rust’s reference aliasing -/// rules. Use interior mutability (usually `RefCell`) in order to mutate the -/// guarded data. -#[must_use = "if unused the ReentrantMutex will immediately unlock"] -pub struct ReentrantMutexGuard<'a, T: 'a> { - // funny underscores due to how Deref currently works (it disregards field - // privacy). - __lock: &'a ReentrantMutex, - __poison: poison::Guard, -} - -impl !marker::Send for ReentrantMutexGuard<'_, T> {} - - -impl ReentrantMutex { - /// Creates a new reentrant mutex in an unlocked state. 
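The `Deref`-only guard plus `RefCell` combination described above deserves a concrete illustration. A sketch against the public API of the external `parking_lot` crate, whose `ReentrantMutex` replaces this wrapper in the patch:

```rust
use parking_lot::ReentrantMutex;
use std::cell::RefCell;

fn main() {
    let m = ReentrantMutex::new(RefCell::new(0));
    let outer = m.lock();
    {
        // The same thread may lock again without deadlocking; mutation goes
        // through RefCell because the guard only implements Deref.
        let inner = m.lock();
        *inner.borrow_mut() += 1;
    }
    assert_eq!(*outer.borrow(), 1);
}
```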
- pub fn new(t: T) -> ReentrantMutex { - unsafe { - let mut mutex = ReentrantMutex { - inner: box sys::ReentrantMutex::uninitialized(), - poison: poison::Flag::new(), - data: t, - }; - mutex.inner.init(); - mutex - } - } - - /// Acquires a mutex, blocking the current thread until it is able to do so. - /// - /// This function will block the caller until it is available to acquire the mutex. - /// Upon returning, the thread is the only thread with the mutex held. When the thread - /// calling this method already holds the lock, the call shall succeed without - /// blocking. - /// - /// # Errors - /// - /// If another user of this mutex panicked while holding the mutex, then - /// this call will return failure if the mutex would otherwise be - /// acquired. - pub fn lock(&self) -> LockResult> { - unsafe { self.inner.lock() } - ReentrantMutexGuard::new(&self) - } - - /// Attempts to acquire this lock. - /// - /// If the lock could not be acquired at this time, then `Err` is returned. - /// Otherwise, an RAII guard is returned. - /// - /// This function does not block. - /// - /// # Errors - /// - /// If another user of this mutex panicked while holding the mutex, then - /// this call will return failure if the mutex would otherwise be - /// acquired. - pub fn try_lock(&self) -> TryLockResult> { - if unsafe { self.inner.try_lock() } { - Ok(ReentrantMutexGuard::new(&self)?) - } else { - Err(TryLockError::WouldBlock) - } - } -} - -impl Drop for ReentrantMutex { - fn drop(&mut self) { - // This is actually safe b/c we know that there is no further usage of - // this mutex (it's up to the user to arrange for a mutex to get - // dropped, that's not our job) - unsafe { self.inner.destroy() } - } -} - -impl fmt::Debug for ReentrantMutex { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.try_lock() { - Ok(guard) => f.debug_struct("ReentrantMutex").field("data", &*guard).finish(), - Err(TryLockError::Poisoned(err)) => { - f.debug_struct("ReentrantMutex").field("data", &**err.get_ref()).finish() - }, - Err(TryLockError::WouldBlock) => { - struct LockedPlaceholder; - impl fmt::Debug for LockedPlaceholder { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("") - } - } - - f.debug_struct("ReentrantMutex").field("data", &LockedPlaceholder).finish() - } - } - } -} - -impl<'mutex, T> ReentrantMutexGuard<'mutex, T> { - fn new(lock: &'mutex ReentrantMutex) - -> LockResult> { - poison::map_result(lock.poison.borrow(), |guard| { - ReentrantMutexGuard { - __lock: lock, - __poison: guard, - } - }) - } -} - -impl Deref for ReentrantMutexGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - &self.__lock.data - } -} - -impl Drop for ReentrantMutexGuard<'_, T> { - #[inline] - fn drop(&mut self) { - unsafe { - self.__lock.poison.done(&self.__poison); - self.__lock.inner.unlock(); - } - } -} - - -#[cfg(all(test, not(target_os = "emscripten")))] -mod tests { - use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard}; - use crate::cell::RefCell; - use crate::sync::Arc; - use crate::thread; - - #[test] - fn smoke() { - let m = ReentrantMutex::new(()); - { - let a = m.lock().unwrap(); - { - let b = m.lock().unwrap(); - { - let c = m.lock().unwrap(); - assert_eq!(*c, ()); - } - assert_eq!(*b, ()); - } - assert_eq!(*a, ()); - } - } - - #[test] - fn is_mutex() { - let m = Arc::new(ReentrantMutex::new(RefCell::new(0))); - let m2 = m.clone(); - let lock = m.lock().unwrap(); - let child = thread::spawn(move || { - let lock = m2.lock().unwrap(); - 
assert_eq!(*lock.borrow(), 4950); - }); - for i in 0..100 { - let lock = m.lock().unwrap(); - *lock.borrow_mut() += i; - } - drop(lock); - child.join().unwrap(); - } - - #[test] - fn trylock_works() { - let m = Arc::new(ReentrantMutex::new(())); - let m2 = m.clone(); - let _lock = m.try_lock().unwrap(); - let _lock2 = m.try_lock().unwrap(); - thread::spawn(move || { - let lock = m2.try_lock(); - assert!(lock.is_err()); - }).join().unwrap(); - let _lock3 = m.try_lock().unwrap(); - } - - pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell>); - impl Drop for Answer<'_> { - fn drop(&mut self) { - *self.0.borrow_mut() = 42; - } - } - - #[test] - fn poison_works() { - let m = Arc::new(ReentrantMutex::new(RefCell::new(0))); - let mc = m.clone(); - let result = thread::spawn(move ||{ - let lock = mc.lock().unwrap(); - *lock.borrow_mut() = 1; - let lock2 = mc.lock().unwrap(); - *lock.borrow_mut() = 2; - let _answer = Answer(lock2); - panic!("What the answer to my lifetimes dilemma is?"); - }).join(); - assert!(result.is_err()); - let r = m.lock().err().unwrap().into_inner(); - assert_eq!(*r.borrow(), 42); - } -} diff --git a/src/libstd/sys_common/rwlock.rs b/src/libstd/sys_common/rwlock.rs deleted file mode 100644 index 0b1a092de5422..0000000000000 --- a/src/libstd/sys_common/rwlock.rs +++ /dev/null @@ -1,72 +0,0 @@ -use crate::sys::rwlock as imp; - -/// An OS-based reader-writer lock. -/// -/// This structure is entirely unsafe and serves as the lowest layer of a -/// cross-platform binding of system rwlocks. It is recommended to use the -/// safer types at the top level of this crate instead of this type. -pub struct RWLock(imp::RWLock); - -impl RWLock { - /// Creates a new reader-writer lock for use. - /// - /// Behavior is undefined if the reader-writer lock is moved after it is - /// first used with any of the functions below. - pub const fn new() -> RWLock { RWLock(imp::RWLock::new()) } - - /// Acquires shared access to the underlying lock, blocking the current - /// thread to do so. - /// - /// Behavior is undefined if the rwlock has been moved between this and any - /// previous method call. - #[inline] - pub unsafe fn read(&self) { self.0.read() } - - /// Attempts to acquire shared access to this lock, returning whether it - /// succeeded or not. - /// - /// This function does not block the current thread. - /// - /// Behavior is undefined if the rwlock has been moved between this and any - /// previous method call. - #[inline] - pub unsafe fn try_read(&self) -> bool { self.0.try_read() } - - /// Acquires write access to the underlying lock, blocking the current thread - /// to do so. - /// - /// Behavior is undefined if the rwlock has been moved between this and any - /// previous method call. - #[inline] - pub unsafe fn write(&self) { self.0.write() } - - /// Attempts to acquire exclusive access to this lock, returning whether it - /// succeeded or not. - /// - /// This function does not block the current thread. - /// - /// Behavior is undefined if the rwlock has been moved between this and any - /// previous method call. - #[inline] - pub unsafe fn try_write(&self) -> bool { self.0.try_write() } - - /// Unlocks previously acquired shared access to this lock. - /// - /// Behavior is undefined if the current thread does not have shared access. - #[inline] - pub unsafe fn read_unlock(&self) { self.0.read_unlock() } - - /// Unlocks previously acquired exclusive access to this lock. - /// - /// Behavior is undefined if the current thread does not currently have - /// exclusive access. 
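The guard-based API layered on top of these raw calls enforces the same read/write rules at compile time. A small sketch with the safe `std::sync::RwLock` (illustration only):

```rust
use std::sync::RwLock;

fn main() {
    let lock = RwLock::new(5);
    {
        // Shared access: any number of read guards may coexist.
        let r1 = lock.read().unwrap();
        let r2 = lock.read().unwrap();
        assert_eq!(*r1 + *r2, 10);
    } // read guards drop here, releasing shared access
    // Exclusive access: granted only once all readers are gone.
    *lock.write().unwrap() += 1;
    assert_eq!(*lock.read().unwrap(), 6);
}
```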
-    #[inline]
-    pub unsafe fn write_unlock(&self) { self.0.write_unlock() }
-
-    /// Destroys OS-related resources with this RWLock.
-    ///
-    /// Behavior is undefined if there are any currently active users of this
-    /// lock.
-    #[inline]
-    pub unsafe fn destroy(&self) { self.0.destroy() }
-}
diff --git a/src/libstd/sys_common/thread_local.rs b/src/libstd/sys_common/thread_local.rs
index bdf79002e906d..fc80e97971515 100644
--- a/src/libstd/sys_common/thread_local.rs
+++ b/src/libstd/sys_common/thread_local.rs
@@ -49,9 +49,8 @@
 #![allow(dead_code)] // sys isn't exported yet
 
 use crate::ptr;
-use crate::sync::atomic::{self, AtomicUsize, Ordering};
+use crate::sync::{RawMutex, atomic::{self, AtomicUsize, Ordering}};
 use crate::sys::thread_local as imp;
-use crate::sys_common::mutex::Mutex;
 
 /// A type for TLS keys that are statically allocated.
 ///
@@ -151,9 +150,7 @@ impl StaticKey {
         // Additionally a 0-index of a tls key hasn't been seen on windows, so
         // we just simplify the whole branch.
         if imp::requires_synchronized_create() {
-            // We never call `INIT_LOCK.init()`, so it is UB to attempt to
-            // acquire this mutex reentrantly!
-            static INIT_LOCK: Mutex = Mutex::new();
+            static INIT_LOCK: RawMutex = RawMutex::new();
             let _guard = INIT_LOCK.lock();
             let mut key = self.key.load(Ordering::SeqCst);
             if key == 0 {
diff --git a/src/libstd/thread/mod.rs b/src/libstd/thread/mod.rs
index fce28ffd9c388..c4c6148e2dc9c 100644
--- a/src/libstd/thread/mod.rs
+++ b/src/libstd/thread/mod.rs
@@ -166,15 +166,14 @@ use crate::num::NonZeroU64;
 use crate::panic;
 use crate::panicking;
 use crate::str;
-use crate::sync::{Mutex, Condvar, Arc};
-use crate::sync::atomic::AtomicUsize;
+use crate::sync::{Arc, RawMutex};
+use crate::sync::atomic::AtomicU8;
 use crate::sync::atomic::Ordering::SeqCst;
 use crate::sys::thread as imp;
-use crate::sys_common::mutex;
 use crate::sys_common::thread_info;
 use crate::sys_common::thread;
 use crate::sys_common::{AsInner, IntoInner};
-use crate::time::Duration;
+use crate::time::{Duration, Instant};
 
 ////////////////////////////////////////////////////////////////////////////////
 // Thread-local storage
@@ -781,9 +780,9 @@ pub fn sleep(dur: Duration) {
 }
 
 // constants for park/unpark
-const EMPTY: usize = 0;
-const PARKED: usize = 1;
-const NOTIFIED: usize = 2;
+const EMPTY: u8 = 0;
+const PARKED: u8 = 1;
+const NOTIFIED: u8 = 2;
 
 /// Blocks unless or until the current thread's token is made available.
 ///
@@ -874,45 +873,14 @@ const NOTIFIED: usize = 2;
 /// [`unpark`]: ../../std/thread/struct.Thread.html#method.unpark
 /// [`thread::park_timeout`]: ../../std/thread/fn.park_timeout.html
 //
-// The implementation currently uses the trivial strategy of a Mutex+Condvar
-// with wakeup flag, which does not actually allow spurious wakeups. In the
+// The implementation currently just delegates to the park/unpark functionality of
+// parking_lot_core, which does not actually allow spurious wakeups. In the
 // future, this will be implemented in a more efficient way, perhaps along the lines of
 // http://cr.openjdk.java.net/~stefank/6989984.1/raw_files/new/src/os/linux/vm/os_linux.cpp
 // or futexes, and in either case may allow spurious wakeups.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn park() {
-    let thread = current();
-
-    // If we were previously notified then we consume this notification and
-    // return quickly.
- if thread.inner.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() { - return - } - - // Otherwise we need to coordinate going to sleep - let mut m = thread.inner.lock.lock().unwrap(); - match thread.inner.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read here, even though we know it will be `NOTIFIED`. - // This is because `unpark` may have been called again since we read - // `NOTIFIED` in the `compare_exchange` above. We must perform an - // acquire operation that synchronizes with that `unpark` to observe - // any writes it made before the call to unpark. To do that we must - // read from the write it made to `state`. - let old = thread.inner.state.swap(EMPTY, SeqCst); - assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - return; - } // should consume this notification, so prohibit spurious wakeups in next park. - Err(_) => panic!("inconsistent park state"), - } - loop { - m = thread.inner.cvar.wait(m).unwrap(); - match thread.inner.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) { - Ok(_) => return, // got a notification - Err(_) => {} // spurious wakeup, go back to sleep - } - } + park_internal(None); } /// Use [`park_timeout`]. @@ -978,35 +946,37 @@ pub fn park_timeout_ms(ms: u32) { /// [park]: fn.park.html #[stable(feature = "park_timeout", since = "1.4.0")] pub fn park_timeout(dur: Duration) { + park_internal(Instant::now().checked_add(dur)); +} + +#[inline] +fn park_internal(timeout: Option) { let thread = current(); - // Like `park` above we have a fast path for an already-notified thread, and - // afterwards we start coordinating for a sleep. + // If we were previously notified then we consume this notification and // return quickly. if thread.inner.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() { return } - let m = thread.inner.lock.lock().unwrap(); - match thread.inner.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read again here, see `park`. - let old = thread.inner.state.swap(EMPTY, SeqCst); - assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - return; - } // should consume this notification, so prohibit spurious wakeups in next park. - Err(_) => panic!("inconsistent park_timeout state"), - } - // Wait with a timeout, and if we spuriously wake up or otherwise wake up - // from a notification we just want to unconditionally set the state back to - // empty, either consuming a notification or un-flagging ourselves as - // parked. - let (_m, _result) = thread.inner.cvar.wait_timeout(m, dur).unwrap(); + let validate = || thread.inner.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst).is_ok(); + let before_sleep = || {}; + let timed_out = |_, _| {}; + + unsafe { + crate::parking_lot_core::park( + (&*thread.inner) as *const _ as usize, + validate, + before_sleep, + timed_out, + crate::parking_lot_core::DEFAULT_PARK_TOKEN, + timeout, + ); + } match thread.inner.state.swap(EMPTY, SeqCst) { NOTIFIED => {} // got a notification, hurray! - PARKED => {} // no notification, alas - n => panic!("inconsistent park_timeout state: {}", n), + PARKED if timeout.is_some() => {} // no notification, OK if timed out + _ => unreachable!(), } } @@ -1043,14 +1013,11 @@ pub struct ThreadId(NonZeroU64); impl ThreadId { // Generate a new unique thread ID. fn new() -> ThreadId { - // We never call `GUARD.init()`, so it is UB to attempt to - // acquire this mutex reentrantly! 
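The observable semantics the `parking_lot_core` delegation above must preserve: an `unpark` token is consumed by the next `park`, and `park` may return spuriously, so callers loop on their own condition. A sketch of the public API from the user's side (not part of the patch):

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

fn main() {
    let flag = Arc::new(AtomicBool::new(false));
    let flag2 = flag.clone();
    let parked = thread::spawn(move || {
        // park() may wake spuriously, so the flag, not the wakeup, is truth.
        while !flag2.load(Ordering::Acquire) {
            thread::park();
        }
    });
    flag.store(true, Ordering::Release);
    parked.thread().unpark();
    parked.join().unwrap();
}
```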
diff --git a/src/libstd/time.rs b/src/libstd/time.rs
index dc97f8c04a839..2164edb24427b 100644
--- a/src/libstd/time.rs
+++ b/src/libstd/time.rs
@@ -16,9 +16,9 @@ use crate::cmp;
 use crate::error::Error;
 use crate::fmt;
 use crate::ops::{Add, Sub, AddAssign, SubAssign};
+use crate::sync::RawMutex;
 use crate::sys::time;
 use crate::sys_common::FromInner;
-use crate::sys_common::mutex::Mutex;
 
 #[stable(feature = "time", since = "1.3.0")]
 pub use core::time::Duration;
@@ -183,10 +183,10 @@ impl Instant {
             return Instant(os_now)
         }
 
-        static LOCK: Mutex = Mutex::new();
+        static LOCK: RawMutex = RawMutex::new();
         static mut LAST_NOW: time::Instant = time::Instant::zero();
+        let _lock = LOCK.lock();
         unsafe {
-            let _lock = LOCK.lock();
             let now = cmp::max(LAST_NOW, os_now);
             LAST_NOW = now;
             Instant(now)
diff --git a/src/parking_lot b/src/parking_lot
new file mode 160000
index 0000000000000..b0280b3b025a6
--- /dev/null
+++ b/src/parking_lot
@@ -0,0 +1 @@
+Subproject commit b0280b3b025a6841922aa4339dccfd30b9076c02
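The `time.rs` hunk keeps the monotonic-clamping logic as it was and only swaps the lock type, hoisting the guard out of the `unsafe` block because `RawMutex::lock()` is a safe call. A sketch of the same clamp over public APIs (a stand-in for the internal types, again assuming a `const` `Mutex::new` as in parking_lot 0.12+):

```rust
use std::time::Instant;
use parking_lot::Mutex;

// Last value handed out; all callers funnel through this one lock.
static LAST_NOW: Mutex<Option<Instant>> = Mutex::new(None);

// Returns an Instant that never goes backwards, even if the OS clock does.
fn monotonic_now() -> Instant {
    let os_now = Instant::now();
    let mut last = LAST_NOW.lock();
    let now = match *last {
        Some(prev) if prev > os_now => prev, // clamp to the newest value seen
        _ => os_now,
    };
    *last = Some(now);
    now
}
```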
diff --git a/src/test/run-pass/command-pre-exec.rs b/src/test/run-pass/command-pre-exec.rs
index 21783fedd39c9..396a534e88d84 100644
--- a/src/test/run-pass/command-pre-exec.rs
+++ b/src/test/run-pass/command-pre-exec.rs
@@ -17,8 +17,7 @@ fn main() {
     if let Some(arg) = env::args().nth(1) {
         match &arg[..] {
             "test1" => println!("hello2"),
-            "test2" => assert_eq!(env::var("FOO").unwrap(), "BAR"),
-            "test3" => assert_eq!(env::current_dir().unwrap().to_str().unwrap(), "/"),
+            "test2" => assert_eq!(env::current_dir().unwrap().to_str().unwrap(), "/"),
             "empty" => {}
             _ => panic!("unknown argument: {}", arg),
         }
@@ -44,20 +43,6 @@ fn main() {
     let output = unsafe {
         Command::new(&me)
             .arg("test2")
-            .pre_exec(|| {
-                env::set_var("FOO", "BAR");
-                Ok(())
-            })
-            .output()
-            .unwrap()
-    };
-    assert!(output.status.success());
-    assert!(output.stderr.is_empty());
-    assert!(output.stdout.is_empty());
-
-    let output = unsafe {
-        Command::new(&me)
-            .arg("test3")
             .pre_exec(|| {
                 env::set_current_dir("/").unwrap();
                 Ok(())
diff --git a/src/tools/tidy/src/lib.rs b/src/tools/tidy/src/lib.rs
index d06c99725bc6a..00454b0ec9005 100644
--- a/src/tools/tidy/src/lib.rs
+++ b/src/tools/tidy/src/lib.rs
@@ -59,6 +59,7 @@ fn filter_dirs(path: &Path) -> bool {
         "src/target",
         "src/stdsimd",
         "src/rust-sgx",
+        "src/parking_lot",
         "target",
         "vendor",
     ];
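The `command-pre-exec.rs` change deletes the hook that called `env::set_var` after `fork()`. The diff gives no rationale, but a plausible one: a forked child may only do async-signal-safe work, and `env::set_var` takes a process-global lock (with this patch, a parking_lot lock) that another thread could hold at fork time, deadlocking the child. A hedged sketch of the shape the test keeps: environment configured on the builder in the parent, only `chdir`-style work in the hook:

```rust
use std::process::{Command, Output};
use std::os::unix::process::CommandExt; // Unix-only, like the test itself

fn spawn_in_root() -> std::io::Result<Output> {
    let mut cmd = Command::new("printenv"); // illustrative child program
    cmd.arg("FOO");
    cmd.env("FOO", "BAR"); // set the environment here, in the parent
    unsafe {
        cmd.pre_exec(|| {
            // Runs in the forked child: stick to async-signal-safe calls
            // such as chdir(2), which set_current_dir wraps.
            std::env::set_current_dir("/")?;
            Ok(())
        });
    }
    cmd.output()
}
```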