Skip to content

Commit cc7a8cc

Browse files
committed
Use fair unlocking (via parking_lot) in tests
In a coming commit we'll add a test that relies heavily on lock fairness, which is not provided by the default Rust `Mutex`. Luckily, `parking_lot` provides an `unlock_fair` method, which we use here, though it implies we have to implement lock poisoning manually.
1 parent 7bc1187 commit cc7a8cc

File tree

5 files changed

+65
-36
lines changed

5 files changed

+65
-36
lines changed

lightning-liquidity/Cargo.toml

+1
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ lightning-persister = { version = "0.2.0", path = "../lightning-persister", defa
3737

3838
proptest = "1.0.0"
3939
tokio = { version = "1.35", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] }
40+
parking_lot = { version = "0.12", default-features = false }
4041

4142
[lints.rust.unexpected_cfgs]
4243
level = "forbid"

lightning/Cargo.toml

+1
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,7 @@ inventory = { version = "0.3", optional = true }
5454
regex = "1.5.6"
5555
lightning-types = { version = "0.3.0", path = "../lightning-types", features = ["_test_utils"] }
5656
lightning-macros = { path = "../lightning-macros" }
57+
parking_lot = { version = "0.12", default-features = false }
5758

5859
[dev-dependencies.bitcoin]
5960
version = "0.32.2"

lightning/src/lib.rs

+2
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,8 @@ extern crate core;
6363

6464
#[cfg(ldk_bench)] extern crate criterion;
6565

66+
#[cfg(all(feature = "std", test))] extern crate parking_lot;
67+
6668
#[macro_use]
6769
pub mod util;
6870
pub mod chain;

lightning/src/ln/monitor_tests.rs

+6-6
Original file line numberDiff line numberDiff line change
@@ -3298,28 +3298,28 @@ fn test_update_replay_panics() {
32983298

32993299
// Ensure applying the force-close update skipping the last normal update fails
33003300
let poisoned_monitor = monitor.clone();
3301-
std::panic::catch_unwind(|| {
3301+
std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
33023302
let _ = poisoned_monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger);
33033303
// We should panic, rather than returning an error here.
3304-
}).unwrap_err();
3304+
})).unwrap_err();
33053305

33063306
// Then apply the last normal and force-close update and make sure applying the preimage
33073307
// updates out-of-order fails.
33083308
monitor.update_monitor(&updates[0], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap();
33093309
monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap();
33103310

33113311
let poisoned_monitor = monitor.clone();
3312-
std::panic::catch_unwind(|| {
3312+
std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
33133313
let _ = poisoned_monitor.update_monitor(&updates[3], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger);
33143314
// We should panic, rather than returning an error here.
3315-
}).unwrap_err();
3315+
})).unwrap_err();
33163316

33173317
// Make sure re-applying the force-close update fails
33183318
let poisoned_monitor = monitor.clone();
3319-
std::panic::catch_unwind(|| {
3319+
std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
33203320
let _ = poisoned_monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger);
33213321
// We should panic, rather than returning an error here.
3322-
}).unwrap_err();
3322+
})).unwrap_err();
33233323

33243324
// ...and finally ensure that applying all the updates succeeds.
33253325
monitor.update_monitor(&updates[2], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap();

lightning/src/sync/debug_sync.rs

+55-30
Original file line numberDiff line numberDiff line change
@@ -5,15 +5,16 @@ use core::time::Duration;
55

66
use std::cell::RefCell;
77

8-
use std::sync::atomic::{AtomicUsize, Ordering};
9-
use std::sync::Condvar as StdCondvar;
10-
use std::sync::Mutex as StdMutex;
11-
use std::sync::MutexGuard as StdMutexGuard;
8+
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
129
use std::sync::RwLock as StdRwLock;
1310
use std::sync::RwLockReadGuard as StdRwLockReadGuard;
1411
use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
1512

16-
pub use std::sync::WaitTimeoutResult;
13+
use parking_lot::Condvar as StdCondvar;
14+
use parking_lot::Mutex as StdMutex;
15+
use parking_lot::MutexGuard as StdMutexGuard;
16+
17+
pub use parking_lot::WaitTimeoutResult;
1718

1819
use crate::prelude::*;
1920

@@ -46,21 +47,19 @@ impl Condvar {
4647
&'a self, guard: MutexGuard<'a, T>, condition: F,
4748
) -> LockResult<MutexGuard<'a, T>> {
4849
let mutex: &'a Mutex<T> = guard.mutex;
49-
self.inner
50-
.wait_while(guard.into_inner(), condition)
51-
.map(|lock| MutexGuard { mutex, lock })
52-
.map_err(|_| ())
50+
let mut lock = guard.into_inner();
51+
self.inner.wait_while(&mut lock, condition);
52+
Ok(MutexGuard { mutex, lock: Some(lock) })
5353
}
5454

5555
#[allow(unused)]
5656
pub fn wait_timeout_while<'a, T, F: FnMut(&mut T) -> bool>(
5757
&'a self, guard: MutexGuard<'a, T>, dur: Duration, condition: F,
5858
) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
5959
let mutex = guard.mutex;
60-
self.inner
61-
.wait_timeout_while(guard.into_inner(), dur, condition)
62-
.map_err(|_| ())
63-
.map(|(lock, e)| (MutexGuard { mutex, lock }, e))
60+
let mut lock = guard.into_inner();
61+
let e = self.inner.wait_while_for(&mut lock, condition, dur);
62+
Ok((MutexGuard { mutex, lock: Some(lock) }, e))
6463
}
6564

6665
pub fn notify_all(&self) {
@@ -150,7 +149,7 @@ impl LockMetadata {
150149
LOCKS_INIT.call_once(|| unsafe {
151150
LOCKS = Some(StdMutex::new(new_hash_map()));
152151
});
153-
let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
152+
let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock();
154153
match locks.entry(lock_constr_location) {
155154
hash_map::Entry::Occupied(e) => {
156155
assert_eq!(lock_constr_colno,
@@ -185,7 +184,7 @@ impl LockMetadata {
185184
}
186185
}
187186
for (_locked_idx, locked) in held.borrow().iter() {
188-
for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
187+
for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().iter() {
189188
let is_dep_this_lock = *locked_dep_idx == this.lock_idx;
190189
let has_same_construction = *locked_dep_idx == locked.lock_idx;
191190
if is_dep_this_lock && !has_same_construction {
@@ -210,7 +209,7 @@ impl LockMetadata {
210209
}
211210
}
212211
// Insert any already-held locks in our locked-before set.
213-
let mut locked_before = this.locked_before.lock().unwrap();
212+
let mut locked_before = this.locked_before.lock();
214213
if !locked_before.contains_key(&locked.lock_idx) {
215214
let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
216215
locked_before.insert(lockdep.lock.lock_idx, lockdep);
@@ -237,7 +236,7 @@ impl LockMetadata {
237236
// Since a try-lock will simply fail if the lock is held already, we do not
238237
// consider try-locks to ever generate lockorder inversions. However, if a try-lock
239238
// succeeds, we do consider it to have created lockorder dependencies.
240-
let mut locked_before = this.locked_before.lock().unwrap();
239+
let mut locked_before = this.locked_before.lock();
241240
for (locked_idx, locked) in held.borrow().iter() {
242241
if !locked_before.contains_key(locked_idx) {
243242
let lockdep =
@@ -252,11 +251,17 @@ impl LockMetadata {
252251

253252
pub struct Mutex<T: Sized> {
254253
inner: StdMutex<T>,
254+
poisoned: AtomicBool,
255255
deps: Arc<LockMetadata>,
256256
}
257+
257258
impl<T: Sized> Mutex<T> {
258259
pub(crate) fn into_inner(self) -> LockResult<T> {
259-
self.inner.into_inner().map_err(|_| ())
260+
if self.poisoned.load(Ordering::Acquire) {
261+
Err(())
262+
} else {
263+
Ok(self.inner.into_inner())
264+
}
260265
}
261266
}
262267

@@ -278,14 +283,14 @@ impl<T: Sized + fmt::Debug> fmt::Debug for Mutex<T> {
278283
#[must_use = "if unused the Mutex will immediately unlock"]
279284
pub struct MutexGuard<'a, T: Sized + 'a> {
280285
mutex: &'a Mutex<T>,
281-
lock: StdMutexGuard<'a, T>,
286+
lock: Option<StdMutexGuard<'a, T>>,
282287
}
283288

284289
impl<'a, T: Sized> MutexGuard<'a, T> {
285290
fn into_inner(self) -> StdMutexGuard<'a, T> {
286291
// Somewhat unclear why we cannot move out of self.lock, but doing so gets E0509.
287292
unsafe {
288-
let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock);
293+
let v: StdMutexGuard<'a, T> = std::ptr::read(self.lock.as_ref().unwrap());
289294
std::mem::forget(self);
290295
v
291296
}
@@ -297,44 +302,63 @@ impl<T: Sized> Drop for MutexGuard<'_, T> {
297302
LOCKS_HELD.with(|held| {
298303
held.borrow_mut().remove(&self.mutex.deps.lock_idx);
299304
});
305+
if std::thread::panicking() {
306+
self.mutex.poisoned.store(true, Ordering::Release);
307+
}
308+
StdMutexGuard::unlock_fair(self.lock.take().unwrap());
300309
}
301310
}
302311

303312
impl<T: Sized> Deref for MutexGuard<'_, T> {
304313
type Target = T;
305314

306315
fn deref(&self) -> &T {
307-
&self.lock.deref()
316+
&self.lock.as_ref().unwrap().deref()
308317
}
309318
}
310319

311320
impl<T: Sized> DerefMut for MutexGuard<'_, T> {
312321
fn deref_mut(&mut self) -> &mut T {
313-
self.lock.deref_mut()
322+
self.lock.as_mut().unwrap().deref_mut()
314323
}
315324
}
316325

317326
impl<T> Mutex<T> {
318327
pub fn new(inner: T) -> Mutex<T> {
319-
Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() }
328+
Mutex {
329+
inner: StdMutex::new(inner),
330+
poisoned: AtomicBool::new(false),
331+
deps: LockMetadata::new(),
332+
}
320333
}
321334

322335
pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
323336
LockMetadata::pre_lock(&self.deps, false);
324-
self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
337+
let lock = self.inner.lock();
338+
if self.poisoned.load(Ordering::Acquire) {
339+
Err(())
340+
} else {
341+
Ok(MutexGuard { mutex: self, lock: Some(lock) })
342+
}
325343
}
326344

327345
pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
328-
let res =
329-
self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
346+
let res = self.inner.try_lock().ok_or(());
330347
if res.is_ok() {
348+
if self.poisoned.load(Ordering::Acquire) {
349+
return Err(());
350+
}
331351
LockMetadata::try_locked(&self.deps);
332352
}
333-
res
353+
res.map(|lock| MutexGuard { mutex: self, lock: Some(lock) })
334354
}
335355

336356
pub fn get_mut<'a>(&'a mut self) -> LockResult<&'a mut T> {
337-
self.inner.get_mut().map_err(|_| ())
357+
if self.poisoned.load(Ordering::Acquire) {
358+
Err(())
359+
} else {
360+
Ok(self.inner.get_mut())
361+
}
338362
}
339363
}
340364

@@ -345,9 +369,10 @@ impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
345369
}
346370
type ExclLock = MutexGuard<'a, T>;
347371
#[inline]
348-
fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<T> {
372+
fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<'a, T> {
349373
LockMetadata::pre_lock(&self.deps, true);
350-
self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).unwrap()
374+
let lock = self.inner.lock();
375+
MutexGuard { mutex: self, lock: Some(lock) }
351376
}
352377
}
353378

0 commit comments

Comments
 (0)