Commit f32a5df
Clean up once code
This was originally intended to just change `swap` to `compare_exchange`, however other changes were needed too:

* Load with `Release` ordering seemed wrong because if it loaded `DONE` before the while loop it wouldn't synchronize with the storing thread (see the sketch after this list).
* There's no point in repeatedly checking whether the value is `NONE`, so this check was moved above the while loop.
* Due to a mistake I wanted to inspect the code using `miri` but couldn't because of the FFI call. Separating the once implementation from its consumer allowed writing a test in pure Rust and inspecting it via `miri`.
* The initializing code now skips loading the value again because it already knows it's initialized.
* The orderings are now explained in the comments.
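A minimal sketch of the Release/Acquire pairing behind the first point (a standalone example, not code from this commit; `STATE` and `DATA` are made-up names): a reader must load with `Acquire` to synchronize with the initializer's `Release` store, otherwise observing `DONE` would not guarantee that the data written before it is visible.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;

const DONE: usize = 2;

static STATE: AtomicUsize = AtomicUsize::new(0);
static mut DATA: u32 = 0;

fn writer() {
    unsafe { DATA = 47 };                  // plain, non-atomic write...
    STATE.store(DONE, Ordering::Release);  // ...published by this Release store
}

fn reader() {
    // Acquire pairs with the Release store above: once DONE is observed, the
    // write to DATA is guaranteed to be visible. A Relaxed load here would
    // give no such guarantee.
    while STATE.load(Ordering::Acquire) != DONE {
        thread::yield_now();
    }
    assert_eq!(unsafe { DATA }, 47);
}

fn main() {
    let w = thread::spawn(writer);
    let r = thread::spawn(reader);
    w.join().unwrap();
    r.join().unwrap();
}
```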
1 parent f531be3 · commit f32a5df

1 file changed: secp256k1-sys/src/lib.rs (+84 -29)
@@ -663,7 +663,6 @@ impl<T> CPtr for [T] {
 #[cfg(fuzzing)]
 mod fuzz_dummy {
     use super::*;
-    use core::sync::atomic::{AtomicUsize, Ordering};
 
     #[cfg(rust_secp_no_symbol_renaming)] compile_error!("We do not support fuzzing with rust_secp_no_symbol_renaming");
 
@@ -673,6 +672,80 @@ mod fuzz_dummy {
         fn rustsecp256k1_v0_4_1_context_preallocated_clone(cx: *const Context, prealloc: *mut c_void) -> *mut Context;
     }
 
+    mod once {
+        use core::sync::atomic::{AtomicUsize, Ordering};
+
+        const NONE: usize = 0;
+        const WORKING: usize = 1;
+        const DONE: usize = 2;
+
+        pub(crate) struct Once(AtomicUsize);
+
+        impl Once {
+            pub(crate) const INIT: Once = Once(AtomicUsize::new(NONE));
+
+            pub(crate) fn run(&self, f: impl FnOnce()) {
+                // Acquire ordering because if it's DONE the following reads must go after this load.
+                let mut have_ctx = self.0.load(Ordering::Acquire);
+                if have_ctx == NONE {
+                    // Ordering: on success we're only signalling to other threads that work is in
+                    // progress without transferring other data, so it could be Relaxed; on failure
+                    // the value may be DONE, so we want Acquire to safely proceed in that case.
+                    // However compare_exchange doesn't allow the failure ordering to be stronger
+                    // than the success ordering, so both are Acquire.
+                    match self.0.compare_exchange(NONE, WORKING, Ordering::Acquire, Ordering::Acquire) {
+                        Ok(_) => {
+                            f();
+                            // We wrote data in memory that other threads may read, so we need Release.
+                            self.0.store(DONE, Ordering::Release);
+                            return;
+                        },
+                        Err(value) => have_ctx = value,
+                    }
+                }
+                while have_ctx != DONE {
+                    // Another thread is building, just busy-loop until they're done.
+                    assert_eq!(have_ctx, WORKING);
+                    // This thread will read whatever the other thread wrote, so this needs to be Acquire.
+                    have_ctx = self.0.load(Ordering::Acquire);
+                    #[cfg(feature = "std")]
+                    std::thread::yield_now();
+                }
+            }
+        }
+
+        #[cfg(all(test, feature = "std"))]
+        mod tests {
+            use super::Once;
+
+            #[test]
+            fn test_once() {
+                use std::cell::UnsafeCell;
+                static ONCE: Once = Once::INIT;
+                struct PretendSync(UnsafeCell<u32>);
+
+                static VALUE: PretendSync = PretendSync(UnsafeCell::new(42));
+                unsafe impl Sync for PretendSync {}
+
+                let threads = (0..5).map(|_| std::thread::spawn(|| {
+                    ONCE.run(|| unsafe {
+                        *VALUE.0.get() = 47;
+                    });
+                    unsafe {
+                        assert_eq!(*VALUE.0.get(), 47);
+                    }
+                }))
+                .collect::<Vec<_>>();
+                for thread in threads {
+                    thread.join().unwrap();
+                }
+                unsafe {
+                    assert_eq!(*VALUE.0.get(), 47);
+                }
+            }
+        }
+    }
+
     #[cfg(feature = "lowmemory")]
     const CTX_SIZE: usize = 1024 * 65;
     #[cfg(not(feature = "lowmemory"))]
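A minimal sketch (illustrative only, not part of the diff; the function names are made up) of the behavioural difference between `swap` and `compare_exchange` that motivated the change: `swap` unconditionally writes `WORKING`, so a thread arriving after initialization clobbers `DONE` and has to restore it, which is exactly what the removed branch in the next hunk did; `compare_exchange` touches the state only while it is still `NONE`.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

const NONE: usize = 0;
const WORKING: usize = 1;
const DONE: usize = 2;

// Old approach: swap always stores WORKING, even over DONE, so the caller
// has to detect the clobber and write DONE back.
fn try_claim_with_swap(state: &AtomicUsize) -> usize {
    let prev = state.swap(WORKING, Ordering::AcqRel);
    if prev == DONE {
        state.store(DONE, Ordering::Release); // undo the accidental overwrite
    }
    prev
}

// New approach: compare_exchange only performs the NONE -> WORKING
// transition; if the state was already WORKING or DONE it is left
// untouched and returned in the Err variant.
fn try_claim_with_cas(state: &AtomicUsize) -> Result<usize, usize> {
    state.compare_exchange(NONE, WORKING, Ordering::Acquire, Ordering::Acquire)
}

fn main() {
    let state = AtomicUsize::new(DONE);
    assert_eq!(try_claim_with_swap(&state), DONE);     // briefly clobbered DONE
    assert_eq!(try_claim_with_cas(&state), Err(DONE)); // state never disturbed
}
```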
@@ -683,41 +756,23 @@ mod fuzz_dummy {
         CTX_SIZE
     }
 
-    static HAVE_PREALLOCATED_CONTEXT: AtomicUsize = AtomicUsize::new(0);
-    const HAVE_CONTEXT_NONE: usize = 0;
-    const HAVE_CONTEXT_WORKING: usize = 1;
-    const HAVE_CONTEXT_DONE: usize = 2;
+    static HAVE_PREALLOCATED_CONTEXT: once::Once = once::Once::INIT;
     static mut PREALLOCATED_CONTEXT: [u8; CTX_SIZE] = [0; CTX_SIZE];
     pub unsafe fn secp256k1_context_preallocated_create(prealloc: *mut c_void, flags: c_uint) -> *mut Context {
         // While applications should generally avoid creating too many contexts, sometimes fuzzers
         // perform tasks repeatedly which real applications may only do rarely. Thus, we want to
         // avoid being overly slow here. We do so by having a static context and copying it into
         // new buffers instead of recalculating it. Because we shouldn't rely on std, we use a
         // simple hand-written OnceFlag built out of an atomic to gate the global static.
-        let mut have_ctx = HAVE_PREALLOCATED_CONTEXT.load(Ordering::Relaxed);
-        while have_ctx != HAVE_CONTEXT_DONE {
-            if have_ctx == HAVE_CONTEXT_NONE {
-                have_ctx = HAVE_PREALLOCATED_CONTEXT.swap(HAVE_CONTEXT_WORKING, Ordering::AcqRel);
-                if have_ctx == HAVE_CONTEXT_NONE {
-                    assert!(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_START_SIGN | SECP256K1_START_VERIFY) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
-                    assert_eq!(rustsecp256k1_v0_4_1_context_preallocated_create(
-                        PREALLOCATED_CONTEXT[..].as_ptr() as *mut c_void,
-                        SECP256K1_START_SIGN | SECP256K1_START_VERIFY),
-                        PREALLOCATED_CONTEXT[..].as_ptr() as *mut Context);
-                    assert_eq!(HAVE_PREALLOCATED_CONTEXT.swap(HAVE_CONTEXT_DONE, Ordering::AcqRel),
-                        HAVE_CONTEXT_WORKING);
-                } else if have_ctx == HAVE_CONTEXT_DONE {
-                    // Another thread finished while we were swapping.
-                    HAVE_PREALLOCATED_CONTEXT.store(HAVE_CONTEXT_DONE, Ordering::Release);
-                }
-            } else {
-                // Another thread is building, just busy-loop until they're done.
-                assert_eq!(have_ctx, HAVE_CONTEXT_WORKING);
-                have_ctx = HAVE_PREALLOCATED_CONTEXT.load(Ordering::Acquire);
-                #[cfg(feature = "std")]
-                std::thread::yield_now();
-            }
-        }
+
+        HAVE_PREALLOCATED_CONTEXT.run(|| {
+            assert!(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_START_SIGN | SECP256K1_START_VERIFY) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
+            assert_eq!(rustsecp256k1_v0_4_1_context_preallocated_create(
+                PREALLOCATED_CONTEXT[..].as_ptr() as *mut c_void,
+                SECP256K1_START_SIGN | SECP256K1_START_VERIFY),
+                PREALLOCATED_CONTEXT[..].as_ptr() as *mut Context);
+        });
+
         ptr::copy_nonoverlapping(PREALLOCATED_CONTEXT[..].as_ptr(), prealloc as *mut u8, CTX_SIZE);
         let ptr = (prealloc as *mut u8).add(CTX_SIZE).sub(std::mem::size_of::<c_uint>());
         (ptr as *mut c_uint).write(flags);
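Since the `once` module and its test no longer involve any FFI calls, the test can be checked with `miri`, as the commit message mentions. Assuming a nightly toolchain with the miri component installed (an example invocation, not something the commit documents), `cargo +nightly miri test` will run `test_once` under miri's undefined-behavior and data-race checks.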
