@@ -663,7 +663,6 @@ impl<T> CPtr for [T] {
 #[cfg(fuzzing)]
 mod fuzz_dummy {
     use super::*;
-    use core::sync::atomic::{AtomicUsize, Ordering};
 
     #[cfg(rust_secp_no_symbol_renaming)] compile_error!("We do not support fuzzing with rust_secp_no_symbol_renaming");
@@ -673,6 +672,80 @@ mod fuzz_dummy {
         fn rustsecp256k1_v0_4_1_context_preallocated_clone(cx: *const Context, prealloc: *mut c_void) -> *mut Context;
     }
 
+    mod once {
+        use core::sync::atomic::{AtomicUsize, Ordering};
+
+        const NONE: usize = 0;
+        const WORKING: usize = 1;
+        const DONE: usize = 2;
+
+        pub(crate) struct Once(AtomicUsize);
+
+        impl Once {
+            pub(crate) const INIT: Once = Once(AtomicUsize::new(NONE));
+
+            pub(crate) fn run(&self, f: impl FnOnce()) {
+                // Acquire ordering because if it's DONE the following reads must go after this load.
+                let mut have_ctx = self.0.load(Ordering::Acquire);
+                if have_ctx == NONE {
+                    // Ordering: on success we're only signalling to other threads that work is in
+                    // progress, without transferring other data, so it could be Relaxed; on failure
+                    // the value may be DONE, so we want Acquire to safely proceed in that case.
+                    // However, compare_exchange doesn't allow the failure ordering to be stronger
+                    // than the success ordering, so both are Acquire.
+                    match self.0.compare_exchange(NONE, WORKING, Ordering::Acquire, Ordering::Acquire) {
+                        Ok(_) => {
+                            f();
+                            // We wrote data to memory that other threads may read, so we need Release.
+                            self.0.store(DONE, Ordering::Release);
+                            return;
+                        },
+                        Err(value) => have_ctx = value,
+                    }
+                }
+                while have_ctx != DONE {
+                    // Another thread is building, just busy-loop until they're done.
+                    assert_eq!(have_ctx, WORKING);
+                    // This thread will read whatever the other thread wrote, so this needs to be Acquire.
+                    have_ctx = self.0.load(Ordering::Acquire);
+                    #[cfg(feature = "std")]
+                    ::std::thread::yield_now();
+                }
+            }
+        }
+
+        #[cfg(all(test, feature = "std"))]
+        mod tests {
+            use super::Once;
+
+            #[test]
+            fn test_once() {
+                use std::cell::UnsafeCell;
+                static ONCE: Once = Once::INIT;
+                struct PretendSync(UnsafeCell<u32>);
+
+                static VALUE: PretendSync = PretendSync(UnsafeCell::new(42));
+                unsafe impl Sync for PretendSync {}
+
+                let threads = (0..5).map(|_| ::std::thread::spawn(|| {
+                    ONCE.run(|| unsafe {
+                        *VALUE.0.get() = 47;
+                    });
+                    unsafe {
+                        assert_eq!(*VALUE.0.get(), 47);
+                    }
+                }))
+                    .collect::<Vec<_>>();
+                for thread in threads {
+                    thread.join().unwrap();
+                }
+                unsafe {
+                    assert_eq!(*VALUE.0.get(), 47);
+                }
+            }
+        }
+    }
+
     #[cfg(feature = "lowmemory")]
     const CTX_SIZE: usize = 1024 * 65;
     #[cfg(not(feature = "lowmemory"))]
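
The Acquire/Release pairing in `run` above is the standard publication pattern: the initializing thread's Release store of DONE must happen-before any Acquire load that observes DONE, so whatever `f` wrote is visible to threads that skip the closure. A minimal standalone sketch of that pattern, with illustrative names that are not part of the patch:

    use core::sync::atomic::{AtomicUsize, Ordering};

    static FLAG: AtomicUsize = AtomicUsize::new(0); // 0 = not ready, 2 = ready (mirrors NONE/DONE)
    static mut DATA: u64 = 0;

    fn writer() {
        unsafe { DATA = 123 };            // plain write to shared data...
        FLAG.store(2, Ordering::Release); // ...published by the Release store
    }

    fn reader() -> Option<u64> {
        if FLAG.load(Ordering::Acquire) == 2 { // pairs with the Release store
            Some(unsafe { DATA })              // the write happens-before this read
        } else {
            None
        }
    }

If the reader's load were Relaxed instead, observing FLAG == 2 would not guarantee visibility of the write to DATA, which is exactly why `run` cannot use a Relaxed load on its fast path.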
@@ -683,41 +756,23 @@ mod fuzz_dummy {
         CTX_SIZE
     }
 
-    static HAVE_PREALLOCATED_CONTEXT: AtomicUsize = AtomicUsize::new(0);
-    const HAVE_CONTEXT_NONE: usize = 0;
-    const HAVE_CONTEXT_WORKING: usize = 1;
-    const HAVE_CONTEXT_DONE: usize = 2;
+    static HAVE_PREALLOCATED_CONTEXT: once::Once = once::Once::INIT;
     static mut PREALLOCATED_CONTEXT: [u8; CTX_SIZE] = [0; CTX_SIZE];
     pub unsafe fn secp256k1_context_preallocated_create(prealloc: *mut c_void, flags: c_uint) -> *mut Context {
         // While applications should generally avoid creating too many contexts, sometimes fuzzers
         // perform tasks repeatedly which real applications may only do rarely. Thus, we want to
         // avoid being overly slow here. We do so by having a static context and copying it into
         // new buffers instead of recalculating it. Because we shouldn't rely on std, we use a
         // simple hand-written OnceFlag built out of an atomic to gate the global static.
-        let mut have_ctx = HAVE_PREALLOCATED_CONTEXT.load(Ordering::Relaxed);
-        while have_ctx != HAVE_CONTEXT_DONE {
-            if have_ctx == HAVE_CONTEXT_NONE {
-                have_ctx = HAVE_PREALLOCATED_CONTEXT.swap(HAVE_CONTEXT_WORKING, Ordering::AcqRel);
-                if have_ctx == HAVE_CONTEXT_NONE {
-                    assert!(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_START_SIGN | SECP256K1_START_VERIFY) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
-                    assert_eq!(rustsecp256k1_v0_4_1_context_preallocated_create(
-                        PREALLOCATED_CONTEXT[..].as_ptr() as *mut c_void,
-                        SECP256K1_START_SIGN | SECP256K1_START_VERIFY),
-                        PREALLOCATED_CONTEXT[..].as_ptr() as *mut Context);
-                    assert_eq!(HAVE_PREALLOCATED_CONTEXT.swap(HAVE_CONTEXT_DONE, Ordering::AcqRel),
-                        HAVE_CONTEXT_WORKING);
-                } else if have_ctx == HAVE_CONTEXT_DONE {
-                    // Another thread finished while we were swapping.
-                    HAVE_PREALLOCATED_CONTEXT.store(HAVE_CONTEXT_DONE, Ordering::Release);
-                }
-            } else {
-                // Another thread is building, just busy-loop until they're done.
-                assert_eq!(have_ctx, HAVE_CONTEXT_WORKING);
-                have_ctx = HAVE_PREALLOCATED_CONTEXT.load(Ordering::Acquire);
-                #[cfg(feature = "std")]
-                std::thread::yield_now();
-            }
-        }
+
+        HAVE_PREALLOCATED_CONTEXT.run(|| {
+            assert!(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_START_SIGN | SECP256K1_START_VERIFY) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
+            assert_eq!(rustsecp256k1_v0_4_1_context_preallocated_create(
+                PREALLOCATED_CONTEXT[..].as_ptr() as *mut c_void,
+                SECP256K1_START_SIGN | SECP256K1_START_VERIFY),
+                PREALLOCATED_CONTEXT[..].as_ptr() as *mut Context);
+        });
+
         ptr::copy_nonoverlapping(PREALLOCATED_CONTEXT[..].as_ptr(), prealloc as *mut u8, CTX_SIZE);
         let ptr = (prealloc as *mut u8).add(CTX_SIZE).sub(std::mem::size_of::<c_uint>());
         (ptr as *mut c_uint).write(flags);
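
Where `std` can be assumed, this gating is what `std::sync::Once` provides out of the box; the hand-rolled flag exists only because this crate must work without `std`, as the comment above notes. A hedged sketch of the `std` equivalent, with illustrative names not taken from the patch:

    use std::sync::Once;

    static INIT: Once = Once::new();
    static mut CONTEXT: [u8; 1024] = [0; 1024]; // stand-in for PREALLOCATED_CONTEXT

    fn context_ptr() -> *const u8 {
        // call_once blocks concurrent callers until the closure completes and
        // publishes its writes, matching the NONE -> WORKING -> DONE transitions
        // of the hand-written Once.
        INIT.call_once(|| unsafe { CONTEXT[0] = 1 });
        unsafe { CONTEXT.as_ptr() }
    }

One behavioral difference worth noting: `std::sync::Once` parks waiting threads, while the patch's `Once::run` busy-loops (yielding only when the "std" feature is enabled), an acceptable trade-off for a fuzzing-only code path.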