@@ -54,9 +54,10 @@ impl MintClient {
let mut task_group = task_group.make_subgroup().await;
- // TODO: If the client attempts any operations between while the recovery is working,
- // the recovery code will most probably miss them, which might lead to incorrect state.
- // We should probably lock everything in some way during recovery for corectness.
+ // TODO: If the client attempts any operations while the recovery is
+ // working, the recovery code will most probably miss them, which might
+ // lead to an incorrect state. We should probably lock everything in some
+ // way during recovery for correctness.
let snapshot = match self
.restore_current_state_from_backup(&mut task_group, backup, gap_limit)
.await?
@@ -106,7 +107,8 @@ impl MintClient {
/// Delete all the note-related data from the database
///
- /// Useful for cleaning previous data before restoring data recovered from backup.
+ /// Useful for cleaning previous data before restoring data recovered from
+ /// backup.
async fn wipe_notes_static(dbtx: &mut DatabaseTransaction<'_>) -> Result<()> {
dbtx.remove_by_prefix(&NoteKeyPrefix).await?;
dbtx.remove_by_prefix(&OutputFinalizationKeyPrefix).await?;
@@ -142,7 +144,8 @@ impl MintClient {
Ok(responses.into_iter().next())
}
- /// Static version of [`Self::get_derived_backup_encryption_key`] for testing without creating whole `MintClient`
+ /// Static version of [`Self::get_derived_backup_encryption_key`] for
+ /// testing without creating a whole `MintClient`
fn get_derived_backup_encryption_key_static(secret: &DerivableSecret) -> aead::LessSafeKey {
aead::LessSafeKey::new(
secret
@@ -151,9 +154,11 @@ impl MintClient {
)
}
- /// Static version of [`Self::get_derived_backup_signing_key`] for testing without creating whole `MintClient`
+ /// Static version of [`Self::get_derived_backup_signing_key`] for testing
+ /// without creating a whole `MintClient`
fn get_derived_backup_signing_key_static(secret: &DerivableSecret) -> secp256k1_zkp::KeyPair {
- // TODO: Do we need that one derivation level? This key is already derived for the mint itself, and internally another kdf will be done with key type tag.
+ // TODO: Do we need that one derivation level? This key is already derived for
+ // the mint itself, and internally another KDF will be done with the key type tag.
secret
.child_key(MINT_E_CASH_BACKUP_SNAPSHOT_TYPE_CHILD_ID)
.to_secp_key(&Secp256k1::<secp256k1::SignOnly>::gen_new())
@@ -252,7 +257,10 @@ impl MintClient {
return Err(e.into());
}
};
- // TODO: This -1 is probably not necessary, as it should be enough to start from the exact epoch the snapshot was taken, but it is harmless to start from any epoch in the past, and starting a bit earlier makes it more robust in face of some inconsistency that we've missed.
+ // TODO: This -1 is probably not necessary, as it should be enough to start from
+ // the exact epoch the snapshot was taken in, but it is harmless to start from any
+ // epoch in the past, and starting a bit earlier makes it more robust in the face
+ // of some inconsistency that we've missed.
let start_epoch = backup.epoch_count.saturating_sub(1);
let epoch_range = start_epoch..current_epoch_count;
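A minimal, self-contained sketch of the epoch-range computation discussed in the TODO above; only the names `epoch_count` and `current_epoch_count` come from the diff, everything else is illustrative.

// Start one epoch before the snapshot for robustness; `saturating_sub` keeps
// the start at 0 instead of underflowing when the backup was taken at epoch 0.
fn recovery_epoch_range(backup_epoch_count: u64, current_epoch_count: u64) -> std::ops::Range<u64> {
    let start_epoch = backup_epoch_count.saturating_sub(1);
    start_epoch..current_epoch_count
}

fn main() {
    assert_eq!(recovery_epoch_range(0, 5), 0..5);    // no underflow at epoch 0
    assert_eq!(recovery_epoch_range(10, 12), 9..12); // start one epoch early
}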
@@ -273,7 +281,8 @@ impl MintClient {
return Ok(Err(Cancelled));
}
// if `recv` returned `None` that means fetch_epoch finished prematurely,
- // withouth sending an `Err` which is supposed to mean `is_shutting_down() == true`
+ // without sending an `Err`, which is supposed to mean `is_shutting_down() ==
+ // true`
info!(epoch, "Awaiting epoch");
let epoch_history = epoch_res?;
assert_eq!(epoch_history.outcome.epoch, epoch);
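A rough illustration of the convention described in the comment above, assuming `tokio` channels and `anyhow` plus made-up types rather than the actual fedimint ones: a closed channel is treated as a shutdown signal, not as an error.

use tokio::sync::mpsc;

#[derive(Debug)]
struct EpochHistory {
    epoch: u64,
}

struct Cancelled;

async fn await_next_epoch(
    rx: &mut mpsc::Receiver<anyhow::Result<EpochHistory>>,
    expected_epoch: u64,
) -> anyhow::Result<Result<EpochHistory, Cancelled>> {
    let epoch_res = match rx.recv().await {
        Some(res) => res,
        // The sender task stopped without reporting an `Err`, which by the
        // convention above means it is shutting down, not that it failed.
        None => return Ok(Err(Cancelled)),
    };
    let epoch_history = epoch_res?;
    assert_eq!(epoch_history.epoch, expected_epoch);
    Ok(Ok(epoch_history))
}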
@@ -387,20 +396,23 @@ pub struct EcashRecoveryFinalState {
next_note_idx: Tiered<NoteIndex>,
}
- /// The state machine used for fast-fowarding backup from point when it was taken to the present time
- /// by following epoch history items from the time the snapshot was taken.
+ /// The state machine used for fast-forwarding a backup from the point when it
+ /// was taken to the present time by following epoch history items from the
+ /// time the snapshot was taken.
///
- /// The caller is responsible for creating it, and then feeding it in order all valid
- /// consensus items from the epoch history between time taken (or even somewhat before it) and
- /// present time.
+ /// The caller is responsible for creating it, and then feeding it, in order,
+ /// all valid consensus items from the epoch history between the time the
+ /// snapshot was taken (or even somewhat before it) and the present time.
#[derive(Debug)]
struct EcashRecoveryTracker {
/// Nonces that we track that are currently spendable.
spendable_note_by_nonce: HashMap<Nonce, (Amount, SpendableNote)>,
- /// Outputs (by `OutPoint`) we track federation member confirmations for blind nonces.
+ /// Outputs (by `OutPoint`) for which we track federation member confirmations
+ /// of blind nonces.
///
- /// Once we get enough confirmation (valid shares), these become new spendable notes.
+ /// Once we get enough confirmations (valid shares), these become new
+ /// spendable notes.
///
/// Note that `NoteIssuanceRequest` is optional, as sometimes we might need
/// to handle a tx where only some of the blind nonces were in the pool.
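To make the calling convention from the doc comment above concrete, here is a hypothetical skeleton; the type and method names are placeholders, not the real fedimint API, and only the create, feed-in-order, finalize flow reflects the comment.

// Placeholder types for illustration only.
struct RecoveredSnapshot;
struct ConsensusItem;
struct FinalState;

struct Tracker {
    // recovered state would live here
}

impl Tracker {
    fn from_backup(_snapshot: &RecoveredSnapshot) -> Self {
        Tracker {}
    }

    fn handle_consensus_item(&mut self, _item: &ConsensusItem) {
        // classify the item: does it spend our notes, confirm one of our
        // outputs, or use one of our pending blind nonces?
    }

    fn finalize(self) -> FinalState {
        FinalState
    }
}

fn fast_forward(snapshot: &RecoveredSnapshot, items_in_order: &[ConsensusItem]) -> FinalState {
    let mut tracker = Tracker::from_backup(snapshot);
    for item in items_in_order {
        // items must be fed in epoch order, from (roughly) the snapshot's
        // epoch up to the present one
        tracker.handle_consensus_item(item);
    }
    tracker.finalize()
}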
@@ -419,18 +431,19 @@ struct EcashRecoveryTracker {
/// Once we see them, we move the tracking to `pending_outputs`
///
/// Note: since looking up nonces is going to be the most common operation,
- /// the pool is kept shared (so only one lookup is enough), and replenishment
- /// is done each time a note is consumed.
+ /// the pool is kept shared (so only one lookup is enough), and
+ /// replenishment is done each time a note is consumed.
pending_nonces: HashMap<BlindedMessage, (NoteIssuanceRequest, NoteIndex, Amount)>,
- /// Tail of `pending`. `pending_notes` is filled by generating note with this index
- /// and incrementing it.
+ /// Tail of `pending`. `pending_notes` is filled by generating a note with
+ /// this index and incrementing it.
next_pending_note_idx: Tiered<NoteIndex>,
- /// `LastECashNoteIndex` but tracked in flight. Basically max index of any note that got
- /// a partial sig from the federation (initialled from the backup value).
- /// TODO: One could imagine a case where the note was issued but not get any partial sigs yet.
- /// Very unlikely in real life scenario, but worth considering.
+ /// `LastECashNoteIndex` but tracked in flight. Basically the max index of any
+ /// note that got a partial sig from the federation (initialized from the
+ /// backup value). TODO: One could imagine a case where the note was
+ /// issued but did not get any partial sigs yet. Very unlikely in a real
+ /// life scenario, but worth considering.
last_mined_nonce_idx: Tiered<NoteIndex>,
/// Threshold
@@ -447,7 +460,8 @@ struct EcashRecoveryTracker {
/// Aggregate public key for each amount tier
tbs_pks: Tiered<AggregatePublicKey>,
- /// The number of nonces we look-ahead when looking for mints (per each amount).
+ /// The number of nonces we look ahead when looking for mints (per each
+ /// amount).
gap_limit: usize,
}
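A small, hypothetical sketch of the gap-limit lookahead this field controls (all names here are invented): for each amount tier we keep `gap_limit` deterministically derived nonces ahead of the last index known to be used, so notes issued after the snapshot can still be recognized in the epoch history.

use std::collections::HashMap;

// Stand-in for the real deterministic nonce derivation from the root secret.
fn derive_nonce(amount: u64, idx: u64) -> String {
    format!("nonce({amount},{idx})")
}

// `last_used_idx`: amount tier -> last note index known to be used (from the backup).
// Returns: nonce -> (amount tier, index) lookup pool.
fn build_pending_pool(
    last_used_idx: &HashMap<u64, u64>,
    gap_limit: u64,
) -> HashMap<String, (u64, u64)> {
    let mut pool = HashMap::new();
    for (&amount, &last_idx) in last_used_idx {
        // keep `gap_limit` nonces ahead of the last known-used index
        for idx in (last_idx + 1)..=(last_idx + gap_limit) {
            pool.insert(derive_nonce(amount, idx), (amount, idx));
        }
    }
    pool
}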
@@ -544,23 +558,25 @@ impl EcashRecoveryTracker {
// a nonce mined for a smaller amount, but it doesn't eliminate completely
// the possibility that we might use a note mined in a different transaction,
// than our original one.
- // While it is harmless to us, as such duplicated blind nonces are effective as good
- // the as the original ones (same amount), it breaks the assumption that all our
- // blind nonces in an our output need to be in the pending pool. It forces us to be
- // greedy no matter what and take what we can, and just report anything suspicious.
+ // While it is harmless to us, as such duplicated blind nonces are effectively
+ // as good as the original ones (same amount), it breaks the assumption
+ // that all our blind nonces in our output need to be in the pending
+ // pool. It forces us to be greedy no matter what and take what we can,
+ // and just report anything suspicious.
//
// found - all nonces that we found in the pool with the correct amount
- // missing - all the nonces we have not found in the pool, either because they are not ours
- // or were consumed by a previous transaction using this nonce, or possibly gap
- // buffer was too small
+ // missing - all the nonces we have not found in the pool, either because they
+ // are not ours or were consumed by a previous transaction
+ // using this nonce, or possibly the gap buffer was too small
// wrong - nonces that were ours but were mined to a wrong
let (found, missing, wrong) = output.0.iter_items().fold(
(vec![], vec![], vec![]),
|(mut found, mut missing, mut wrong), (amount_from_output, nonce)| {
match self.pending_nonces.get(&nonce.0).cloned() {
Some((issuance_request, note_idx, pending_amount)) => {
- // the moment we see our blind nonce in the epoch history, correctly or incorrectly used,
- // we know that we must have used already
+ // the moment we see our blind nonce in the epoch history, correctly or
+ // incorrectly used, we know that we must have used it already
self.observe_nonce_idx_being_used(pending_amount, note_idx);
if pending_amount == amount_from_output {
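The fold above sorts every blind nonce of a transaction output into one of three buckets. A simplified, self-contained version of that classification, using plain types instead of the real nonce/amount types, looks roughly like this:

use std::collections::HashMap;

// output: (amount, nonce) pairs from the transaction output
// pending: nonce -> amount we originally requested for it
fn classify<'a>(
    output: &[(u64, &'a str)],
    pending: &HashMap<String, u64>,
) -> (Vec<&'a str>, Vec<&'a str>, Vec<&'a str>) {
    output.iter().copied().fold(
        (vec![], vec![], vec![]),
        |(mut found, mut missing, mut wrong), (amount_from_output, nonce)| {
            match pending.get(nonce) {
                // ours, mined at the amount we asked for
                Some(pending_amount) if *pending_amount == amount_from_output => found.push(nonce),
                // ours, but mined at a different amount than requested
                Some(_) => wrong.push(nonce),
                // not in the pool: not ours, already consumed, or past the gap limit
                None => missing.push(nonce),
            }
            (found, missing, wrong)
        },
    )
}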
@@ -595,7 +611,8 @@ impl EcashRecoveryTracker {
expected_amount = %wrong.2,
found_amount = %wrong.3,
"Transaction output contains blind nonce that looks like ours but is of the wrong amount. Ignoring.");
- // Any blind nonce mined with a wrong amount means that this transaction can't be ours
+ // Any blind nonce mined with a wrong amount means that this
+ // transaction can't be ours
}
if !wrong.is_empty() {
@@ -613,8 +630,8 @@ impl EcashRecoveryTracker {
"Missing nonce in pending pool for a transaction with other valid nonces that belong to us. This indicates an issue.");
}
- // ok, now that we know we track this output as ours and use the nonces we've found
- // delete them from the pool and replace them
+ // ok, now that we know we track this output as ours and use the nonces we've
+ // found, delete them from the pool and replace them
for &(_amount, (nonce, _)) in &found {
assert!(self.pending_nonces.remove(&nonce).is_some());
}
@@ -628,10 +645,12 @@ impl EcashRecoveryTracker {
);
}
- /// React to a valid pending nonce being tracked being used in the epoch history
+ /// React to a valid tracked pending nonce being used in the epoch
+ /// history
///
- /// (Possibly) increment the `self.last_mined_nonce_idx`, then replenish the pending pool
- /// to always maintain at least `gap_limit` of pending onces in each amount tier.
+ /// (Possibly) increment the `self.last_mined_nonce_idx`, then replenish the
+ /// pending pool to always maintain at least `gap_limit` of pending
+ /// nonces in each amount tier.
fn observe_nonce_idx_being_used(&mut self, amount: Amount, note_idx: NoteIndex) {
*self.last_mined_nonce_idx.entry(amount).or_default() = max(
self.last_mined_nonce_idx
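For illustration, a rough self-contained sketch of what the doc comment above describes; the field names and derivation are made up, not the real fedimint types: bump the last-mined index for the tier, then derive more nonces so the pending pool stays `gap_limit` ahead.

use std::cmp::max;
use std::collections::HashMap;

struct Pool {
    last_mined_idx: HashMap<u64, u64>,    // amount tier -> highest index seen used
    next_pending_idx: HashMap<u64, u64>,  // amount tier -> next index to derive
    pending: HashMap<String, (u64, u64)>, // nonce -> (amount tier, index)
    gap_limit: u64,
}

impl Pool {
    // stand-in for the real deterministic derivation
    fn derive_nonce(amount: u64, idx: u64) -> String {
        format!("nonce({amount},{idx})")
    }

    fn observe_nonce_idx_being_used(&mut self, amount: u64, note_idx: u64) {
        // (possibly) advance the highest index we have seen used for this tier
        let last = self.last_mined_idx.entry(amount).or_default();
        *last = max(*last, note_idx);
        let target = *last + self.gap_limit;

        // replenish: derive pending nonces until we are `gap_limit` ahead again
        let next = self.next_pending_idx.entry(amount).or_insert(0);
        while *next <= target {
            self.pending
                .insert(Self::derive_nonce(amount, *next), (amount, *next));
            *next += 1;
        }
    }
}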
@@ -763,14 +782,16 @@ impl EcashRecoveryTracker {
let txid = tx.tx_hash();
if !processed_txs.insert(txid) {
- // Just like server side consensus, do not attempt to process the same transaction twice.
+ // Just like server-side consensus, do not attempt to process the same
+ // transaction twice.
return;
}
if rejected_txs.contains(&txid) {
// Do not process invalid transactions.
- // Consensus history contains all data proposed by each peer, even invalid (e.g. due to double spent)
- // transactions. Precisely to save downstream users from having to run the consensus themselves,
+ // Consensus history contains all data proposed by each peer, even invalid
+ // (e.g. due to double spends) transactions. Precisely to save
+ // downstream users from having to run the consensus themselves,
// each epoch contains a list of transactions that turned out to be invalid.
return;
}
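A tiny sketch of the two guards above, with a placeholder transaction-id type: `HashSet::insert` returns `false` for values already present, which gives the process-each-transaction-once check, and the rejected set skips transactions that consensus later invalidated (e.g. double spends).

use std::collections::HashSet;

type TxId = [u8; 32];

fn process_transaction(
    txid: TxId,
    processed_txs: &mut HashSet<TxId>,
    rejected_txs: &HashSet<TxId>,
) {
    if !processed_txs.insert(txid) {
        // already handled earlier in the epoch history; like server-side
        // consensus, never process the same transaction twice
        return;
    }
    if rejected_txs.contains(&txid) {
        // proposed by a peer but rejected by consensus (e.g. a double spend)
        return;
    }
    // ... the actual per-transaction recovery logic would go here ...
}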