diff --git a/.github/workflows/cont_integration.yml b/.github/workflows/cont_integration.yml index 260209791..61f02c501 100644 --- a/.github/workflows/cont_integration.yml +++ b/.github/workflows/cont_integration.yml @@ -140,29 +140,3 @@ jobs: cache: true - name: Clippy run: cargo clippy --all-features --all-targets -- -D warnings - - build-examples: - needs: prepare - name: Build & Test Examples - runs-on: ubuntu-latest - strategy: - matrix: - example-dir: - - example_cli - - example_bitcoind_rpc_polling - - example_electrum - - example_esplora - steps: - - name: checkout - uses: actions/checkout@v4 - with: - persist-credentials: false - - name: Install Rust toolchain - uses: actions-rust-lang/setup-rust-toolchain@v1 - with: - toolchain: ${{ needs.prepare.outputs.rust_version }} - override: true - cache: true - - name: Build - working-directory: examples/${{ matrix.example-dir }} - run: cargo build diff --git a/Cargo.toml b/Cargo.toml index d505c1a0a..fc0253b16 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,10 +8,6 @@ members = [ "crates/esplora", "crates/bitcoind_rpc", "crates/testenv", - "examples/example_cli", - "examples/example_electrum", - "examples/example_esplora", - "examples/example_bitcoind_rpc_polling", ] [workspace.package] diff --git a/crates/chain/src/indexed_tx_graph.rs b/crates/chain/src/indexed_tx_graph.rs index 9adf7ed93..12f22220f 100644 --- a/crates/chain/src/indexed_tx_graph.rs +++ b/crates/chain/src/indexed_tx_graph.rs @@ -83,6 +83,15 @@ where /// /// The underlying `TxGraph` is initialized with `TxGraph::default()`, and the provided /// `index`er is used as-is (since there are no existing transactions to process). + /// + /// # Example + /// + /// ``` + /// use bdk_chain::{keychain_txout::KeychainTxOutIndex, BlockId, IndexedTxGraph}; + /// + /// let index = KeychainTxOutIndex::<&str>::new(10, true); + /// let graph = IndexedTxGraph::<BlockId, KeychainTxOutIndex<&str>>::new(index); + /// ``` pub fn new(index: I) -> Self { Self { index, @@ -363,6 +372,18 @@ where /// Relevancy is determined by the internal [`Indexer::is_tx_relevant`] implementation of `I`. /// A transaction that conflicts with a relevant transaction is also considered relevant. /// Irrelevant transactions in `block` will be ignored. + /// + /// # Example + /// + /// ```no_run + /// use bdk_chain::{IndexedTxGraph, keychain_txout::KeychainTxOutIndex, BlockId}; + /// use bitcoin::Block; + /// + /// let mut graph = IndexedTxGraph::<BlockId, KeychainTxOutIndex<&str>>::new(KeychainTxOutIndex::<&str>::new(10, true)); + /// # let block = Block { header: bitcoin::block::Header::from(bitcoin::constants::genesis_block(bitcoin::Network::Bitcoin).header), txdata: vec![] }; + /// + /// let changeset = graph.apply_block_relevant(&block, 100); + /// ``` pub fn apply_block_relevant( &mut self, block: &Block, diff --git a/crates/chain/src/indexer/keychain_txout.rs b/crates/chain/src/indexer/keychain_txout.rs index 99931cf5e..78c3785dc 100644 --- a/crates/chain/src/indexer/keychain_txout.rs +++ b/crates/chain/src/indexer/keychain_txout.rs @@ -456,6 +456,22 @@ impl KeychainTxOutIndex { /// (one keychain just becomes the defacto owner of that spk arbitrarily) but this may have /// subtle implications up the application stack like one UTXO being missing from one keychain /// because it has been assigned to another which produces the same script pubkey.
+ /// + /// # Example + /// + /// ``` + /// use bdk_chain::keychain_txout::KeychainTxOutIndex; + /// use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey}; + /// # use std::str::FromStr; + /// + /// let mut index = KeychainTxOutIndex::<&str>::new(10, true); + /// let desc = Descriptor::<DescriptorPublicKey>::from_str( + /// "wpkh([d34db33f/84h/0h/0h]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/0/*)" + /// )?; + /// + /// index.insert_descriptor("external", desc)?; + /// # Ok::<_, Box<dyn std::error::Error>>(()) + /// ``` pub fn insert_descriptor( &mut self, keychain: K, @@ -837,6 +853,22 @@ impl KeychainTxOutIndex { /// 1. The descriptor has no wildcard and already has one script revealed. /// 2. The descriptor has already revealed scripts up to the numeric bound. /// 3. There is no descriptor associated with the given keychain. + /// + /// # Example + /// + /// ``` + /// use bdk_chain::keychain_txout::KeychainTxOutIndex; + /// use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey}; + /// # use std::str::FromStr; + /// + /// let mut index = KeychainTxOutIndex::<&str>::new(10, true); + /// let desc = Descriptor::<DescriptorPublicKey>::from_str( + /// "wpkh([d34db33f/84h/0h/0h]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/0/*)" + /// ).unwrap(); + /// index.insert_descriptor("external", desc).unwrap(); + /// let (spk, changeset) = index.reveal_next_spk("external").unwrap(); + /// assert_eq!(spk.0, 0); + /// ``` pub fn reveal_next_spk(&mut self, keychain: K) -> Option<(Indexed<ScriptBuf>, ChangeSet)> { let mut changeset = ChangeSet::default(); let indexed_spk = self._reveal_next_spk(&mut changeset, keychain)?; diff --git a/crates/chain/src/tx_graph.rs b/crates/chain/src/tx_graph.rs index 97d4ecc02..a77db62ac 100644 --- a/crates/chain/src/tx_graph.rs +++ b/crates/chain/src/tx_graph.rs @@ -627,6 +627,24 @@ impl TxGraph { /// * A smaller witness has precedence over a larger witness. /// * If the witness sizes are the same, we prioritize the two witnesses with lexicographical /// order. + /// + /// # Example + /// + /// ``` + /// use bdk_chain::{tx_graph::TxGraph, BlockId}; + /// use bitcoin::Transaction; + /// + /// let mut graph = TxGraph::<BlockId>::default(); + /// let tx = Transaction { + /// version: bitcoin::transaction::Version::ONE, + /// lock_time: bitcoin::locktime::absolute::LockTime::ZERO, + /// input: vec![], + /// output: vec![], + /// }; + /// + /// let changeset = graph.insert_tx(tx.clone()); + /// assert_eq!(changeset.txs.len(), 1); + /// ``` pub fn insert_tx<T: Into<Arc<Transaction>>>(&mut self, tx: T) -> ChangeSet<A> { // This returns `Some` only if the merged tx is different to the `original_tx`.
fn _merge_tx_witnesses( diff --git a/crates/electrum/src/bdk_electrum_client.rs b/crates/electrum/src/bdk_electrum_client.rs index 05b501871..b55f01816 100644 --- a/crates/electrum/src/bdk_electrum_client.rs +++ b/crates/electrum/src/bdk_electrum_client.rs @@ -29,6 +29,15 @@ pub struct BdkElectrumClient { impl BdkElectrumClient { /// Creates a new bdk client from a [`electrum_client::ElectrumApi`] + /// + /// # Example + /// ```no_run + /// use bdk_electrum::{electrum_client, BdkElectrumClient}; + /// + /// let client = electrum_client::Client::new("ssl://electrum.blockstream.info:50002")?; + /// let bdk_client = BdkElectrumClient::new(client); + /// # Ok::<_, electrum_client::Error>(()) + /// ``` pub fn new(client: E) -> Self { Self { inner: client, @@ -107,6 +116,26 @@ impl BdkElectrumClient { /// [`CalculateFeeError::MissingTxOut`] error if those `TxOut`s are not present in the /// transaction graph. /// + /// # Example + /// ```no_run + /// use bdk_core::{spk_client::FullScanRequest, BlockId, CheckPoint}; + /// use bdk_electrum::BdkElectrumClient; + /// # use bdk_electrum::electrum_client; + /// # use electrum_client::bitcoin::{constants, Network}; + /// + /// # let client = electrum_client::Client::new("ssl://electrum.blockstream.info:50002")?; + /// # let bdk_client = BdkElectrumClient::new(client); + /// let request = FullScanRequest::<&str>::builder() + /// .chain_tip(CheckPoint::new(BlockId { + /// height: 0, + /// hash: constants::genesis_block(Network::Bitcoin).block_hash(), + /// })) + /// .build(); + /// + /// let response = bdk_client.full_scan(request, 10, 50, false)?; + /// # Ok::<_, electrum_client::Error>(()) + /// ``` + /// /// [`bdk_chain`]: ../bdk_chain/index.html /// [`CalculateFeeError::MissingTxOut`]: ../bdk_chain/tx_graph/enum.CalculateFeeError.html#variant.MissingTxOut /// [`Wallet.calculate_fee`]: ../bdk_wallet/struct.Wallet.html#method.calculate_fee @@ -190,6 +219,23 @@ impl BdkElectrumClient { /// If the scripts to sync are unknown, such as when restoring or importing a keychain that /// may include scripts that have been used, use [`full_scan`] with the keychain. /// + /// # Example + /// ```no_run + /// use bdk_core::bitcoin::ScriptBuf; + /// use bdk_core::spk_client::SyncRequest; + /// use bdk_electrum::BdkElectrumClient; + /// # use bdk_electrum::electrum_client; + /// + /// # let client = electrum_client::Client::new("ssl://electrum.blockstream.info:50002")?; + /// # let bdk_client = BdkElectrumClient::new(client); + /// let request = SyncRequest::builder() + /// .spks([ScriptBuf::new_op_return(&[0x00; 20])]) + /// .build(); + /// + /// let response = bdk_client.sync(request, 50, false)?; + /// # Ok::<_, electrum_client::Error>(()) + /// ``` + /// /// [`full_scan`]: Self::full_scan /// [`bdk_chain`]: ../bdk_chain/index.html /// [`CalculateFeeError::MissingTxOut`]: ../bdk_chain/tx_graph/enum.CalculateFeeError.html#variant.MissingTxOut diff --git a/crates/esplora/src/blocking_ext.rs b/crates/esplora/src/blocking_ext.rs index 5f8ab531c..35db314fa 100644 --- a/crates/esplora/src/blocking_ext.rs +++ b/crates/esplora/src/blocking_ext.rs @@ -26,6 +26,27 @@ pub trait EsploraExt { /// `stop_gap` script pubkeys with no associated transactions. `parallel_requests` specifies /// the maximum number of HTTP requests to make in parallel. 
/// + /// # Example + /// + /// ```no_run + /// use bdk_core::bitcoin::{constants, Network}; + /// use bdk_core::spk_client::FullScanRequest; + /// use bdk_core::{BlockId, CheckPoint}; + /// use bdk_esplora::{esplora_client, EsploraExt}; + /// + /// let client = esplora_client::Builder::new("https://blockstream.info/api").build_blocking(); + /// + /// let request = FullScanRequest::<&str>::builder() + /// .chain_tip(CheckPoint::new(BlockId { + /// height: 0, + /// hash: constants::genesis_block(Network::Bitcoin).block_hash(), + /// })) + /// .build(); + /// + /// let response = client.full_scan(request, 10, 5)?; + /// # Ok::<_, Box<dyn std::error::Error>>(()) + /// ``` + /// /// Refer to [crate-level docs](crate) for more. fn full_scan<K: Ord + Clone, R: Into<FullScanRequest<K>>>( &self, diff --git a/examples/example_bitcoind_rpc_polling/Cargo.toml b/examples/example_bitcoind_rpc_polling/Cargo.toml deleted file mode 100644 index 6728bb13a..000000000 --- a/examples/example_bitcoind_rpc_polling/Cargo.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -name = "example_bitcoind_rpc_polling" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -bdk_chain = { path = "../../crates/chain", features = ["serde"] } -bdk_bitcoind_rpc = { path = "../../crates/bitcoind_rpc" } -example_cli = { path = "../example_cli" } -ctrlc = { version = "^2" } diff --git a/examples/example_bitcoind_rpc_polling/README.md b/examples/example_bitcoind_rpc_polling/README.md deleted file mode 100644 index fef82ab1c..000000000 --- a/examples/example_bitcoind_rpc_polling/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# Example RPC CLI - -### Simple Regtest Test - -1. Start local regtest bitcoind. - ``` - mkdir -p /tmp/regtest/bitcoind - bitcoind -regtest -server -fallbackfee=0.0002 -rpcuser= -rpcpassword= -datadir=/tmp/regtest/bitcoind -daemon - ``` -2. Create a test bitcoind wallet and set bitcoind env. - ``` - bitcoin-cli -datadir=/tmp/regtest/bitcoind -regtest -rpcuser= -rpcpassword= -named createwallet wallet_name="test" - export RPC_URL=127.0.0.1:18443 - export RPC_USER= - export RPC_PASS= - ``` -3. Get test bitcoind wallet info. - ``` - bitcoin-cli -rpcwallet="test" -rpcuser= -rpcpassword= -datadir=/tmp/regtest/bitcoind -regtest getwalletinfo - ``` -4. Get new test bitcoind wallet address. - ``` - BITCOIND_ADDRESS=$(bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser= -rpcpassword= getnewaddress) - echo $BITCOIND_ADDRESS - ``` -5. Generate 101 blocks with reward to test bitcoind wallet address. - ``` - bitcoin-cli -datadir=/tmp/regtest/bitcoind -regtest -rpcuser= -rpcpassword= generatetoaddress 101 $BITCOIND_ADDRESS - ``` -6. Verify test bitcoind wallet balance. - ``` - bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser= -rpcpassword= getbalances - ``` -7. Set descriptor env and get address from RPC CLI wallet. - ``` - export DESCRIPTOR="wpkh(tprv8ZgxMBicQKsPfK9BTf82oQkHhawtZv19CorqQKPFeaHDMA4dXYX6eWsJGNJ7VTQXWmoHdrfjCYuDijcRmNFwSKcVhswzqs4fugE8turndGc/1/*)" - cargo run -- --network regtest address next - ``` -8. Send 5 test bitcoin to RPC CLI wallet. - ``` - bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser= -rpcpassword= sendtoaddress
5 - ``` -9. Sync blockchain with RPC CLI wallet. - ``` - cargo run -- --network regtest sync - - ``` -10. Get RPC CLI wallet unconfirmed balances. - ``` - cargo run -- --network regtest balance - ``` -11. Generate 1 block with reward to test bitcoind wallet address. - ``` - bitcoin-cli -datadir=/tmp/regtest/bitcoind -rpcuser= -rpcpassword= -regtest generatetoaddress 10 $BITCOIND_ADDRESS - ``` -12. Sync the blockchain with RPC CLI wallet. - ``` - cargo run -- --network regtest sync - - ``` -13. Get RPC CLI wallet confirmed balances. - ``` - cargo run -- --network regtest balance - ``` -14. Get RPC CLI wallet transactions. - ``` - cargo run -- --network regtest txout list - ``` \ No newline at end of file diff --git a/examples/example_bitcoind_rpc_polling/src/main.rs b/examples/example_bitcoind_rpc_polling/src/main.rs deleted file mode 100644 index 0263c5b0b..000000000 --- a/examples/example_bitcoind_rpc_polling/src/main.rs +++ /dev/null @@ -1,411 +0,0 @@ -use std::{ - path::PathBuf, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - time::{Duration, Instant}, -}; - -use bdk_bitcoind_rpc::{ - bitcoincore_rpc::{Auth, Client, RpcApi}, - Emitter, -}; -use bdk_chain::{bitcoin::Block, local_chain, CanonicalizationParams, Merge}; -use example_cli::{ - anyhow, - clap::{self, Args, Subcommand}, - ChangeSet, Keychain, -}; - -const DB_MAGIC: &[u8] = b"bdk_example_rpc"; -const DB_PATH: &str = ".bdk_example_rpc.db"; - -/// The mpsc channel bound for emissions from [`Emitter`]. -const CHANNEL_BOUND: usize = 10; -/// Delay for printing status to stdout. -const STDOUT_PRINT_DELAY: Duration = Duration::from_secs(6); -/// Delay between mempool emissions. -const MEMPOOL_EMIT_DELAY: Duration = Duration::from_secs(30); -/// Delay for committing to persistence. -const DB_COMMIT_DELAY: Duration = Duration::from_secs(60); - -#[derive(Debug)] -enum Emission { - Block(bdk_bitcoind_rpc::BlockEvent), - Mempool(bdk_bitcoind_rpc::MempoolEvent), - Tip(u32), -} - -#[derive(Args, Debug, Clone)] -struct RpcArgs { - /// RPC URL - #[clap(env = "RPC_URL", long, default_value = "127.0.0.1:8332")] - url: String, - /// RPC auth cookie file - #[clap(env = "RPC_COOKIE", long)] - rpc_cookie: Option, - /// RPC auth username - #[clap(env = "RPC_USER", long)] - rpc_user: Option, - /// RPC auth password - #[clap(env = "RPC_PASS", long)] - rpc_password: Option, - /// Starting block height to fallback to if no point of agreement if found - #[clap(env = "FALLBACK_HEIGHT", long, default_value = "0")] - fallback_height: u32, -} - -impl From for Auth { - fn from(args: RpcArgs) -> Self { - match (args.rpc_cookie, args.rpc_user, args.rpc_password) { - (None, None, None) => Self::None, - (Some(path), _, _) => Self::CookieFile(path), - (_, Some(user), Some(pass)) => Self::UserPass(user, pass), - (_, Some(_), None) => panic!("rpc auth: missing rpc_pass"), - (_, None, Some(_)) => panic!("rpc auth: missing rpc_user"), - } - } -} - -impl RpcArgs { - fn new_client(&self) -> anyhow::Result { - Ok(Client::new( - &self.url, - match (&self.rpc_cookie, &self.rpc_user, &self.rpc_password) { - (None, None, None) => Auth::None, - (Some(path), _, _) => Auth::CookieFile(path.clone()), - (_, Some(user), Some(pass)) => Auth::UserPass(user.clone(), pass.clone()), - (_, Some(_), None) => panic!("rpc auth: missing rpc_pass"), - (_, None, Some(_)) => panic!("rpc auth: missing rpc_user"), - }, - )?) 
- } -} - -#[derive(Subcommand, Debug, Clone)] -enum RpcCommands { - /// Syncs local state with remote state via RPC (starting from last point of agreement) and - /// stores/indexes relevant transactions - Sync { - #[clap(flatten)] - rpc_args: RpcArgs, - }, - /// Sync by having the emitter logic in a separate thread - Live { - #[clap(flatten)] - rpc_args: RpcArgs, - }, -} - -fn main() -> anyhow::Result<()> { - let start = Instant::now(); - - let example_cli::Init { - args, - graph, - chain, - db, - network, - } = match example_cli::init_or_load::(DB_MAGIC, DB_PATH)? { - Some(init) => init, - None => return Ok(()), - }; - - let rpc_cmd = match args.command { - example_cli::Commands::ChainSpecific(rpc_cmd) => rpc_cmd, - general_cmd => { - return example_cli::handle_commands( - &graph, - &chain, - &db, - network, - |rpc_args, tx| { - let client = rpc_args.new_client()?; - client.send_raw_transaction(tx)?; - Ok(()) - }, - general_cmd, - ); - } - }; - - match rpc_cmd { - RpcCommands::Sync { rpc_args } => { - let RpcArgs { - fallback_height, .. - } = rpc_args; - - let rpc_client = rpc_args.new_client()?; - let mut emitter = { - let chain = chain.lock().unwrap(); - let graph = graph.lock().unwrap(); - Emitter::new( - &rpc_client, - chain.tip(), - fallback_height, - graph - .canonical_view( - &*chain, - chain.tip().block_id(), - CanonicalizationParams::default(), - ) - .txs() - .filter(|tx| tx.pos.is_unconfirmed()) - .map(|tx| tx.tx), - ) - }; - let mut db_stage = ChangeSet::default(); - - let mut last_db_commit = Instant::now(); - let mut last_print = Instant::now(); - - while let Some(emission) = emitter.next_block()? { - let height = emission.block_height(); - - let mut chain = chain.lock().unwrap(); - let mut graph = graph.lock().unwrap(); - - let chain_changeset = chain - .apply_update(emission.checkpoint) - .expect("must always apply as we receive blocks in order from emitter"); - let graph_changeset = graph.apply_block_relevant(&emission.block, height); - db_stage.merge(ChangeSet { - local_chain: chain_changeset, - tx_graph: graph_changeset.tx_graph, - indexer: graph_changeset.indexer, - ..Default::default() - }); - - // commit staged db changes in intervals - if last_db_commit.elapsed() >= DB_COMMIT_DELAY { - let db = &mut *db.lock().unwrap(); - last_db_commit = Instant::now(); - if let Some(changeset) = db_stage.take() { - db.append(&changeset)?; - } - println!( - "[{:>10}s] committed to db (took {}s)", - start.elapsed().as_secs_f32(), - last_db_commit.elapsed().as_secs_f32() - ); - } - - // print synced-to height and current balance in intervals - if last_print.elapsed() >= STDOUT_PRINT_DELAY { - last_print = Instant::now(); - let synced_to = chain.tip(); - let balance = { - graph - .canonical_view( - &*chain, - synced_to.block_id(), - CanonicalizationParams::default(), - ) - .balance( - graph.index.outpoints().iter().cloned(), - |(k, _), _| k == &Keychain::Internal, - 1, - ) - }; - println!( - "[{:>10}s] synced to {} @ {} | total: {}", - start.elapsed().as_secs_f32(), - synced_to.hash(), - synced_to.height(), - balance.total() - ); - } - } - - let mempool_txs = emitter.mempool()?; - let graph_changeset = graph - .lock() - .unwrap() - .batch_insert_relevant_unconfirmed(mempool_txs.update); - { - let db = &mut *db.lock().unwrap(); - db_stage.merge(ChangeSet { - tx_graph: graph_changeset.tx_graph, - indexer: graph_changeset.indexer, - ..Default::default() - }); - if let Some(changeset) = db_stage.take() { - db.append(&changeset)?; - } - } - } - RpcCommands::Live { rpc_args } => { - let 
RpcArgs { - fallback_height, .. - } = rpc_args; - let sigterm_flag = start_ctrlc_handler(); - - let rpc_client = Arc::new(rpc_args.new_client()?); - let mut emitter = { - let chain = chain.lock().unwrap(); - let graph = graph.lock().unwrap(); - Emitter::new( - rpc_client.clone(), - chain.tip(), - fallback_height, - graph - .canonical_view( - &*chain, - chain.tip().block_id(), - CanonicalizationParams::default(), - ) - .txs() - .filter(|tx| tx.pos.is_unconfirmed()) - .map(|tx| tx.tx), - ) - }; - - println!( - "[{:>10}s] starting emitter thread...", - start.elapsed().as_secs_f32() - ); - let (tx, rx) = std::sync::mpsc::sync_channel::(CHANNEL_BOUND); - let emission_jh = std::thread::spawn(move || -> anyhow::Result<()> { - let mut block_count = rpc_client.get_block_count()? as u32; - tx.send(Emission::Tip(block_count))?; - - loop { - match emitter.next_block()? { - Some(block_emission) => { - let height = block_emission.block_height(); - if sigterm_flag.load(Ordering::Acquire) { - break; - } - if height > block_count { - block_count = rpc_client.get_block_count()? as u32; - tx.send(Emission::Tip(block_count))?; - } - tx.send(Emission::Block(block_emission))?; - } - None => { - if await_flag(&sigterm_flag, MEMPOOL_EMIT_DELAY) { - break; - } - println!("preparing mempool emission..."); - let now = Instant::now(); - tx.send(Emission::Mempool(emitter.mempool()?))?; - println!("mempool emission prepared in {}s", now.elapsed().as_secs()); - continue; - } - }; - } - - println!("emitter thread shutting down..."); - Ok(()) - }); - - let mut tip_height = 0_u32; - let mut last_db_commit = Instant::now(); - let mut last_print = Option::::None; - let mut db_stage = ChangeSet::default(); - - for emission in rx { - let mut graph = graph.lock().unwrap(); - let mut chain = chain.lock().unwrap(); - - let (chain_changeset, graph_changeset) = match emission { - Emission::Block(block_emission) => { - let height = block_emission.block_height(); - let chain_changeset = chain - .apply_update(block_emission.checkpoint) - .expect("must always apply as we receive blocks in order from emitter"); - let graph_changeset = - graph.apply_block_relevant(&block_emission.block, height); - (chain_changeset, graph_changeset) - } - Emission::Mempool(mempool_txs) => { - let mut graph_changeset = - graph.batch_insert_relevant_unconfirmed(mempool_txs.update.clone()); - graph_changeset - .merge(graph.batch_insert_relevant_evicted_at(mempool_txs.evicted)); - (local_chain::ChangeSet::default(), graph_changeset) - } - Emission::Tip(h) => { - tip_height = h; - continue; - } - }; - - db_stage.merge(ChangeSet { - local_chain: chain_changeset, - tx_graph: graph_changeset.tx_graph, - indexer: graph_changeset.indexer, - ..Default::default() - }); - - if last_db_commit.elapsed() >= DB_COMMIT_DELAY { - let db = &mut *db.lock().unwrap(); - last_db_commit = Instant::now(); - if let Some(changeset) = db_stage.take() { - db.append(&changeset)?; - } - println!( - "[{:>10}s] committed to db (took {}s)", - start.elapsed().as_secs_f32(), - last_db_commit.elapsed().as_secs_f32() - ); - } - - if last_print.map_or(Duration::MAX, |i| i.elapsed()) >= STDOUT_PRINT_DELAY { - last_print = Some(Instant::now()); - let synced_to = chain.tip(); - let balance = { - graph - .canonical_view( - &*chain, - synced_to.block_id(), - CanonicalizationParams::default(), - ) - .balance( - graph.index.outpoints().iter().cloned(), - |(k, _), _| k == &Keychain::Internal, - 1, - ) - }; - println!( - "[{:>10}s] synced to {} @ {} / {} | total: {}", - start.elapsed().as_secs_f32(), - 
synced_to.hash(), - synced_to.height(), - tip_height, - balance.total() - ); - } - } - - emission_jh.join().expect("must join emitter thread")?; - } - } - - Ok(()) -} - -#[allow(dead_code)] -fn start_ctrlc_handler() -> Arc { - let flag = Arc::new(AtomicBool::new(false)); - let cloned_flag = flag.clone(); - - ctrlc::set_handler(move || cloned_flag.store(true, Ordering::Release)); - - flag -} - -#[allow(dead_code)] -fn await_flag(flag: &AtomicBool, duration: Duration) -> bool { - let start = Instant::now(); - loop { - if flag.load(Ordering::Acquire) { - return true; - } - if start.elapsed() >= duration { - return false; - } - std::thread::sleep(Duration::from_secs(1)); - } -} diff --git a/examples/example_cli/Cargo.toml b/examples/example_cli/Cargo.toml deleted file mode 100644 index 0a467db84..000000000 --- a/examples/example_cli/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "example_cli" -version = "0.2.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -bdk_chain = { path = "../../crates/chain", features = ["serde", "miniscript"]} -bdk_coin_select = "0.4" -bdk_file_store = { path = "../../crates/file_store" } -bitcoin = { version = "0.32.0", features = ["base64"], default-features = false } - -anyhow = "1" -clap = { version = "4.5.17", features = ["derive", "env"] } -rand = "0.8" -serde = { version = "1", features = ["derive"] } -serde_json = "1.0" diff --git a/examples/example_cli/src/lib.rs b/examples/example_cli/src/lib.rs deleted file mode 100644 index baa17e6d7..000000000 --- a/examples/example_cli/src/lib.rs +++ /dev/null @@ -1,965 +0,0 @@ -use bdk_chain::keychain_txout::DEFAULT_LOOKAHEAD; -use serde_json::json; -use std::cmp; -use std::collections::HashMap; -use std::env; -use std::fmt; -use std::str::FromStr; -use std::sync::Mutex; - -use anyhow::bail; -use anyhow::Context; -use bdk_chain::bitcoin::{ - absolute, address::NetworkUnchecked, bip32, consensus, constants, hex::DisplayHex, relative, - secp256k1::Secp256k1, transaction, Address, Amount, Network, NetworkKind, PrivateKey, Psbt, - PublicKey, Sequence, Transaction, TxIn, TxOut, -}; -use bdk_chain::miniscript::{ - descriptor::{DescriptorSecretKey, SinglePubKey}, - plan::{Assets, Plan}, - psbt::PsbtExt, - Descriptor, DescriptorPublicKey, ForEachKey, -}; -use bdk_chain::CanonicalizationParams; -use bdk_chain::ConfirmationBlockTime; -use bdk_chain::{ - indexer::keychain_txout::{self, KeychainTxOutIndex}, - local_chain::{self, LocalChain}, - tx_graph, ChainOracle, DescriptorExt, FullTxOut, IndexedTxGraph, Merge, -}; -use bdk_coin_select::{ - metrics::LowestFee, Candidate, ChangePolicy, CoinSelector, DrainWeights, FeeRate, Target, - TargetFee, TargetOutputs, -}; -use bdk_file_store::Store; -use clap::{Parser, Subcommand}; -use rand::prelude::*; - -pub use anyhow; -pub use clap; - -/// Alias for a `IndexedTxGraph` with specific `Anchor` and `Indexer`. -pub type KeychainTxGraph = IndexedTxGraph>; - -/// ChangeSet -#[derive(Default, Debug, Clone, PartialEq, serde::Deserialize, serde::Serialize)] -pub struct ChangeSet { - /// Descriptor for recipient addresses. - pub descriptor: Option>, - /// Descriptor for change addresses. - pub change_descriptor: Option>, - /// Stores the network type of the transaction data. - pub network: Option, - /// Changes to the [`LocalChain`]. - pub local_chain: local_chain::ChangeSet, - /// Changes to [`TxGraph`](tx_graph::TxGraph). 
- pub tx_graph: tx_graph::ChangeSet, - /// Changes to [`KeychainTxOutIndex`]. - pub indexer: keychain_txout::ChangeSet, -} - -#[derive(Parser)] -#[clap(author, version, about, long_about = None)] -#[clap(propagate_version = true)] -pub struct Args { - #[clap(subcommand)] - pub command: Commands, -} - -#[derive(Subcommand, Debug, Clone)] -pub enum Commands { - /// Initialize a new data store. - Init { - /// Network - #[clap(long, short, default_value = "signet")] - network: Network, - /// Descriptor - #[clap(env = "DESCRIPTOR")] - descriptor: String, - /// Change descriptor - #[clap(long, short, env = "CHANGE_DESCRIPTOR")] - change_descriptor: Option, - }, - #[clap(flatten)] - ChainSpecific(CS), - /// Address generation and inspection. - Address { - #[clap(subcommand)] - addr_cmd: AddressCmd, - }, - /// Get the wallet balance. - Balance, - /// TxOut related commands. - #[clap(name = "txout")] - TxOut { - #[clap(subcommand)] - txout_cmd: TxOutCmd, - }, - /// PSBT operations - Psbt { - #[clap(subcommand)] - psbt_cmd: PsbtCmd, - }, - /// Generate new BIP86 descriptors. - Generate { - /// Network - #[clap(long, short, default_value = "signet")] - network: Network, - }, -} - -#[derive(Subcommand, Debug, Clone)] -pub enum AddressCmd { - /// Get the next unused address. - Next, - /// Get a new address regardless of the existing unused addresses. - New, - /// List all addresses - List { - /// List change addresses - #[clap(long)] - change: bool, - }, - /// Get last revealed address index for each keychain. - Index, -} - -#[derive(Subcommand, Debug, Clone)] -pub enum TxOutCmd { - /// List transaction outputs. - List { - /// Return only spent outputs. - #[clap(short, long)] - spent: bool, - /// Return only unspent outputs. - #[clap(short, long)] - unspent: bool, - /// Return only confirmed outputs. - #[clap(long)] - confirmed: bool, - /// Return only unconfirmed outputs. - #[clap(long)] - unconfirmed: bool, - }, -} - -#[derive(Subcommand, Debug, Clone)] -pub enum PsbtCmd { - /// Create a new PSBT. 
- New { - /// Amount to send in satoshis - #[clap(required = true)] - value: u64, - /// Recipient address - #[clap(required = true)] - address: Address, - /// Set the feerate of the tx (sat/vbyte) - #[clap(long, short, default_value = "1.0")] - feerate: Option, - /// Set max absolute timelock (from consensus value) - #[clap(long, short)] - after: Option, - /// Set max relative timelock (from consensus value) - #[clap(long, short)] - older: Option, - /// Coin selection algorithm - #[clap(long, short, default_value = "bnb")] - coin_select: CoinSelectionAlgo, - /// Debug print the PSBT - #[clap(long, short)] - debug: bool, - }, - /// Sign with a hot signer - Sign { - /// Private descriptor [env: DESCRIPTOR=] - #[clap(long, short)] - descriptor: Option, - /// PSBT - #[clap(long, short, required = true)] - psbt: String, - }, - /// Extract transaction - Extract { - /// PSBT - #[clap(long, short, required = true)] - psbt: String, - /// Whether to try broadcasting the tx - #[clap(long, short)] - broadcast: bool, - #[clap(flatten)] - chain_specific: S, - }, -} - -#[derive( - Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, serde::Deserialize, serde::Serialize, -)] -pub enum Keychain { - External, - Internal, -} - -impl fmt::Display for Keychain { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Keychain::External => write!(f, "external"), - Keychain::Internal => write!(f, "internal"), - } - } -} - -#[derive(Clone, Debug, Default)] -pub enum CoinSelectionAlgo { - LargestFirst, - SmallestFirst, - OldestFirst, - NewestFirst, - #[default] - BranchAndBound, -} - -impl FromStr for CoinSelectionAlgo { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result { - use CoinSelectionAlgo::*; - Ok(match s { - "largest-first" => LargestFirst, - "smallest-first" => SmallestFirst, - "oldest-first" => OldestFirst, - "newest-first" => NewestFirst, - "bnb" => BranchAndBound, - unknown => bail!("unknown coin selection algorithm '{unknown}'"), - }) - } -} - -impl fmt::Display for CoinSelectionAlgo { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - use CoinSelectionAlgo::*; - write!( - f, - "{}", - match self { - LargestFirst => "largest-first", - SmallestFirst => "smallest-first", - OldestFirst => "oldest-first", - NewestFirst => "newest-first", - BranchAndBound => "bnb", - } - ) - } -} - -// Records changes to the internal keychain when we -// have to include a change output during tx creation. 
-#[derive(Debug)] -pub struct ChangeInfo { - pub change_keychain: Keychain, - pub indexer: keychain_txout::ChangeSet, - pub index: u32, -} - -pub fn create_tx( - graph: &mut KeychainTxGraph, - chain: &O, - assets: &Assets, - cs_algorithm: CoinSelectionAlgo, - address: Address, - value: u64, - feerate: f32, -) -> anyhow::Result<(Psbt, Option)> -where - O::Error: std::error::Error + Send + Sync + 'static, -{ - let mut changeset = keychain_txout::ChangeSet::default(); - - // get planned utxos - let mut plan_utxos = planned_utxos(graph, chain, assets)?; - - // sort utxos if cs-algo requires it - match cs_algorithm { - CoinSelectionAlgo::LargestFirst => { - plan_utxos.sort_by_key(|(_, utxo)| cmp::Reverse(utxo.txout.value)) - } - CoinSelectionAlgo::SmallestFirst => plan_utxos.sort_by_key(|(_, utxo)| utxo.txout.value), - CoinSelectionAlgo::OldestFirst => plan_utxos.sort_by_key(|(_, utxo)| utxo.chain_position), - CoinSelectionAlgo::NewestFirst => { - plan_utxos.sort_by_key(|(_, utxo)| cmp::Reverse(utxo.chain_position)) - } - CoinSelectionAlgo::BranchAndBound => plan_utxos.shuffle(&mut thread_rng()), - } - - // build candidate set - let candidates: Vec = plan_utxos - .iter() - .map(|(plan, utxo)| { - Candidate::new( - utxo.txout.value.to_sat(), - plan.satisfaction_weight() as u64, - plan.witness_version().is_some(), - ) - }) - .collect(); - - // create recipient output(s) - let mut outputs = vec![TxOut { - value: Amount::from_sat(value), - script_pubkey: address.script_pubkey(), - }]; - - let (change_keychain, _) = graph - .index - .keychains() - .last() - .expect("must have a keychain"); - - let ((change_index, change_script), index_changeset) = graph - .index - .next_unused_spk(change_keychain) - .expect("Must exist"); - changeset.merge(index_changeset); - - let mut change_output = TxOut { - value: Amount::ZERO, - script_pubkey: change_script, - }; - - let change_desc = graph - .index - .keychains() - .find(|(k, _)| k == &change_keychain) - .expect("must exist") - .1; - - let min_drain_value = change_desc.dust_value().to_sat(); - - let target = Target { - outputs: TargetOutputs::fund_outputs( - outputs - .iter() - .map(|output| (output.weight().to_wu(), output.value.to_sat())), - ), - fee: TargetFee { - rate: FeeRate::from_sat_per_vb(feerate), - ..Default::default() - }, - }; - - let change_policy = ChangePolicy { - min_value: min_drain_value, - drain_weights: DrainWeights::TR_KEYSPEND, - }; - - // run coin selection - let mut selector = CoinSelector::new(&candidates); - match cs_algorithm { - CoinSelectionAlgo::BranchAndBound => { - let metric = LowestFee { - target, - long_term_feerate: FeeRate::from_sat_per_vb(10.0), - change_policy, - }; - match selector.run_bnb(metric, 10_000) { - Ok(_) => {} - Err(_) => selector - .select_until_target_met(target) - .context("selecting coins")?, - } - } - _ => selector - .select_until_target_met(target) - .context("selecting coins")?, - } - - // get the selected plan utxos - let selected: Vec<_> = selector.apply_selection(&plan_utxos).collect(); - - // if the selection tells us to use change and the change value is sufficient, we add it as an - // output - let mut change_info = Option::::None; - let drain = selector.drain(target, change_policy); - if drain.value > min_drain_value { - change_output.value = Amount::from_sat(drain.value); - outputs.push(change_output); - change_info = Some(ChangeInfo { - change_keychain, - indexer: changeset, - index: change_index, - }); - outputs.shuffle(&mut thread_rng()); - } - - let unsigned_tx = Transaction { - version: 
transaction::Version::TWO, - lock_time: assets - .absolute_timelock - .unwrap_or(absolute::LockTime::from_height( - chain.get_chain_tip()?.height, - )?), - input: selected - .iter() - .map(|(plan, utxo)| TxIn { - previous_output: utxo.outpoint, - sequence: plan - .relative_timelock - .map_or(Sequence::ENABLE_RBF_NO_LOCKTIME, Sequence::from), - ..Default::default() - }) - .collect(), - output: outputs, - }; - - // update psbt with plan - let mut psbt = Psbt::from_unsigned_tx(unsigned_tx)?; - for (i, (plan, utxo)) in selected.iter().enumerate() { - let psbt_input = &mut psbt.inputs[i]; - plan.update_psbt_input(psbt_input); - psbt_input.witness_utxo = Some(utxo.txout.clone()); - } - - Ok((psbt, change_info)) -} - -// Alias the elements of `planned_utxos` -pub type PlanUtxo = (Plan, FullTxOut); - -pub fn planned_utxos( - graph: &KeychainTxGraph, - chain: &O, - assets: &Assets, -) -> Result, O::Error> { - let chain_tip = chain.get_chain_tip()?; - let outpoints = graph.index.outpoints(); - graph - .try_canonical_view(chain, chain_tip, CanonicalizationParams::default())? - .filter_unspent_outpoints(outpoints.iter().cloned()) - .filter_map(|((k, i), full_txo)| -> Option> { - let desc = graph - .index - .keychains() - .find(|(keychain, _)| *keychain == k) - .expect("keychain must exist") - .1 - .at_derivation_index(i) - .expect("i can't be hardened"); - - let plan = desc.plan(assets).ok()?; - - Some(Ok((plan, full_txo))) - }) - .collect() -} - -pub fn handle_commands( - graph: &Mutex, - chain: &Mutex, - db: &Mutex>, - network: Network, - broadcast_fn: impl FnOnce(S, &Transaction) -> anyhow::Result<()>, - cmd: Commands, -) -> anyhow::Result<()> { - match cmd { - Commands::Init { .. } => unreachable!("handled by init command"), - Commands::Generate { .. } => unreachable!("handled by generate command"), - Commands::ChainSpecific(_) => unreachable!("example code should handle this!"), - Commands::Address { addr_cmd } => { - let graph = &mut *graph.lock().unwrap(); - let index = &mut graph.index; - - match addr_cmd { - AddressCmd::Next | AddressCmd::New => { - let spk_chooser = match addr_cmd { - AddressCmd::Next => KeychainTxOutIndex::next_unused_spk, - AddressCmd::New => KeychainTxOutIndex::reveal_next_spk, - _ => unreachable!("only these two variants exist in match arm"), - }; - - let ((spk_i, spk), index_changeset) = - spk_chooser(index, Keychain::External).expect("Must exist"); - let db = &mut *db.lock().unwrap(); - db.append(&ChangeSet { - indexer: index_changeset, - ..Default::default() - })?; - let addr = Address::from_script(spk.as_script(), network)?; - println!("[address @ {spk_i}] {addr}"); - Ok(()) - } - AddressCmd::Index => { - for (keychain, derivation_index) in index.last_revealed_indices() { - println!("{keychain:?}: {derivation_index}"); - } - Ok(()) - } - AddressCmd::List { change } => { - let target_keychain = match change { - true => Keychain::Internal, - false => Keychain::External, - }; - for (spk_i, spk) in index.revealed_keychain_spks(target_keychain) { - let address = Address::from_script(spk.as_script(), network) - .expect("should always be able to derive address"); - println!( - "{:?} {} used:{}", - spk_i, - address, - index.is_used(target_keychain, spk_i) - ); - } - Ok(()) - } - } - } - Commands::Balance => { - let graph = &*graph.lock().unwrap(); - let chain = &*chain.lock().unwrap(); - fn print_balances<'a>( - title_str: &'a str, - items: impl IntoIterator, - ) { - println!("{title_str}:"); - for (name, amount) in items.into_iter() { - println!(" {:<10} {:>12} sats", name, 
amount.to_sat()) - } - } - - let balance = graph - .try_canonical_view( - chain, - chain.get_chain_tip()?, - CanonicalizationParams::default(), - )? - .balance( - graph.index.outpoints().iter().cloned(), - |(k, _), _| k == &Keychain::Internal, - 1, - ); - - let confirmed_total = balance.confirmed + balance.immature; - let unconfirmed_total = balance.untrusted_pending + balance.trusted_pending; - - print_balances( - "confirmed", - [ - ("total", confirmed_total), - ("spendable", balance.confirmed), - ("immature", balance.immature), - ], - ); - print_balances( - "unconfirmed", - [ - ("total", unconfirmed_total), - ("trusted", balance.trusted_pending), - ("untrusted", balance.untrusted_pending), - ], - ); - - Ok(()) - } - Commands::TxOut { txout_cmd } => { - let graph = &*graph.lock().unwrap(); - let chain = &*chain.lock().unwrap(); - let chain_tip = chain.get_chain_tip()?; - let outpoints = graph.index.outpoints(); - - match txout_cmd { - TxOutCmd::List { - spent, - unspent, - confirmed, - unconfirmed, - } => { - let txouts = graph - .try_canonical_view(chain, chain_tip, CanonicalizationParams::default())? - .filter_outpoints(outpoints.iter().cloned()) - .filter(|(_, full_txo)| match (spent, unspent) { - (true, false) => full_txo.spent_by.is_some(), - (false, true) => full_txo.spent_by.is_none(), - _ => true, - }) - .filter(|(_, full_txo)| match (confirmed, unconfirmed) { - (true, false) => full_txo.chain_position.is_confirmed(), - (false, true) => !full_txo.chain_position.is_confirmed(), - _ => true, - }) - .collect::>(); - - for (spk_i, full_txo) in txouts { - let addr = Address::from_script(&full_txo.txout.script_pubkey, network)?; - println!( - "{:?} {} {} {} spent:{:?}", - spk_i, full_txo.txout.value, full_txo.outpoint, addr, full_txo.spent_by - ) - } - Ok(()) - } - } - } - Commands::Psbt { psbt_cmd } => match psbt_cmd { - PsbtCmd::New { - value, - address, - feerate, - after, - older, - coin_select, - debug, - } => { - let address = address.require_network(network)?; - - let (psbt, change_info) = { - let mut graph = graph.lock().unwrap(); - let chain = chain.lock().unwrap(); - - // collect assets we can sign for - let mut pks = vec![]; - for (_, desc) in graph.index.keychains() { - desc.for_each_key(|k| { - pks.push(k.clone()); - true - }); - } - let mut assets = Assets::new().add(pks); - if let Some(n) = after { - assets = assets.after(absolute::LockTime::from_consensus(n)); - } - if let Some(n) = older { - assets = assets.older(relative::LockTime::from_consensus(n)?); - } - - create_tx( - &mut graph, - &*chain, - &assets, - coin_select, - address, - value, - feerate.expect("must have feerate"), - )? - }; - - if let Some(ChangeInfo { - change_keychain, - indexer, - index, - }) = change_info - { - // We must first persist to disk the fact that we've got a new address from the - // change keychain so future scans will find the tx we're about to broadcast. - // If we're unable to persist this, then we don't want to broadcast. - { - let db = &mut *db.lock().unwrap(); - db.append(&ChangeSet { - indexer, - ..Default::default() - })?; - } - - // We don't want other callers/threads to use this address while we're using it - // but we also don't want to scan the tx we just created because it's not - // technically in the blockchain yet. 
- graph - .lock() - .unwrap() - .index - .mark_used(change_keychain, index); - } - - if debug { - dbg!(psbt); - } else { - // print base64 encoded psbt - let fee = psbt.fee()?.to_sat(); - let mut obj = serde_json::Map::new(); - obj.insert("psbt".to_string(), json!(psbt.to_string())); - obj.insert("fee".to_string(), json!(fee)); - println!("{}", serde_json::to_string_pretty(&obj)?); - }; - - Ok(()) - } - PsbtCmd::Sign { psbt, descriptor } => { - let mut psbt = Psbt::from_str(&psbt)?; - - let desc_str = match descriptor { - Some(s) => s, - None => env::var("DESCRIPTOR").context("unable to sign")?, - }; - - let secp = Secp256k1::new(); - let (_, keymap) = Descriptor::parse_descriptor(&secp, &desc_str)?; - if keymap.is_empty() { - bail!("unable to sign") - } - - // note: we're only looking at the first entry in the keymap - // the idea is to find something that impls `GetKey` - let sign_res = match keymap.iter().next().expect("not empty") { - (DescriptorPublicKey::Single(single_pub), DescriptorSecretKey::Single(prv)) => { - let pk = match single_pub.key { - SinglePubKey::FullKey(pk) => pk, - SinglePubKey::XOnly(_) => unimplemented!("single xonly pubkey"), - }; - let keys: HashMap = [(pk, prv.key)].into(); - psbt.sign(&keys, &secp) - } - (_, DescriptorSecretKey::XPrv(k)) => psbt.sign(&k.xkey, &secp), - _ => unimplemented!("multi xkey signer"), - }; - - let _ = - sign_res.map_err(|errors| anyhow::anyhow!("failed to sign PSBT {errors:?}"))?; - - let mut obj = serde_json::Map::new(); - obj.insert("psbt".to_string(), json!(psbt.to_string())); - println!("{}", serde_json::to_string_pretty(&obj)?); - - Ok(()) - } - PsbtCmd::Extract { - broadcast, - chain_specific, - psbt, - } => { - let mut psbt = Psbt::from_str(&psbt)?; - psbt.finalize_mut(&Secp256k1::new()) - .map_err(|errors| anyhow::anyhow!("failed to finalize PSBT {errors:?}"))?; - - let tx = psbt.extract_tx()?; - - if broadcast { - let mut graph = graph.lock().unwrap(); - - match broadcast_fn(chain_specific, &tx) { - Ok(_) => { - println!("Broadcasted Tx: {}", tx.compute_txid()); - - let changeset = graph.insert_tx(tx); - - // We know the tx is at least unconfirmed now. Note if persisting here - // fails, it's not a big deal since we can - // always find it again from the blockchain. - db.lock().unwrap().append(&ChangeSet { - tx_graph: changeset.tx_graph, - indexer: changeset.indexer, - ..Default::default() - })?; - } - Err(e) => { - // We failed to broadcast, so allow our change address to be used in the - // future - let (change_keychain, _) = graph - .index - .keychains() - .last() - .expect("must have a keychain"); - let change_index = tx.output.iter().find_map(|txout| { - let spk = txout.script_pubkey.as_script(); - match graph.index.index_of_spk(spk) { - Some(&(keychain, index)) if keychain == change_keychain => { - Some((keychain, index)) - } - _ => None, - } - }); - if let Some((keychain, index)) = change_index { - graph.index.unmark_used(keychain, index); - } - bail!(e); - } - } - } else { - // encode raw tx hex - let hex = consensus::serialize(&tx).to_lower_hex_string(); - let mut obj = serde_json::Map::new(); - obj.insert("tx".to_string(), json!(hex)); - println!("{}", serde_json::to_string_pretty(&obj)?); - } - - Ok(()) - } - }, - } -} - -/// The initial state returned by [`init_or_load`]. 
-pub struct Init { - /// CLI args - pub args: Args, - /// Indexed graph - pub graph: Mutex, - /// Local chain - pub chain: Mutex, - /// Database - pub db: Mutex>, - /// Network - pub network: Network, -} - -/// Loads from persistence or creates new -pub fn init_or_load( - db_magic: &[u8], - db_path: &str, -) -> anyhow::Result>> { - let args = Args::::parse(); - - match args.command { - // initialize new db - Commands::Init { .. } => initialize::(args, db_magic, db_path).map(|_| None), - // generate keys - Commands::Generate { network } => generate_bip86_helper(network).map(|_| None), - // try load - _ => { - let (mut db, changeset) = - Store::::load(db_magic, db_path).context("could not open file store")?; - - let changeset = changeset.expect("should not be empty"); - let network = changeset.network.expect("changeset network"); - - let chain = Mutex::new({ - let (mut chain, _) = - LocalChain::from_genesis(constants::genesis_block(network).block_hash()); - chain.apply_changeset(&changeset.local_chain)?; - chain - }); - - let (graph, changeset) = IndexedTxGraph::from_changeset( - (changeset.tx_graph, changeset.indexer).into(), - |c| -> anyhow::Result<_> { - let mut indexer = - KeychainTxOutIndex::from_changeset(DEFAULT_LOOKAHEAD, true, c); - if let Some(desc) = changeset.descriptor { - indexer.insert_descriptor(Keychain::External, desc)?; - } - if let Some(change_desc) = changeset.change_descriptor { - indexer.insert_descriptor(Keychain::Internal, change_desc)?; - } - Ok(indexer) - }, - )?; - db.append(&ChangeSet { - indexer: changeset.indexer, - tx_graph: changeset.tx_graph, - ..Default::default() - })?; - - let graph = Mutex::new(graph); - let db = Mutex::new(db); - - Ok(Some(Init { - args, - graph, - chain, - db, - network, - })) - } - } -} - -/// Initialize db backend. -fn initialize(args: Args, db_magic: &[u8], db_path: &str) -> anyhow::Result<()> -where - CS: clap::Subcommand, - S: clap::Args, -{ - if let Commands::Init { - network, - descriptor, - change_descriptor, - } = args.command - { - let mut changeset = ChangeSet::default(); - - // parse descriptors - let secp = Secp256k1::new(); - let mut index = KeychainTxOutIndex::default(); - let (descriptor, _) = - Descriptor::::parse_descriptor(&secp, &descriptor)?; - let _ = index.insert_descriptor(Keychain::External, descriptor.clone())?; - changeset.descriptor = Some(descriptor); - - if let Some(desc) = change_descriptor { - let (change_descriptor, _) = - Descriptor::::parse_descriptor(&secp, &desc)?; - let _ = index.insert_descriptor(Keychain::Internal, change_descriptor.clone())?; - changeset.change_descriptor = Some(change_descriptor); - } - - // create new - let (_, chain_changeset) = - LocalChain::from_genesis(constants::genesis_block(network).block_hash()); - changeset.network = Some(network); - changeset.local_chain = chain_changeset; - let mut db = Store::::create(db_magic, db_path)?; - db.append(&changeset)?; - println!("New database {db_path}"); - } - - Ok(()) -} - -/// Generate BIP86 descriptors. 
-fn generate_bip86_helper(network: impl Into) -> anyhow::Result<()> { - let secp = Secp256k1::new(); - let mut seed = [0x00; 32]; - thread_rng().fill_bytes(&mut seed); - - let m = bip32::Xpriv::new_master(network, &seed)?; - let fp = m.fingerprint(&secp); - let path = if m.network.is_mainnet() { - "86h/0h/0h" - } else { - "86h/1h/0h" - }; - - let descriptors: Vec = [0, 1] - .iter() - .map(|i| format!("tr([{fp}]{m}/{path}/{i}/*)")) - .collect(); - let external_desc = &descriptors[0]; - let internal_desc = &descriptors[1]; - let (descriptor, keymap) = - >::parse_descriptor(&secp, external_desc)?; - let (internal_descriptor, internal_keymap) = - >::parse_descriptor(&secp, internal_desc)?; - println!("Public"); - println!("{descriptor}"); - println!("{internal_descriptor}"); - println!("\nPrivate"); - println!("{}", descriptor.to_string_with_secret(&keymap)); - println!( - "{}", - internal_descriptor.to_string_with_secret(&internal_keymap) - ); - - Ok(()) -} - -impl Merge for ChangeSet { - fn merge(&mut self, other: Self) { - if other.descriptor.is_some() { - self.descriptor = other.descriptor; - } - if other.change_descriptor.is_some() { - self.change_descriptor = other.change_descriptor; - } - if other.network.is_some() { - self.network = other.network; - } - Merge::merge(&mut self.local_chain, other.local_chain); - Merge::merge(&mut self.tx_graph, other.tx_graph); - Merge::merge(&mut self.indexer, other.indexer); - } - - fn is_empty(&self) -> bool { - self.descriptor.is_none() - && self.change_descriptor.is_none() - && self.network.is_none() - && self.local_chain.is_empty() - && self.tx_graph.is_empty() - && self.indexer.is_empty() - } -} diff --git a/examples/example_electrum/Cargo.toml b/examples/example_electrum/Cargo.toml deleted file mode 100644 index 9dcd54000..000000000 --- a/examples/example_electrum/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "example_electrum" -version = "0.2.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -bdk_chain = { path = "../../crates/chain", features = ["serde"] } -bdk_electrum = { path = "../../crates/electrum" } -example_cli = { path = "../example_cli" } diff --git a/examples/example_electrum/src/main.rs b/examples/example_electrum/src/main.rs deleted file mode 100644 index aa89f07e1..000000000 --- a/examples/example_electrum/src/main.rs +++ /dev/null @@ -1,283 +0,0 @@ -use std::io::{self, Write}; - -use bdk_chain::{ - bitcoin::Network, - collections::BTreeSet, - indexed_tx_graph, - spk_client::{FullScanRequest, SyncRequest}, - CanonicalizationParams, ConfirmationBlockTime, Merge, -}; -use bdk_electrum::{ - electrum_client::{self, Client, ElectrumApi}, - BdkElectrumClient, -}; -use example_cli::{ - self, - anyhow::{self, Context}, - clap::{self, Parser, Subcommand}, - ChangeSet, Keychain, -}; - -const DB_MAGIC: &[u8] = b"bdk_example_electrum"; -const DB_PATH: &str = ".bdk_example_electrum.db"; - -#[derive(Subcommand, Debug, Clone)] -enum ElectrumCommands { - /// Scans the addresses in the wallet using the electrum API. - Scan { - /// When a gap this large has been found for a keychain, it will stop. - #[clap(long, default_value = "5")] - stop_gap: usize, - #[clap(flatten)] - scan_options: ScanOptions, - #[clap(flatten)] - electrum_args: ElectrumArgs, - }, - /// Scans particular addresses using the electrum API. - Sync { - /// Scan all the unused addresses. - #[clap(long)] - unused_spks: bool, - /// Scan every address that you have derived. 
- #[clap(long)] - all_spks: bool, - /// Scan unspent outpoints for spends or changes to confirmation status of residing tx. - #[clap(long)] - utxos: bool, - /// Scan unconfirmed transactions for updates. - #[clap(long)] - unconfirmed: bool, - #[clap(flatten)] - scan_options: ScanOptions, - #[clap(flatten)] - electrum_args: ElectrumArgs, - }, -} - -impl ElectrumCommands { - fn electrum_args(&self) -> ElectrumArgs { - match self { - ElectrumCommands::Scan { electrum_args, .. } => electrum_args.clone(), - ElectrumCommands::Sync { electrum_args, .. } => electrum_args.clone(), - } - } -} - -#[derive(clap::Args, Debug, Clone)] -pub struct ElectrumArgs { - /// The electrum url to use to connect to. If not provided it will use a default electrum - /// server for your chosen network. - electrum_url: Option, -} - -impl ElectrumArgs { - pub fn client(&self, network: Network) -> anyhow::Result { - let electrum_url = self.electrum_url.as_deref().unwrap_or(match network { - Network::Bitcoin => "ssl://electrum.blockstream.info:50002", - Network::Testnet => "ssl://electrum.blockstream.info:60002", - Network::Regtest => "tcp://localhost:60401", - Network::Signet => "tcp://signet-electrumx.wakiyamap.dev:50001", - _ => panic!("Unknown network"), - }); - let config = electrum_client::Config::builder() - .validate_domain(matches!(network, Network::Bitcoin)) - .build(); - - Ok(electrum_client::Client::from_config(electrum_url, config)?) - } -} - -#[derive(Parser, Debug, Clone, PartialEq)] -pub struct ScanOptions { - /// Set batch size for each script_history call to electrum client. - #[clap(long, default_value = "25")] - pub batch_size: usize, -} - -fn main() -> anyhow::Result<()> { - let example_cli::Init { - args, - graph, - chain, - db, - network, - } = match example_cli::init_or_load::(DB_MAGIC, DB_PATH)? { - Some(init) => init, - None => return Ok(()), - }; - - let electrum_cmd = match &args.command { - example_cli::Commands::ChainSpecific(electrum_cmd) => electrum_cmd, - general_cmd => { - return example_cli::handle_commands( - &graph, - &chain, - &db, - network, - |electrum_args, tx| { - let client = electrum_args.client(network)?; - client.transaction_broadcast(tx)?; - Ok(()) - }, - general_cmd.clone(), - ); - } - }; - - let client = BdkElectrumClient::new(electrum_cmd.electrum_args().client(network)?); - - // Tell the electrum client about the txs and anchors we've already got locally so it doesn't - // re-download .them - { - let graph = graph.lock().unwrap(); - client.populate_tx_cache(graph.graph().full_txs().map(|tx_node| tx_node.tx)); - client.populate_anchor_cache(graph.graph().all_anchors().clone()); - } - - let (chain_update, tx_update, keychain_update) = match electrum_cmd.clone() { - ElectrumCommands::Scan { - stop_gap, - scan_options, - .. 
-        } => {
-            let request = {
-                let graph = &*graph.lock().unwrap();
-                let chain = &*chain.lock().unwrap();
-
-                FullScanRequest::builder()
-                    .chain_tip(chain.tip())
-                    .spks_for_keychain(
-                        Keychain::External,
-                        graph
-                            .index
-                            .unbounded_spk_iter(Keychain::External)
-                            .into_iter()
-                            .flatten(),
-                    )
-                    .spks_for_keychain(
-                        Keychain::Internal,
-                        graph
-                            .index
-                            .unbounded_spk_iter(Keychain::Internal)
-                            .into_iter()
-                            .flatten(),
-                    )
-                    .inspect({
-                        let mut once = BTreeSet::new();
-                        move |k, spk_i, _| {
-                            if once.insert(k) {
-                                eprint!("\nScanning {k}: {spk_i} ");
-                            } else {
-                                eprint!("{spk_i} ");
-                            }
-                            io::stdout().flush().expect("must flush");
-                        }
-                    })
-            };
-
-            let res = client
-                .full_scan::<_>(request, stop_gap, scan_options.batch_size, false)
-                .context("scanning the blockchain")?;
-            (
-                res.chain_update,
-                res.tx_update,
-                Some(res.last_active_indices),
-            )
-        }
-        ElectrumCommands::Sync {
-            mut unused_spks,
-            all_spks,
-            mut utxos,
-            mut unconfirmed,
-            scan_options,
-            ..
-        } => {
-            // Get a short lock on the tracker to get the spks we're interested in
-            let graph = graph.lock().unwrap();
-            let chain = chain.lock().unwrap();
-
-            if !(all_spks || unused_spks || utxos || unconfirmed) {
-                unused_spks = true;
-                unconfirmed = true;
-                utxos = true;
-            } else if all_spks {
-                unused_spks = false;
-            }
-
-            let chain_tip = chain.tip();
-            let mut request =
-                SyncRequest::builder()
-                    .chain_tip(chain_tip.clone())
-                    .inspect(|item, progress| {
-                        let pc = (100 * progress.consumed()) as f32 / progress.total() as f32;
-                        eprintln!("[ SCANNING {pc:03.0}% ] {item}");
-                    });
-
-            let canonical_view = graph.canonical_view(
-                &*chain,
-                chain_tip.block_id(),
-                CanonicalizationParams::default(),
-            );
-
-            request = request
-                .expected_spk_txids(canonical_view.list_expected_spk_txids(&graph.index, ..));
-            if all_spks {
-                request = request.spks_with_indexes(graph.index.revealed_spks(..));
-            }
-            if unused_spks {
-                request = request.spks_with_indexes(graph.index.unused_spks());
-            }
-            if utxos {
-                let init_outpoints = graph.index.outpoints();
-                request = request.outpoints(
-                    canonical_view
-                        .filter_unspent_outpoints(init_outpoints.iter().cloned())
-                        .map(|(_, utxo)| utxo.outpoint),
-                );
-            };
-            if unconfirmed {
-                request = request.txids(
-                    canonical_view
-                        .txs()
-                        .filter(|canonical_tx| !canonical_tx.pos.is_confirmed())
-                        .map(|canonical_tx| canonical_tx.txid),
-                );
-            }
-
-            let res = client
-                .sync(request, scan_options.batch_size, false)
-                .context("scanning the blockchain")?;
-
-            // drop lock on graph and chain
-            drop((graph, chain));
-
-            (res.chain_update, res.tx_update, None)
-        }
-    };
-
-    let db_changeset = {
-        let mut chain = chain.lock().unwrap();
-        let mut graph = graph.lock().unwrap();
-
-        let chain_changeset = chain.apply_update(chain_update.expect("request has chain tip"))?;
-
-        let mut indexed_tx_graph_changeset =
-            indexed_tx_graph::ChangeSet::<ConfirmationBlockTime, keychain_txout::ChangeSet>::default();
-        if let Some(keychain_update) = keychain_update {
-            let keychain_changeset = graph.index.reveal_to_target_multi(&keychain_update);
-            indexed_tx_graph_changeset.merge(keychain_changeset.into());
-        }
-        indexed_tx_graph_changeset.merge(graph.apply_update(tx_update));
-
-        ChangeSet {
-            local_chain: chain_changeset,
-            tx_graph: indexed_tx_graph_changeset.tx_graph,
-            indexer: indexed_tx_graph_changeset.indexer,
-            ..Default::default()
-        }
-    };
-
-    let mut db = db.lock().unwrap();
-    db.append(&db_changeset)?;
-    Ok(())
-}
diff --git a/examples/example_esplora/Cargo.toml b/examples/example_esplora/Cargo.toml
deleted file mode 100644
index ccad862e9..000000000
--- a/examples/example_esplora/Cargo.toml
+++ /dev/null
@@ -1,12 +0,0 @@
-[package]
-name = "example_esplora"
-version = "0.1.0"
-edition = "2021"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-bdk_chain = { path = "../../crates/chain", features = ["serde"] }
-bdk_esplora = { path = "../../crates/esplora", features = ["blocking"] }
-example_cli = { path = "../example_cli" }
-
diff --git a/examples/example_esplora/src/main.rs b/examples/example_esplora/src/main.rs
deleted file mode 100644
index 99f72391c..000000000
--- a/examples/example_esplora/src/main.rs
+++ /dev/null
@@ -1,289 +0,0 @@
-use core::f32;
-use std::{
-    collections::BTreeSet,
-    io::{self, Write},
-};
-
-use bdk_chain::{
-    bitcoin::Network,
-    keychain_txout::FullScanRequestBuilderExt,
-    spk_client::{FullScanRequest, SyncRequest},
-    CanonicalizationParams, Merge,
-};
-use bdk_esplora::{esplora_client, EsploraExt};
-use example_cli::{
-    anyhow::{self, Context},
-    clap::{self, Parser, Subcommand},
-    ChangeSet, Keychain,
-};
-
-const DB_MAGIC: &[u8] = b"bdk_example_esplora";
-const DB_PATH: &str = ".bdk_example_esplora.db";
-
-#[derive(Subcommand, Debug, Clone)]
-enum EsploraCommands {
-    /// Scans the addresses in the wallet using the esplora API.
-    Scan {
-        /// When a gap this large has been found for a keychain, it will stop.
-        #[clap(long, short = 'g', default_value = "10")]
-        stop_gap: usize,
-        #[clap(flatten)]
-        scan_options: ScanOptions,
-        #[clap(flatten)]
-        esplora_args: EsploraArgs,
-    },
-    /// Scan for particular addresses and unconfirmed transactions using the esplora API.
-    Sync {
-        /// Scan all the unused addresses.
-        #[clap(long)]
-        unused_spks: bool,
-        /// Scan every address that you have derived.
-        #[clap(long)]
-        all_spks: bool,
-        /// Scan unspent outpoints for spends or changes to confirmation status of residing tx.
-        #[clap(long)]
-        utxos: bool,
-        /// Scan unconfirmed transactions for updates.
-        #[clap(long)]
-        unconfirmed: bool,
-        #[clap(flatten)]
-        scan_options: ScanOptions,
-        #[clap(flatten)]
-        esplora_args: EsploraArgs,
-    },
-}
-
-impl EsploraCommands {
-    fn esplora_args(&self) -> EsploraArgs {
-        match self {
-            EsploraCommands::Scan { esplora_args, .. } => esplora_args.clone(),
-            EsploraCommands::Sync { esplora_args, .. } => esplora_args.clone(),
-        }
-    }
-}
-
-#[derive(clap::Args, Debug, Clone)]
-pub struct EsploraArgs {
-    /// The esplora url endpoint to connect to.
-    #[clap(long, short = 'u', env = "ESPLORA_SERVER")]
-    esplora_url: Option<String>,
-}
-
-impl EsploraArgs {
-    pub fn client(&self, network: Network) -> anyhow::Result<esplora_client::BlockingClient> {
-        let esplora_url = self.esplora_url.as_deref().unwrap_or(match network {
-            Network::Bitcoin => "https://blockstream.info/api",
-            Network::Testnet => "https://blockstream.info/testnet/api",
-            Network::Regtest => "http://localhost:3002",
-            Network::Signet => "http://signet.bitcoindevkit.net",
-            _ => panic!("unsupported network"),
-        });
-
-        let client = esplora_client::Builder::new(esplora_url).build_blocking();
-        Ok(client)
-    }
-}
-
-#[derive(Parser, Debug, Clone, PartialEq)]
-pub struct ScanOptions {
-    /// Max number of concurrent esplora server requests.
-    #[clap(long, default_value = "2")]
-    pub parallel_requests: usize,
-}
-
-fn main() -> anyhow::Result<()> {
-    let example_cli::Init {
-        args,
-        graph,
-        chain,
-        db,
-        network,
-    } = match example_cli::init_or_load::<EsploraCommands, EsploraArgs>(DB_MAGIC, DB_PATH)? {
-        Some(init) => init,
-        None => return Ok(()),
-    };
-
-    let esplora_cmd = match &args.command {
-        // These are commands that are handled by this example (sync, scan).
-        example_cli::Commands::ChainSpecific(esplora_cmd) => esplora_cmd,
-        // These are general commands handled by example_cli. Execute the cmd and return.
-        general_cmd => {
-            return example_cli::handle_commands(
-                &graph,
-                &chain,
-                &db,
-                network,
-                |esplora_args, tx| {
-                    let client = esplora_args.client(network)?;
-                    client
-                        .broadcast(tx)
-                        .map(|_| ())
-                        .map_err(anyhow::Error::from)
-                },
-                general_cmd.clone(),
-            );
-        }
-    };
-
-    let client = esplora_cmd.esplora_args().client(network)?;
-    // Prepare the `IndexedTxGraph` and `LocalChain` updates based on whether we are scanning or
-    // syncing.
-    //
-    // Scanning: We are iterating through spks of all keychains and scanning for transactions for
-    // each spk. We start with the lowest derivation index spk and stop scanning after `stop_gap`
-    // number of consecutive spks have no transaction history. A Scan is done in situations of
-    // wallet restoration. It is a special case. Applications should use "sync" style updates
-    // after an initial scan.
-    //
-    // Syncing: We only check for specified spks, utxos and txids to update their confirmation
-    // status or fetch missing transactions.
-    let (local_chain_changeset, indexed_tx_graph_changeset) = match &esplora_cmd {
-        EsploraCommands::Scan {
-            stop_gap,
-            scan_options,
-            ..
-        } => {
-            let request = {
-                let chain_tip = chain.lock().expect("mutex must not be poisoned").tip();
-                let indexed_graph = &*graph.lock().expect("mutex must not be poisoned");
-                FullScanRequest::builder()
-                    .chain_tip(chain_tip)
-                    .spks_from_indexer(&indexed_graph.index)
-                    .inspect({
-                        let mut once = BTreeSet::<Keychain>::new();
-                        move |keychain, spk_i, _| {
-                            if once.insert(keychain) {
-                                eprint!("\nscanning {keychain}: ");
-                            }
-                            eprint!("{spk_i} ");
-                            // Flush early to ensure we print at every iteration.
-                            let _ = io::stderr().flush();
-                        }
-                    })
-                    .build()
-            };
-
-            // The client scans keychain spks for transaction histories, stopping after `stop_gap`
-            // is reached. It returns a `TxGraph` update (`tx_update`) and a structure that
-            // represents the last active spk derivation indices of keychains
-            // (`keychain_indices_update`).
-            let update = client
-                .full_scan(request, *stop_gap, scan_options.parallel_requests)
-                .context("scanning for transactions")?;
-
-            let mut graph = graph.lock().expect("mutex must not be poisoned");
-            let mut chain = chain.lock().expect("mutex must not be poisoned");
-            // Because we did a stop gap based scan we are likely to have some updates to our
-            // derivation indices. Usually before a scan you are on a fresh wallet with no
-            // addresses derived so we need to derive up to last active addresses the scan found
-            // before adding the transactions.
-            (
-                chain.apply_update(update.chain_update.expect("request included chain tip"))?,
-                {
-                    let index_changeset = graph
-                        .index
-                        .reveal_to_target_multi(&update.last_active_indices);
-                    let mut indexed_tx_graph_changeset = graph.apply_update(update.tx_update);
-                    indexed_tx_graph_changeset.merge(index_changeset.into());
-                    indexed_tx_graph_changeset
-                },
-            )
-        }
-        EsploraCommands::Sync {
-            mut unused_spks,
-            all_spks,
-            mut utxos,
-            mut unconfirmed,
-            scan_options,
-            ..
-        } => {
-            if !(*all_spks || unused_spks || utxos || unconfirmed) {
-                // If nothing is specifically selected, we select everything (except all spks).
-                unused_spks = true;
-                unconfirmed = true;
-                utxos = true;
-            } else if *all_spks {
-                // If all spks is selected, we don't need to also select unused spks (as unused spks
-                // is a subset of all spks).
-                unused_spks = false;
-            }
-
-            let local_tip = chain.lock().expect("mutex must not be poisoned").tip();
-            // Spks, outpoints and txids we want updates on will be accumulated here.
-            let mut request =
-                SyncRequest::builder()
-                    .chain_tip(local_tip.clone())
-                    .inspect(|item, progress| {
-                        let pc = (100 * progress.consumed()) as f32 / progress.total() as f32;
-                        eprintln!("[ SCANNING {pc:03.0}% ] {item}");
-                        // Flush early to ensure we print at every iteration.
-                        let _ = io::stderr().flush();
-                    });
-
-            // Get a short lock on the structures to get spks, utxos, and txs that we are interested
-            // in.
-            {
-                let graph = graph.lock().unwrap();
-                let chain = chain.lock().unwrap();
-                let canonical_view = graph.canonical_view(
-                    &*chain,
-                    local_tip.block_id(),
-                    CanonicalizationParams::default(),
-                );
-
-                request = request
-                    .expected_spk_txids(canonical_view.list_expected_spk_txids(&graph.index, ..));
-                if *all_spks {
-                    request = request.spks_with_indexes(graph.index.revealed_spks(..));
-                }
-                if unused_spks {
-                    request = request.spks_with_indexes(graph.index.unused_spks());
-                }
-                if utxos {
-                    // We want to search for whether the UTXO is spent, and spent by which
-                    // transaction. We provide the outpoint of the UTXO to
-                    // `EsploraExt::update_tx_graph_without_keychain`.
-                    let init_outpoints = graph.index.outpoints();
-                    request = request.outpoints(
-                        canonical_view
-                            .filter_unspent_outpoints(init_outpoints.iter().cloned())
-                            .map(|(_, utxo)| utxo.outpoint),
-                    );
-                };
-                if unconfirmed {
-                    // We want to search for whether the unconfirmed transaction is now confirmed.
-                    // We provide the unconfirmed txids to
-                    // `EsploraExt::update_tx_graph_without_keychain`.
-                    request = request.txids(
-                        canonical_view
-                            .txs()
-                            .filter(|canonical_tx| !canonical_tx.pos.is_confirmed())
-                            .map(|canonical_tx| canonical_tx.txid),
-                    );
-                }
-            }
-
-            let update = client.sync(request, scan_options.parallel_requests)?;
-
-            (
-                chain
-                    .lock()
-                    .unwrap()
-                    .apply_update(update.chain_update.expect("request has chain tip"))?,
-                graph.lock().unwrap().apply_update(update.tx_update),
-            )
-        }
-    };
-
-    println!();
-
-    // We persist the changes
-    let mut db = db.lock().unwrap();
-    db.append(&ChangeSet {
-        local_chain: local_chain_changeset,
-        tx_graph: indexed_tx_graph_changeset.tx_graph,
-        indexer: indexed_tx_graph_changeset.indexer,
-        ..Default::default()
-    })?;
-    Ok(())
-}