From 9f143a9742dc226b1a76d6a4cc275039b8385115 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 30 Jul 2023 16:11:30 -0400 Subject: [PATCH] Replace "coin" with "network" The Processor's coins folder referred to the networks it could process, as did its Coin trait. This, and other similar cases throughout the codebase, have now been corrected. Also corrects dated documentation for a key pair is confirmed under the validator-sets pallet. --- coordinator/src/main.rs | 4 +- coordinator/src/substrate/mod.rs | 6 +- docs/integrations/Instructions.md | 2 +- docs/protocol/Constants.md | 1 + docs/protocol/Validator Sets.md | 29 ++- processor/messages/src/lib.rs | 8 +- processor/src/additional_key.rs | 8 +- processor/src/db.rs | 12 +- processor/src/key_gen.rs | 91 ++++----- processor/src/lib.rs | 2 +- processor/src/main.rs | 173 +++++++++--------- processor/src/{coins => networks}/bitcoin.rs | 49 ++--- processor/src/{coins => networks}/mod.rs | 56 +++--- processor/src/{coins => networks}/monero.rs | 57 +++--- processor/src/plan.rs | 34 ++-- processor/src/scanner.rs | 147 +++++++-------- processor/src/scheduler.rs | 58 +++--- processor/src/signer.rs | 82 ++++----- processor/src/tests/addresses.rs | 46 ++--- processor/src/tests/key_gen.rs | 23 ++- processor/src/tests/literal/mod.rs | 8 +- processor/src/tests/mod.rs | 16 +- processor/src/tests/scanner.rs | 22 +-- processor/src/tests/signer.rs | 45 ++--- processor/src/tests/wallet.rs | 46 ++--- substrate/client/Cargo.toml | 6 +- substrate/client/src/lib.rs | 4 +- .../client/src/{coins => networks}/bitcoin.rs | 0 .../client/src/{coins => networks}/mod.rs | 0 .../client/src/{coins => networks}/monero.rs | 0 substrate/client/src/tests/mod.rs | 4 +- .../src/tests/{coins => networks}/bitcoin.rs | 0 .../src/tests/{coins => networks}/mod.rs | 0 .../src/tests/{coins => networks}/monero.rs | 0 substrate/primitives/src/lib.rs | 4 +- .../primitives/src/{coins.rs => networks.rs} | 0 tests/processor/src/networks.rs | 20 +- 
tests/processor/src/tests/batch.rs | 2 +- tests/processor/src/tests/key_gen.rs | 14 +- tests/processor/src/tests/send.rs | 4 +- 40 files changed, 551 insertions(+), 532 deletions(-) rename processor/src/{coins => networks}/bitcoin.rs (92%) rename processor/src/{coins => networks}/mod.rs (89%) rename processor/src/{coins => networks}/monero.rs (92%) rename substrate/client/src/{coins => networks}/bitcoin.rs (100%) rename substrate/client/src/{coins => networks}/mod.rs (100%) rename substrate/client/src/{coins => networks}/monero.rs (100%) rename substrate/client/src/tests/{coins => networks}/bitcoin.rs (100%) rename substrate/client/src/tests/{coins => networks}/mod.rs (100%) rename substrate/client/src/tests/{coins => networks}/monero.rs (100%) rename substrate/primitives/src/{coins.rs => networks.rs} (100%) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index a0d09cb1..c4f3643d 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -398,7 +398,7 @@ pub async fn handle_processors( key_gen::ProcessorMessage::Shares { id, shares } => { Some(Transaction::DkgShares(id.attempt, shares, Transaction::empty_signed())) } - key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, coin_key } => { + key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => { assert_eq!( id.set.network, msg.network, "processor claimed to be a different network than it was for GeneratedKeyPair", @@ -411,7 +411,7 @@ pub async fn handle_processors( id.set.network, ( Public(substrate_key), - coin_key + network_key .try_into() .expect("external key from processor exceeded max external key length"), ), diff --git a/coordinator/src/substrate/mod.rs b/coordinator/src/substrate/mod.rs index db00eff6..7b4fa078 100644 --- a/coordinator/src/substrate/mod.rs +++ b/coordinator/src/substrate/mod.rs @@ -102,7 +102,7 @@ async fn handle_key_gen( processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair { context: SubstrateContext { 
serai_time: block.time().unwrap(), - coin_latest_finalized_block: serai + network_latest_finalized_block: serai .get_latest_block_for_network(block.hash(), set.network) .await? // The processor treats this as a magic value which will cause it to find a network @@ -176,7 +176,7 @@ async fn handle_batch_and_burns( assert_eq!(HashSet::<&_>::from_iter(networks_with_event.iter()).len(), networks_with_event.len()); for network in networks_with_event { - let coin_latest_finalized_block = if let Some(block) = batch_block.remove(&network) { + let network_latest_finalized_block = if let Some(block) = batch_block.remove(&network) { block } else { // If it's had a batch or a burn, it must have had a block acknowledged @@ -194,7 +194,7 @@ async fn handle_batch_and_burns( processor_messages::substrate::CoordinatorMessage::SubstrateBlock { context: SubstrateContext { serai_time: block.time().unwrap(), - coin_latest_finalized_block, + network_latest_finalized_block, }, network, block: block.number(), diff --git a/docs/integrations/Instructions.md b/docs/integrations/Instructions.md index 43516214..f509ca71 100644 --- a/docs/integrations/Instructions.md +++ b/docs/integrations/Instructions.md @@ -8,7 +8,7 @@ encoded into transactions on connected networks. Serai will parse included instructions when it receives coins, executing the included specs. - Out Instructions detail how to transfer coins, either to a Serai address or -an address native to the coin in question. +an address native to the network of the coins in question. A transaction containing an In Instruction and an Out Instruction (to a native address) will receive coins to Serai and send coins from Serai, without diff --git a/docs/protocol/Constants.md b/docs/protocol/Constants.md index dacfae1d..88f73093 100644 --- a/docs/protocol/Constants.md +++ b/docs/protocol/Constants.md @@ -15,6 +15,7 @@ protocol. 
| Session | u32 | | Validator Set | (Session, NetworkId) | | Key | BoundedVec\ | +| KeyPair | (SeraiAddress, Key) | | ExternalAddress | BoundedVec\ | | Data | BoundedVec\ | diff --git a/docs/protocol/Validator Sets.md b/docs/protocol/Validator Sets.md index 1cb5b460..582fb3c0 100644 --- a/docs/protocol/Validator Sets.md +++ b/docs/protocol/Validator Sets.md @@ -30,19 +30,16 @@ reject newly added coins which would cross that threshold. Multisigs are created by processors, communicating via their Coordinators. They're then confirmed on chain via the `validator-sets` pallet. This is done by having 100% of participants agree on the resulting group key. While this isn't -fault tolerant, a malicious actor who forces a `t`-of-`n` multisig to be -`t`-of-`n-1` reduces the fault tolerance of the multisig which is a greater -issue. If a node does prevent multisig creation, other validators should issue -slashes for it/remove it from the Validator Set entirely. +fault tolerant regarding liveliness, a malicious actor who forces a `t`-of-`n` +multisig to be `t`-of-`n-1` reduces the fault tolerance of the created multisig +which is a greater issue. If a node does prevent multisig creation, other +validators should issue slashes for it/remove it from the Validator Set +entirely. -Due to the fact multiple key generations may occur to account for -faulty/malicious nodes, voting on multiple keys for a single coin is allowed, -with the first key to be confirmed becoming the key for that coin. - -Placing it on chain also solves the question of if the multisig was successfully -created or not. Processors cannot simply ask each other if they succeeded -without creating an instance of the Byzantine Generals Problem. Placing results -within a Byzantine Fault Tolerant system resolves this. +Placing the creation on chain also solves the question of if the multisig was +successfully created or not. 
Processors cannot simply ask each other if they +succeeded without creating an instance of the Byzantine Generals Problem. +Placing results within a Byzantine Fault Tolerant system resolves this. ### Multisig Lifetime @@ -61,9 +58,11 @@ no longer eligible to receive coins and they forward all of their coins to the new set of keys. It is only then that validators in the previous instance of the set, yet not the current instance, may unbond their stake. -### Vote (message) +### Set Keys (message) - - `coin` (Coin): Coin whose key is being voted for. - - `key` (Key): Key being voted on. + - `network` (Network): Network whose key is being voted for. + - `key_pair` (KeyPair): Key pair being set for this `Session`. + - `signature` (Signature): A MuSig-style signature of all validators, +confirming this key. Once a key is voted on by every member, it's adopted as detailed above. diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index ea1cadbd..d47ae233 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -14,7 +14,7 @@ use validator_sets_primitives::{ValidatorSet, KeyPair}; #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, Serialize, Deserialize)] pub struct SubstrateContext { pub serai_time: u64, - pub coin_latest_finalized_block: BlockHash, + pub network_latest_finalized_block: BlockHash, } pub mod key_gen { @@ -50,7 +50,7 @@ pub mod key_gen { // Created shares for the specified key generation protocol. Shares { id: KeyGenId, shares: HashMap> }, // Resulting keys from the specified key generation protocol. - GeneratedKeyPair { id: KeyGenId, substrate_key: [u8; 32], coin_key: Vec }, + GeneratedKeyPair { id: KeyGenId, substrate_key: [u8; 32], network_key: Vec }, } } @@ -165,7 +165,7 @@ pub mod substrate { CoordinatorMessage::ConfirmKeyPair { context, .. } => context, CoordinatorMessage::SubstrateBlock { context, .. 
} => context, }; - Some(context.coin_latest_finalized_block) + Some(context.network_latest_finalized_block) } } @@ -263,7 +263,7 @@ impl CoordinatorMessage { } CoordinatorMessage::Sign(msg) => { let (sub, id) = match msg { - // Unique since SignId includes a hash of the coin, and specific transaction info + // Unique since SignId includes a hash of the network, and specific transaction info sign::CoordinatorMessage::Preprocesses { id, .. } => (0, bincode::serialize(id).unwrap()), sign::CoordinatorMessage::Shares { id, .. } => (1, bincode::serialize(id).unwrap()), sign::CoordinatorMessage::Reattempt { id } => (2, bincode::serialize(id).unwrap()), diff --git a/processor/src/additional_key.rs b/processor/src/additional_key.rs index 0c492581..f875950d 100644 --- a/processor/src/additional_key.rs +++ b/processor/src/additional_key.rs @@ -1,14 +1,14 @@ use ciphersuite::Ciphersuite; -use crate::coins::Coin; +use crate::networks::Network; // Generate a static additional key for a given chain in a globally consistent manner // Doesn't consider the current group key to increase the simplicity of verifying Serai's status // Takes an index, k, to support protocols which use multiple secondary keys // Presumably a view key -pub fn additional_key(k: u64) -> ::F { - ::hash_to_F( +pub fn additional_key(k: u64) -> ::F { + ::hash_to_F( b"Serai DEX Additional Key", - &[C::ID.as_bytes(), &k.to_le_bytes()].concat(), + &[N::ID.as_bytes(), &k.to_le_bytes()].concat(), ) } diff --git a/processor/src/db.rs b/processor/src/db.rs index d9803b77..916c35dd 100644 --- a/processor/src/db.rs +++ b/processor/src/db.rs @@ -2,11 +2,11 @@ use core::marker::PhantomData; pub use serai_db::*; -use crate::{Plan, coins::Coin}; +use crate::{Plan, networks::Network}; #[derive(Debug)] -pub struct MainDb(D, PhantomData); -impl MainDb { +pub struct MainDb(D, PhantomData); +impl MainDb { pub fn new(db: D) -> Self { Self(db, PhantomData) } @@ -31,7 +31,7 @@ impl MainDb { fn signing_key(key: &[u8]) -> Vec { 
Self::main_key(b"signing", key) } - pub fn save_signing(txn: &mut D::Transaction<'_>, key: &[u8], block_number: u64, plan: &Plan) { + pub fn save_signing(txn: &mut D::Transaction<'_>, key: &[u8], block_number: u64, plan: &Plan) { let id = plan.id(); { @@ -56,7 +56,7 @@ impl MainDb { } } - pub fn signing(&self, key: &[u8]) -> Vec<(u64, Plan)> { + pub fn signing(&self, key: &[u8]) -> Vec<(u64, Plan)> { let signing = self.0.get(Self::signing_key(key)).unwrap_or(vec![]); let mut res = vec![]; @@ -66,7 +66,7 @@ impl MainDb { let buf = self.0.get(Self::plan_key(id)).unwrap(); let block_number = u64::from_le_bytes(buf[.. 8].try_into().unwrap()); - let plan = Plan::::read::<&[u8]>(&mut &buf[16 ..]).unwrap(); + let plan = Plan::::read::<&[u8]>(&mut &buf[16 ..]).unwrap(); assert_eq!(id, &plan.id()); res.push((block_number, plan)); } diff --git a/processor/src/key_gen.rs b/processor/src/key_gen.rs index 9b015d78..6901df08 100644 --- a/processor/src/key_gen.rs +++ b/processor/src/key_gen.rs @@ -18,17 +18,17 @@ use log::info; use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair}; use messages::key_gen::*; -use crate::{Get, DbTxn, Db, coins::Coin}; +use crate::{Get, DbTxn, Db, networks::Network}; #[derive(Debug)] pub struct KeyConfirmed { pub substrate_keys: ThresholdKeys, - pub coin_keys: ThresholdKeys, + pub network_keys: ThresholdKeys, } #[derive(Clone, Debug)] -struct KeyGenDb(PhantomData, PhantomData); -impl KeyGenDb { +struct KeyGenDb(PhantomData, PhantomData); +impl KeyGenDb { fn key_gen_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec { D::key(b"KEY_GEN", dst, key) } @@ -71,39 +71,42 @@ impl KeyGenDb { txn: &mut D::Transaction<'_>, id: &KeyGenId, substrate_keys: &ThresholdCore, - coin_keys: &ThresholdKeys, + network_keys: &ThresholdKeys, ) { let mut keys = substrate_keys.serialize(); - keys.extend(coin_keys.serialize().iter()); + keys.extend(network_keys.serialize().iter()); txn.put( Self::generated_keys_key( id.set, - 
(substrate_keys.group_key().to_bytes().as_ref(), coin_keys.group_key().to_bytes().as_ref()), + ( + substrate_keys.group_key().to_bytes().as_ref(), + network_keys.group_key().to_bytes().as_ref(), + ), ), keys, ); } - fn keys_key(key: &::G) -> Vec { + fn keys_key(key: &::G) -> Vec { Self::key_gen_key(b"keys", key.to_bytes()) } #[allow(clippy::type_complexity)] fn read_keys( getter: &G, key: &[u8], - ) -> (Vec, (ThresholdKeys, ThresholdKeys)) { + ) -> (Vec, (ThresholdKeys, ThresholdKeys)) { let keys_vec = getter.get(key).unwrap(); let mut keys_ref: &[u8] = keys_vec.as_ref(); let substrate_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap()); - let mut coin_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap()); - C::tweak_keys(&mut coin_keys); - (keys_vec, (substrate_keys, coin_keys)) + let mut network_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap()); + N::tweak_keys(&mut network_keys); + (keys_vec, (substrate_keys, network_keys)) } fn confirm_keys( txn: &mut D::Transaction<'_>, set: ValidatorSet, key_pair: KeyPair, - ) -> (ThresholdKeys, ThresholdKeys) { + ) -> (ThresholdKeys, ThresholdKeys) { let (keys_vec, keys) = Self::read_keys( txn, &Self::generated_keys_key(set, (key_pair.0.as_ref(), key_pair.1.as_ref())), @@ -111,8 +114,8 @@ impl KeyGenDb { assert_eq!(key_pair.0 .0, keys.0.group_key().to_bytes()); assert_eq!( { - let coin_key: &[u8] = key_pair.1.as_ref(); - coin_key + let network_key: &[u8] = key_pair.1.as_ref(); + network_key }, keys.1.group_key().to_bytes().as_ref(), ); @@ -121,8 +124,8 @@ impl KeyGenDb { } fn keys( getter: &G, - key: &::G, - ) -> (ThresholdKeys, ThresholdKeys) { + key: &::G, + ) -> (ThresholdKeys, ThresholdKeys) { let res = Self::read_keys(getter, &Self::keys_key(key)).1; assert_eq!(&res.1.group_key(), key); res @@ -133,32 +136,32 @@ impl KeyGenDb { /// 1) It either didn't send its response, so the attempt will be aborted /// 2) It did send its response, and has locally saved enough 
data to continue #[derive(Debug)] -pub struct KeyGen { +pub struct KeyGen { db: D, entropy: Zeroizing<[u8; 32]>, active_commit: - HashMap, SecretShareMachine)>, - active_share: HashMap, KeyMachine)>, + HashMap, SecretShareMachine)>, + active_share: HashMap, KeyMachine)>, } -impl KeyGen { +impl KeyGen { #[allow(clippy::new_ret_no_self)] - pub fn new(db: D, entropy: Zeroizing<[u8; 32]>) -> KeyGen { + pub fn new(db: D, entropy: Zeroizing<[u8; 32]>) -> KeyGen { KeyGen { db, entropy, active_commit: HashMap::new(), active_share: HashMap::new() } } pub fn keys( &self, - key: &::G, - ) -> (ThresholdKeys, ThresholdKeys) { + key: &::G, + ) -> (ThresholdKeys, ThresholdKeys) { // This is safe, despite not having a txn, since it's a static value // The only concern is it may not be set when expected, or it may be set unexpectedly // Since this unwraps, it being unset when expected to be set will cause a panic // The only other concern is if it's set when it's not safe to use // The keys are only written on confirmation, and the transaction writing them is atomic to // every associated operation - KeyGenDb::::keys(&self.db, key) + KeyGenDb::::keys(&self.db, key) } pub async fn handle( @@ -187,8 +190,8 @@ impl KeyGen { let key_gen_machines = |id, params| { let mut rng = coefficients_rng(id); let substrate = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng); - let coin = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng); - ((substrate.0, coin.0), (substrate.1, coin.1)) + let network = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng); + ((substrate.0, network.0), (substrate.1, network.1)) }; match msg { @@ -200,7 +203,7 @@ impl KeyGen { self.active_share.remove(&id.set).is_none() { // If we haven't handled this set before, save the params - KeyGenDb::::save_params(txn, &id.set, ¶ms); + KeyGenDb::::save_params(txn, &id.set, ¶ms); } let (machines, commitments) = key_gen_machines(id, params); @@ -221,7 +224,7 @@ 
impl KeyGen { panic!("commitments when already handled commitments"); } - let params = KeyGenDb::::params(txn, &id.set); + let params = KeyGenDb::::params(txn, &id.set); // Unwrap the machines, rebuilding them if we didn't have them in our cache // We won't if the processor rebooted @@ -264,7 +267,7 @@ impl KeyGen { let (substrate_machine, mut substrate_shares) = handle_machine::(&mut rng, params, machines.0, &mut commitments_ref); - let (coin_machine, coin_shares) = + let (network_machine, network_shares) = handle_machine(&mut rng, params, machines.1, &mut commitments_ref); for (_, commitments) in commitments_ref { @@ -273,15 +276,15 @@ impl KeyGen { } } - self.active_share.insert(id.set, (substrate_machine, coin_machine)); + self.active_share.insert(id.set, (substrate_machine, network_machine)); let mut shares: HashMap<_, _> = substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect(); for (i, share) in shares.iter_mut() { - share.extend(coin_shares[i].serialize()); + share.extend(network_shares[i].serialize()); } - KeyGenDb::::save_commitments(txn, &id, &commitments); + KeyGenDb::::save_commitments(txn, &id, &commitments); ProcessorMessage::Shares { id, shares } } @@ -289,13 +292,13 @@ impl KeyGen { CoordinatorMessage::Shares { id, shares } => { info!("Received shares for {:?}", id); - let params = KeyGenDb::::params(txn, &id.set); + let params = KeyGenDb::::params(txn, &id.set); // Same commentary on inconsistency as above exists let machines = self.active_share.remove(&id.set).unwrap_or_else(|| { let machines = key_gen_machines(id, params).0; let mut rng = secret_shares_rng(id); - let commitments = KeyGenDb::::commitments(txn, &id); + let commitments = KeyGenDb::::commitments(txn, &id); let mut commitments_ref: HashMap = commitments.iter().map(|(i, commitments)| (*i, commitments.as_ref())).collect(); @@ -358,7 +361,7 @@ impl KeyGen { } let substrate_keys = handle_machine(&mut rng, params, machines.0, &mut shares_ref); - let coin_keys = 
handle_machine(&mut rng, params, machines.1, &mut shares_ref); + let network_keys = handle_machine(&mut rng, params, machines.1, &mut shares_ref); for (_, shares) in shares_ref { if !shares.is_empty() { @@ -366,15 +369,15 @@ impl KeyGen { } } - let mut coin_keys = ThresholdKeys::new(coin_keys); - C::tweak_keys(&mut coin_keys); + let mut network_keys = ThresholdKeys::new(network_keys); + N::tweak_keys(&mut network_keys); - KeyGenDb::::save_keys(txn, &id, &substrate_keys, &coin_keys); + KeyGenDb::::save_keys(txn, &id, &substrate_keys, &network_keys); ProcessorMessage::GeneratedKeyPair { id, substrate_key: substrate_keys.group_key().to_bytes(), - coin_key: coin_keys.group_key().to_bytes().as_ref().to_vec(), + network_key: network_keys.group_key().to_bytes().as_ref().to_vec(), } } } @@ -385,16 +388,16 @@ impl KeyGen { txn: &mut D::Transaction<'_>, set: ValidatorSet, key_pair: KeyPair, - ) -> KeyConfirmed { - let (substrate_keys, coin_keys) = KeyGenDb::::confirm_keys(txn, set, key_pair); + ) -> KeyConfirmed { + let (substrate_keys, network_keys) = KeyGenDb::::confirm_keys(txn, set, key_pair); info!( "Confirmed key pair {} {} for set {:?}", hex::encode(substrate_keys.group_key().to_bytes()), - hex::encode(coin_keys.group_key().to_bytes()), + hex::encode(network_keys.group_key().to_bytes()), set, ); - KeyConfirmed { substrate_keys, coin_keys } + KeyConfirmed { substrate_keys, network_keys } } } diff --git a/processor/src/lib.rs b/processor/src/lib.rs index 265cbb5f..378b852d 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -1,7 +1,7 @@ mod plan; pub use plan::*; -pub mod coins; +pub mod networks; mod additional_key; pub use additional_key::additional_key; diff --git a/processor/src/main.rs b/processor/src/main.rs index 608a708a..bd6ef0eb 100644 --- a/processor/src/main.rs +++ b/processor/src/main.rs @@ -31,12 +31,12 @@ use message_queue::{Service, client::MessageQueue}; mod plan; pub use plan::*; -mod coins; -use coins::{OutputType, Output, PostFeeBranch, 
Block, Coin}; +mod networks; +use networks::{OutputType, Output, PostFeeBranch, Block, Network}; #[cfg(feature = "bitcoin")] -use coins::Bitcoin; +use networks::Bitcoin; #[cfg(feature = "monero")] -use coins::Monero; +use networks::Monero; mod additional_key; pub use additional_key::additional_key; @@ -65,9 +65,9 @@ use scheduler::Scheduler; #[cfg(test)] mod tests; -async fn get_latest_block_number(coin: &C) -> usize { +async fn get_latest_block_number(network: &N) -> usize { loop { - match coin.get_latest_block_number().await { + match network.get_latest_block_number().await { Ok(number) => { return number; } @@ -82,9 +82,9 @@ async fn get_latest_block_number(coin: &C) -> usize { } } -async fn get_block(coin: &C, block_number: usize) -> C::Block { +async fn get_block(network: &N, block_number: usize) -> N::Block { loop { - match coin.get_block(block_number).await { + match network.get_block(block_number).await { Ok(block) => { return block; } @@ -96,20 +96,20 @@ async fn get_block(coin: &C, block_number: usize) -> C::Block { } } -async fn get_fee(coin: &C, block_number: usize) -> C::Fee { +async fn get_fee(network: &N, block_number: usize) -> N::Fee { // TODO2: Use an fee representative of several blocks - get_block(coin, block_number).await.median_fee() + get_block(network, block_number).await.median_fee() } -async fn prepare_send( - coin: &C, - keys: ThresholdKeys, +async fn prepare_send( + network: &N, + keys: ThresholdKeys, block_number: usize, - fee: C::Fee, - plan: Plan, -) -> (Option<(C::SignableTransaction, C::Eventuality)>, Vec) { + fee: N::Fee, + plan: Plan, +) -> (Option<(N::SignableTransaction, N::Eventuality)>, Vec) { loop { - match coin.prepare_send(keys.clone(), block_number, plan.clone(), fee).await { + match network.prepare_send(keys.clone(), block_number, plan.clone(), fee).await { Ok(prepared) => { return prepared; } @@ -129,7 +129,7 @@ async fn prepare_send( // Items which are mutably borrowed by Tributary. 
// Any exceptions to this have to be carefully monitored in order to ensure consistency isn't // violated. -struct TributaryMutable { +struct TributaryMutable { // The following are actually mutably borrowed by Substrate as well. // - Substrate triggers key gens, and determines which to use. // - SubstrateBlock events cause scheduling which causes signing. @@ -148,8 +148,8 @@ struct TributaryMutable { // The only other note is how the scanner may cause a signer task to be dropped, effectively // invalidating the Tributary's mutable borrow. The signer is coded to allow for attempted usage // of a dropped task. - key_gen: KeyGen, - signers: HashMap, Signer>, + key_gen: KeyGen, + signers: HashMap, Signer>, // This is also mutably borrowed by the Scanner. // The Scanner starts new sign tasks. @@ -164,7 +164,7 @@ struct TributaryMutable { // Items which are mutably borrowed by Substrate. // Any exceptions to this have to be carefully monitored in order to ensure consistency isn't // violated. -struct SubstrateMutable { +struct SubstrateMutable { // The scanner is expected to autonomously operate, scanning blocks as they appear. // When a block is sufficiently confirmed, the scanner mutates the signer to try and get a Batch // signed. @@ -174,26 +174,26 @@ struct SubstrateMutable { // This can't be mutated as soon as a Batch is signed since the mutation which occurs then is // paired with the mutations caused by Burn events. Substrate's ordering determines if such a // pairing exists. - scanner: ScannerHandle, + scanner: ScannerHandle, // Schedulers take in new outputs, from the scanner, and payments, from Burn events on Substrate. // These are paired when possible, in the name of efficiency. Accordingly, both mutations must // happen by Substrate. 
- schedulers: HashMap, Scheduler>, + schedulers: HashMap, Scheduler>, } -async fn sign_plans( +async fn sign_plans( txn: &mut D::Transaction<'_>, - coin: &C, - substrate_mutable: &mut SubstrateMutable, - signers: &mut HashMap, Signer>, + network: &N, + substrate_mutable: &mut SubstrateMutable, + signers: &mut HashMap, Signer>, context: SubstrateContext, - plans: Vec>, + plans: Vec>, ) { let mut plans = VecDeque::from(plans); - let mut block_hash = >::Id::default(); - block_hash.as_mut().copy_from_slice(&context.coin_latest_finalized_block.0); + let mut block_hash = >::Id::default(); + block_hash.as_mut().copy_from_slice(&context.network_latest_finalized_block.0); // block_number call is safe since it unwraps let block_number = substrate_mutable .scanner @@ -201,16 +201,16 @@ async fn sign_plans( .await .expect("told to sign_plans on a context we're not synced to"); - let fee = get_fee(coin, block_number).await; + let fee = get_fee(network, block_number).await; while let Some(plan) = plans.pop_front() { let id = plan.id(); info!("preparing plan {}: {:?}", hex::encode(id), plan); let key = plan.key.to_bytes(); - MainDb::::save_signing(txn, key.as_ref(), block_number.try_into().unwrap(), &plan); + MainDb::::save_signing(txn, key.as_ref(), block_number.try_into().unwrap(), &plan); let (tx, branches) = - prepare_send(coin, signers.get_mut(key.as_ref()).unwrap().keys(), block_number, fee, plan) + prepare_send(network, signers.get_mut(key.as_ref()).unwrap().keys(), block_number, fee, plan) .await; for branch in branches { @@ -228,17 +228,17 @@ async fn sign_plans( } } -async fn handle_coordinator_msg( +async fn handle_coordinator_msg( txn: &mut D::Transaction<'_>, - coin: &C, + network: &N, coordinator: &mut Co, - tributary_mutable: &mut TributaryMutable, - substrate_mutable: &mut SubstrateMutable, + tributary_mutable: &mut TributaryMutable, + substrate_mutable: &mut SubstrateMutable, msg: &Message, ) { // If this message expects a higher block number than we have, halt 
until synced - async fn wait(scanner: &ScannerHandle, block_hash: &BlockHash) { - let mut needed_hash = >::Id::default(); + async fn wait(scanner: &ScannerHandle, block_hash: &BlockHash) { + let mut needed_hash = >::Id::default(); needed_hash.as_mut().copy_from_slice(&block_hash.0); let block_number = loop { @@ -249,7 +249,7 @@ async fn handle_coordinator_msg( warn!( "node is desynced. we haven't scanned {} which should happen after {} confirms", hex::encode(&needed_hash), - C::CONFIRMATIONS, + N::CONFIRMATIONS, ); sleep(Duration::from_secs(10)).await; continue; @@ -272,11 +272,11 @@ async fn handle_coordinator_msg( let synced = |context: &SubstrateContext, key| -> Result<(), ()> { // Check that we've synced this block and can actually operate on it ourselves let latest = scanner.latest_scanned(key); - if usize::try_from(context.coin_latest_finalized_block).unwrap() < latest { + if usize::try_from(context.network_latest_finalized_block).unwrap() < latest { log::warn!( - "coin node disconnected/desynced from rest of the network. \ + "external network node disconnected/desynced from rest of the network. 
\ our block: {latest:?}, network's acknowledged: {}", - context.coin_latest_finalized_block, + context.network_latest_finalized_block, ); Err(())?; } @@ -312,21 +312,21 @@ async fn handle_coordinator_msg( CoordinatorMessage::Substrate(msg) => { match msg { messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, set, key_pair } => { - // This is the first key pair for this coin so no block has been finalized yet - let activation_number = if context.coin_latest_finalized_block.0 == [0; 32] { + // This is the first key pair for this network so no block has been finalized yet + let activation_number = if context.network_latest_finalized_block.0 == [0; 32] { assert!(tributary_mutable.signers.is_empty()); assert!(tributary_mutable.substrate_signers.is_empty()); assert!(substrate_mutable.schedulers.is_empty()); - // Wait until a coin's block's time exceeds Serai's time - // TODO: This assumes the coin has a monotonic clock for its blocks' times, which + // Wait until a network's block's time exceeds Serai's time + // TODO: This assumes the network has a monotonic clock for its blocks' times, which // isn't a viable assumption // If the latest block number is 10, then the block indexed by 1 has 10 confirms // 10 + 1 - 10 = 1 while get_block( - coin, - (get_latest_block_number(coin).await + 1).saturating_sub(C::CONFIRMATIONS), + network, + (get_latest_block_number(network).await + 1).saturating_sub(N::CONFIRMATIONS), ) .await .time() < @@ -334,7 +334,7 @@ async fn handle_coordinator_msg( { info!( "serai confirmed the first key pair for a set. 
{} {}", - "we're waiting for a coin's finalized block's time to exceed unix time ", + "we're waiting for a network's finalized block's time to exceed unix time ", context.serai_time, ); sleep(Duration::from_secs(5)).await; @@ -342,13 +342,13 @@ async fn handle_coordinator_msg( // Find the first block to do so let mut earliest = - (get_latest_block_number(coin).await + 1).saturating_sub(C::CONFIRMATIONS); - assert!(get_block(coin, earliest).await.time() >= context.serai_time); + (get_latest_block_number(network).await + 1).saturating_sub(N::CONFIRMATIONS); + assert!(get_block(network, earliest).await.time() >= context.serai_time); // earliest > 0 prevents a panic if Serai creates keys before the genesis block // which... should be impossible // Yet a prevented panic is a prevented panic while (earliest > 0) && - (get_block(coin, earliest - 1).await.time() >= context.serai_time) + (get_block(network, earliest - 1).await.time() >= context.serai_time) { earliest -= 1; } @@ -356,8 +356,8 @@ async fn handle_coordinator_msg( // Use this as the activation block earliest } else { - let mut activation_block = >::Id::default(); - activation_block.as_mut().copy_from_slice(&context.coin_latest_finalized_block.0); + let mut activation_block = >::Id::default(); + activation_block.as_mut().copy_from_slice(&context.network_latest_finalized_block.0); // This block_number call is safe since it unwraps substrate_mutable .scanner @@ -369,38 +369,38 @@ async fn handle_coordinator_msg( info!("activating {set:?}'s keys at {activation_number}"); // See TributaryMutable's struct definition for why this block is safe - let KeyConfirmed { substrate_keys, coin_keys } = + let KeyConfirmed { substrate_keys, network_keys } = tributary_mutable.key_gen.confirm(txn, set, key_pair).await; tributary_mutable.substrate_signers.insert( substrate_keys.group_key().to_bytes().to_vec(), SubstrateSigner::new(substrate_keys), ); - let key = coin_keys.group_key(); + let key = network_keys.group_key(); 
substrate_mutable.scanner.rotate_key(txn, activation_number, key).await; substrate_mutable .schedulers - .insert(key.to_bytes().as_ref().to_vec(), Scheduler::::new::(txn, key)); + .insert(key.to_bytes().as_ref().to_vec(), Scheduler::::new::(txn, key)); tributary_mutable .signers - .insert(key.to_bytes().as_ref().to_vec(), Signer::new(coin.clone(), coin_keys)); + .insert(key.to_bytes().as_ref().to_vec(), Signer::new(network.clone(), network_keys)); } messages::substrate::CoordinatorMessage::SubstrateBlock { context, - network, + network: network_id, block, key: key_vec, burns, } => { - assert_eq!(network, C::NETWORK); + assert_eq!(network_id, N::NETWORK, "coordinator sent us data for another network"); - let mut block_id = >::Id::default(); - block_id.as_mut().copy_from_slice(&context.coin_latest_finalized_block.0); + let mut block_id = >::Id::default(); + block_id.as_mut().copy_from_slice(&context.network_latest_finalized_block.0); - let key = ::read_G::<&[u8]>(&mut key_vec.as_ref()).unwrap(); + let key = ::read_G::<&[u8]>(&mut key_vec.as_ref()).unwrap(); // We now have to acknowledge every block for this key up to the acknowledged block let (blocks, outputs) = @@ -418,9 +418,9 @@ async fn handle_coordinator_msg( instruction: OutInstruction { address, data }, balance, } = out; - assert_eq!(balance.coin.network(), C::NETWORK); + assert_eq!(balance.coin.network(), N::NETWORK); - if let Ok(address) = C::Address::try_from(address.consume()) { + if let Ok(address) = N::Address::try_from(address.consume()) { // TODO: Add coin to payment payments.push(Payment { address, @@ -439,7 +439,7 @@ async fn handle_coordinator_msg( coordinator .send(ProcessorMessage::Coordinator( messages::coordinator::ProcessorMessage::SubstrateBlockAck { - network, + network: N::NETWORK, block, plans: plans.iter().map(|plan| plan.id()).collect(), }, @@ -448,7 +448,7 @@ async fn handle_coordinator_msg( sign_plans( txn, - coin, + network, substrate_mutable, // See commentary in TributaryMutable for 
why this is safe &mut tributary_mutable.signers, @@ -462,10 +462,10 @@ async fn handle_coordinator_msg( } } -async fn boot( +async fn boot( raw_db: &mut D, - coin: &C, -) -> (MainDb, TributaryMutable, SubstrateMutable) { + network: &N, +) -> (MainDb, TributaryMutable, SubstrateMutable) { let mut entropy_transcript = { let entropy = Zeroizing::new(env::var("ENTROPY").expect("entropy wasn't specified")); if entropy.len() != 64 { @@ -494,11 +494,11 @@ async fn boot( // We don't need to re-issue GenerateKey orders because the coordinator is expected to // schedule/notify us of new attempts - let key_gen = KeyGen::::new(raw_db.clone(), entropy(b"key-gen_entropy")); + let key_gen = KeyGen::::new(raw_db.clone(), entropy(b"key-gen_entropy")); // The scanner has no long-standing orders to re-issue - let (mut scanner, active_keys) = Scanner::new(coin.clone(), raw_db.clone()); + let (mut scanner, active_keys) = Scanner::new(network.clone(), raw_db.clone()); - let mut schedulers = HashMap::, Scheduler>::new(); + let mut schedulers = HashMap::, Scheduler>::new(); let mut substrate_signers = HashMap::new(); let mut signers = HashMap::new(); @@ -507,7 +507,7 @@ async fn boot( for key in &active_keys { schedulers.insert(key.to_bytes().as_ref().to_vec(), Scheduler::from_db(raw_db, *key).unwrap()); - let (substrate_keys, coin_keys) = key_gen.keys(key); + let (substrate_keys, network_keys) = key_gen.keys(key); let substrate_key = substrate_keys.group_key(); let substrate_signer = SubstrateSigner::new(substrate_keys); @@ -515,25 +515,25 @@ async fn boot( // necessary substrate_signers.insert(substrate_key.to_bytes().to_vec(), substrate_signer); - let mut signer = Signer::new(coin.clone(), coin_keys); + let mut signer = Signer::new(network.clone(), network_keys); // Load any TXs being actively signed let key = key.to_bytes(); for (block_number, plan) in main_db.signing(key.as_ref()) { let block_number = block_number.try_into().unwrap(); - let fee = get_fee(coin, block_number).await; + 
let fee = get_fee(network, block_number).await; let id = plan.id(); info!("reloading plan {}: {:?}", hex::encode(id), plan); let (Some((tx, eventuality)), _) = - prepare_send(coin, signer.keys(), block_number, fee, plan).await else { + prepare_send(network, signer.keys(), block_number, fee, plan).await else { panic!("previously created transaction is no longer being created") }; scanner.register_eventuality(block_number, id, eventuality.clone()).await; - // TODO: Reconsider if the Signer should have the eventuality, or if just the coin/scanner + // TODO: Reconsider if the Signer should have the eventuality, or if just the network/scanner // should let mut txn = raw_db.txn(); signer.sign_transaction(&mut txn, id, tx, eventuality).await; @@ -551,14 +551,15 @@ async fn boot( ) } -async fn run(mut raw_db: D, coin: C, mut coordinator: Co) { +async fn run(mut raw_db: D, network: N, mut coordinator: Co) { // We currently expect a contextless bidirectional mapping between these two values // (which is that any value of A can be interpreted as B and vice versa) // While we can write a contextual mapping, we have yet to do so - // This check ensures no coin which doesn't have a bidirectional mapping is defined - assert_eq!(>::Id::default().as_ref().len(), BlockHash([0u8; 32]).0.len()); + // This check ensures no network which doesn't have a bidirectional mapping is defined + assert_eq!(>::Id::default().as_ref().len(), BlockHash([0u8; 32]).0.len()); - let (mut main_db, mut tributary_mutable, mut substrate_mutable) = boot(&mut raw_db, &coin).await; + let (mut main_db, mut tributary_mutable, mut substrate_mutable) = + boot(&mut raw_db, &network).await; // We can't load this from the DB as we can't guarantee atomic increments with the ack function let mut last_coordinator_msg = None; @@ -625,7 +626,7 @@ async fn run(mut raw_db: D, coin: C, mut coordi // Only handle this if we haven't already if !main_db.handled_message(msg.id) { let mut txn = raw_db.txn(); - 
MainDb::::handle_message(&mut txn, msg.id); + MainDb::::handle_message(&mut txn, msg.id); // This is isolated to better think about how its ordered, or rather, about how the other // cases aren't ordered @@ -639,7 +640,7 @@ async fn run(mut raw_db: D, coin: C, mut coordi // references over the same data handle_coordinator_msg( &mut txn, - &coin, + &network, &mut coordinator, &mut tributary_mutable, &mut substrate_mutable, @@ -661,7 +662,7 @@ async fn run(mut raw_db: D, coin: C, mut coordi block_hash.copy_from_slice(block.as_ref()); let batch = Batch { - network: C::NETWORK, + network: N::NETWORK, id: batch, block: BlockHash(block_hash), instructions: outputs.iter().filter_map(|output| { diff --git a/processor/src/coins/bitcoin.rs b/processor/src/networks/bitcoin.rs similarity index 92% rename from processor/src/coins/bitcoin.rs rename to processor/src/networks/bitcoin.rs index a7e2682a..4cfede53 100644 --- a/processor/src/coins/bitcoin.rs +++ b/processor/src/networks/bitcoin.rs @@ -17,7 +17,7 @@ use bitcoin_serai::{ hashes::Hash as HashTrait, consensus::{Encodable, Decodable}, script::Instruction, - OutPoint, Transaction, Block, Network, + OutPoint, Transaction, Block, Network as BitcoinNetwork, }, wallet::{ tweak_keys, address, ReceivedOutput, Scanner, TransactionError, @@ -38,13 +38,13 @@ use bitcoin_serai::bitcoin::{ use serai_client::{ primitives::{MAX_DATA_LEN, Coin as SeraiCoin, NetworkId, Amount, Balance}, - coins::bitcoin::Address, + networks::bitcoin::Address, }; use crate::{ - coins::{ - CoinError, Block as BlockTrait, OutputType, Output as OutputTrait, - Transaction as TransactionTrait, Eventuality, EventualitiesTracker, PostFeeBranch, Coin, + networks::{ + NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, + Transaction as TransactionTrait, Eventuality, EventualitiesTracker, PostFeeBranch, Network, drop_branches, amortize_fee, }, Plan, @@ -144,13 +144,13 @@ impl TransactionTrait for Transaction { buf } #[cfg(test)] - async fn 
fee(&self, coin: &Bitcoin) -> u64 { + async fn fee(&self, network: &Bitcoin) -> u64 { let mut value = 0; for input in &self.input { let output = input.previous_output; let mut hash = *output.txid.as_raw_hash().as_byte_array(); hash.reverse(); - value += coin.rpc.get_transaction(&hash).await.unwrap().output + value += network.rpc.get_transaction(&hash).await.unwrap().output [usize::try_from(output.vout).unwrap()] .value; } @@ -280,7 +280,7 @@ impl Bitcoin { } #[async_trait] -impl Coin for Bitcoin { +impl Network for Bitcoin { type Curve = Secp256k1; type Fee = Fee; @@ -326,7 +326,7 @@ impl Coin for Bitcoin { } fn address(key: ProjectivePoint) -> Address { - Address(address(Network::Bitcoin, key).unwrap()) + Address(address(BitcoinNetwork::Bitcoin, key).unwrap()) } fn branch_address(key: ProjectivePoint) -> Self::Address { @@ -334,21 +334,21 @@ impl Coin for Bitcoin { Self::address(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch])) } - async fn get_latest_block_number(&self) -> Result { - self.rpc.get_latest_block_number().await.map_err(|_| CoinError::ConnectionError) + async fn get_latest_block_number(&self) -> Result { + self.rpc.get_latest_block_number().await.map_err(|_| NetworkError::ConnectionError) } - async fn get_block(&self, number: usize) -> Result { + async fn get_block(&self, number: usize) -> Result { let block_hash = - self.rpc.get_block_hash(number).await.map_err(|_| CoinError::ConnectionError)?; - self.rpc.get_block(&block_hash).await.map_err(|_| CoinError::ConnectionError) + self.rpc.get_block_hash(number).await.map_err(|_| NetworkError::ConnectionError)?; + self.rpc.get_block(&block_hash).await.map_err(|_| NetworkError::ConnectionError) } async fn get_outputs( &self, block: &Self::Block, key: ProjectivePoint, - ) -> Result, CoinError> { + ) -> Result, NetworkError> { let (scanner, _, kinds) = scanner(key); let mut outputs = vec![]; @@ -452,7 +452,8 @@ impl Coin for Bitcoin { _: usize, mut plan: Plan, fee: Fee, - ) -> 
Result<(Option<(SignableTransaction, Self::Eventuality)>, Vec), CoinError> { + ) -> Result<(Option<(SignableTransaction, Self::Eventuality)>, Vec), NetworkError> + { let signable = |plan: &Plan, tx_fee: Option<_>| { let mut payments = vec![]; for payment in &plan.payments { @@ -521,7 +522,7 @@ impl Coin for Bitcoin { async fn attempt_send( &self, transaction: Self::SignableTransaction, - ) -> Result { + ) -> Result { Ok( transaction .actual @@ -531,10 +532,10 @@ impl Coin for Bitcoin { ) } - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), CoinError> { + async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError> { match self.rpc.send_raw_transaction(tx).await { Ok(_) => (), - Err(RpcError::ConnectionError) => Err(CoinError::ConnectionError)?, + Err(RpcError::ConnectionError) => Err(NetworkError::ConnectionError)?, // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs // invalid transaction Err(e) => panic!("failed to publish TX {}: {e}", tx.txid()), @@ -542,8 +543,8 @@ impl Coin for Bitcoin { Ok(()) } - async fn get_transaction(&self, id: &[u8; 32]) -> Result { - self.rpc.get_transaction(id).await.map_err(|_| CoinError::ConnectionError) + async fn get_transaction(&self, id: &[u8; 32]) -> Result { + self.rpc.get_transaction(id).await.map_err(|_| NetworkError::ConnectionError) } fn confirm_completion(&self, eventuality: &OutPoint, tx: &Transaction) -> bool { @@ -566,7 +567,7 @@ impl Coin for Bitcoin { .rpc .rpc_call::>( "generatetoaddress", - serde_json::json!([1, BAddress::p2sh(Script::empty(), Network::Regtest).unwrap()]), + serde_json::json!([1, BAddress::p2sh(Script::empty(), BitcoinNetwork::Regtest).unwrap()]), ) .await .unwrap(); @@ -575,9 +576,9 @@ impl Coin for Bitcoin { #[cfg(test)] async fn test_send(&self, address: Self::Address) -> Block { let secret_key = SecretKey::new(&mut rand_core::OsRng); - let private_key = PrivateKey::new(secret_key, Network::Regtest); + let 
private_key = PrivateKey::new(secret_key, BitcoinNetwork::Regtest); let public_key = PublicKey::from_private_key(SECP256K1, &private_key); - let main_addr = BAddress::p2pkh(&public_key, Network::Regtest); + let main_addr = BAddress::p2pkh(&public_key, BitcoinNetwork::Regtest); let new_block = self.get_latest_block_number().await.unwrap() + 1; self diff --git a/processor/src/coins/mod.rs b/processor/src/networks/mod.rs similarity index 89% rename from processor/src/coins/mod.rs rename to processor/src/networks/mod.rs index 06649cea..dbf54bfd 100644 --- a/processor/src/coins/mod.rs +++ b/processor/src/networks/mod.rs @@ -25,8 +25,8 @@ pub use monero::Monero; use crate::{Payment, Plan}; #[derive(Clone, Copy, Error, Debug)] -pub enum CoinError { - #[error("failed to connect to coin daemon")] +pub enum NetworkError { + #[error("failed to connect to network daemon")] ConnectionError, } @@ -108,13 +108,13 @@ pub trait Output: Send + Sync + Sized + Clone + PartialEq + Eq + Debug { } #[async_trait] -pub trait Transaction: Send + Sync + Sized + Clone + Debug { +pub trait Transaction: Send + Sync + Sized + Clone + Debug { type Id: 'static + Id; fn id(&self) -> Self::Id; fn serialize(&self) -> Vec; #[cfg(test)] - async fn fee(&self, coin: &C) -> u64; + async fn fee(&self, network: &N) -> u64; } pub trait Eventuality: Send + Sync + Clone + Debug { @@ -171,13 +171,13 @@ impl Default for EventualitiesTracker { } } -pub trait Block: Send + Sync + Sized + Clone + Debug { +pub trait Block: Send + Sync + Sized + Clone + Debug { // This is currently bounded to being 32-bytes. type Id: 'static + Id; fn id(&self) -> Self::Id; fn parent(&self) -> Self::Id; fn time(&self) -> u64; - fn median_fee(&self) -> C::Fee; + fn median_fee(&self) -> N::Fee; } // The post-fee value of an expected branch. 
@@ -187,10 +187,10 @@ pub struct PostFeeBranch { } // Return the PostFeeBranches needed when dropping a transaction -pub fn drop_branches(plan: &Plan) -> Vec { +pub fn drop_branches(plan: &Plan) -> Vec { let mut branch_outputs = vec![]; for payment in &plan.payments { - if payment.address == C::branch_address(plan.key) { + if payment.address == N::branch_address(plan.key) { branch_outputs.push(PostFeeBranch { expected: payment.amount, actual: None }); } } @@ -198,7 +198,7 @@ pub fn drop_branches(plan: &Plan) -> Vec { } // Amortize a fee over the plan's payments -pub fn amortize_fee(plan: &mut Plan, tx_fee: u64) -> Vec { +pub fn amortize_fee(plan: &mut Plan, tx_fee: u64) -> Vec { // No payments to amortize over if plan.payments.is_empty() { return vec![]; @@ -211,11 +211,11 @@ pub fn amortize_fee(plan: &mut Plan, tx_fee: u64) -> Vec, per_output_fee| { + let post_fee = |payment: &Payment, per_output_fee| { let mut post_fee = payment.amount.checked_sub(per_output_fee); // If this is under our dust threshold, drop it if let Some(amount) = post_fee { - if amount < C::DUST { + if amount < N::DUST { post_fee = None; } } @@ -244,7 +244,7 @@ pub fn amortize_fee(plan: &mut Plan, tx_fee: u64) -> Vec(plan: &mut Plan, tx_fee: u64) -> Vec; - /// The type representing the block for this coin. + /// The type representing the block for this network. type Block: Block; /// The type containing all information on a scanned output. - // This is almost certainly distinct from the coin's native output type. + // This is almost certainly distinct from the network's native output type. type Output: Output; /// The type containing all information on a planned transaction, waiting to be signed. type SignableTransaction: Send + Sync + Clone + Debug; @@ -296,9 +296,9 @@ pub trait Coin: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { + TryInto> + TryFrom>; - /// Network ID for this coin. + /// Network ID for this network. const NETWORK: NetworkId; - /// String ID for this coin. 
+ /// String ID for this network. const ID: &'static str; /// The amount of confirmations required to consider a block 'final'. const CONFIRMATIONS: usize; @@ -314,7 +314,7 @@ pub trait Coin: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { /// Minimum output value which will be handled. const DUST: u64; - /// Tweak keys for this coin. + /// Tweak keys for this network. fn tweak_keys(key: &mut ThresholdKeys); /// Address for the given group key to receive external coins to. @@ -324,15 +324,15 @@ pub trait Coin: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { fn branch_address(key: ::G) -> Self::Address; /// Get the latest block's number. - async fn get_latest_block_number(&self) -> Result; + async fn get_latest_block_number(&self) -> Result; /// Get a block by its number. - async fn get_block(&self, number: usize) -> Result; + async fn get_block(&self, number: usize) -> Result; /// Get the outputs within a block for a specific key. async fn get_outputs( &self, block: &Self::Block, key: ::G, - ) -> Result, CoinError>; + ) -> Result, NetworkError>; /// Get the registered eventualities completed within this block, and any prior blocks which /// registered eventualities may have been completed in. @@ -353,23 +353,23 @@ pub trait Coin: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { fee: Self::Fee, ) -> Result< (Option<(Self::SignableTransaction, Self::Eventuality)>, Vec), - CoinError + NetworkError >; /// Attempt to sign a SignableTransaction. async fn attempt_send( &self, transaction: Self::SignableTransaction, - ) -> Result; + ) -> Result; /// Publish a transaction. - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), CoinError>; + async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError>; /// Get a transaction by its ID. async fn get_transaction( &self, id: &>::Id, - ) -> Result; + ) -> Result; /// Confirm a plan was completed by the specified transaction. 
// This is allowed to take shortcuts. diff --git a/processor/src/coins/monero.rs b/processor/src/networks/monero.rs similarity index 92% rename from processor/src/coins/monero.rs rename to processor/src/networks/monero.rs index 3e5adbb4..fe0f9cbf 100644 --- a/processor/src/coins/monero.rs +++ b/processor/src/networks/monero.rs @@ -20,7 +20,7 @@ use monero_serai::{ rpc::{RpcError, HttpRpc, Rpc}, wallet::{ ViewPair, Scanner, - address::{Network, SubaddressIndex, AddressSpec}, + address::{Network as MoneroNetwork, SubaddressIndex, AddressSpec}, Fee, SpendableOutput, Change, Decoys, TransactionError, SignableTransaction as MSignableTransaction, Eventuality, TransactionMachine, }, @@ -30,15 +30,15 @@ use tokio::time::sleep; pub use serai_client::{ primitives::{MAX_DATA_LEN, Coin as SeraiCoin, NetworkId, Amount, Balance}, - coins::monero::Address, + networks::monero::Address, }; use crate::{ Payment, Plan, additional_key, - coins::{ - CoinError, Block as BlockTrait, OutputType, Output as OutputTrait, + networks::{ + NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, Transaction as TransactionTrait, Eventuality as EventualityTrait, EventualitiesTracker, - PostFeeBranch, Coin, drop_branches, amortize_fee, + PostFeeBranch, Network, drop_branches, amortize_fee, }, }; @@ -179,7 +179,7 @@ impl Monero { fn address_internal(spend: EdwardsPoint, subaddress: Option) -> Address { Address::new(Self::view_pair(spend).address( - Network::Mainnet, + MoneroNetwork::Mainnet, AddressSpec::Featured { subaddress, payment_id: None, guaranteed: true }, )) .unwrap() @@ -205,12 +205,13 @@ impl Monero { #[cfg(test)] fn test_address() -> Address { - Address::new(Self::test_view_pair().address(Network::Mainnet, AddressSpec::Standard)).unwrap() + Address::new(Self::test_view_pair().address(MoneroNetwork::Mainnet, AddressSpec::Standard)) + .unwrap() } } #[async_trait] -impl Coin for Monero { +impl Network for Monero { type Curve = Ed25519; type Fee = Fee; @@ -249,18 +250,20 @@ 
impl Coin for Monero { Self::address_internal(key, BRANCH_SUBADDRESS) } - async fn get_latest_block_number(&self) -> Result { + async fn get_latest_block_number(&self) -> Result { // Monero defines height as chain length, so subtract 1 for block number - Ok(self.rpc.get_height().await.map_err(|_| CoinError::ConnectionError)? - 1) + Ok(self.rpc.get_height().await.map_err(|_| NetworkError::ConnectionError)? - 1) } - async fn get_block(&self, number: usize) -> Result { + async fn get_block(&self, number: usize) -> Result { Ok( self .rpc - .get_block(self.rpc.get_block_hash(number).await.map_err(|_| CoinError::ConnectionError)?) + .get_block( + self.rpc.get_block_hash(number).await.map_err(|_| NetworkError::ConnectionError)?, + ) .await - .map_err(|_| CoinError::ConnectionError)?, + .map_err(|_| NetworkError::ConnectionError)?, ) } @@ -268,11 +271,11 @@ impl Coin for Monero { &self, block: &Block, key: EdwardsPoint, - ) -> Result, CoinError> { + ) -> Result, NetworkError> { let mut txs = Self::scanner(key) .scan(&self.rpc, block) .await - .map_err(|_| CoinError::ConnectionError)? + .map_err(|_| NetworkError::ConnectionError)? 
.iter() .filter_map(|outputs| Some(outputs.not_locked()).filter(|outputs| !outputs.is_empty())) .collect::>(); @@ -316,7 +319,7 @@ impl Coin for Monero { } async fn check_block( - coin: &Monero, + network: &Monero, eventualities: &mut EventualitiesTracker, block: &Block, res: &mut HashMap<[u8; 32], [u8; 32]>, @@ -325,7 +328,7 @@ impl Coin for Monero { let tx = { let mut tx; while { - tx = coin.get_transaction(hash).await; + tx = network.get_transaction(hash).await; tx.is_err() } { log::error!("couldn't get transaction {}: {}", hex::encode(hash), tx.err().unwrap()); @@ -374,7 +377,7 @@ impl Coin for Monero { block_number: usize, mut plan: Plan, fee: Fee, - ) -> Result<(Option<(SignableTransaction, Eventuality)>, Vec), CoinError> { + ) -> Result<(Option<(SignableTransaction, Eventuality)>, Vec), NetworkError> { // Sanity check this has at least one output planned assert!((!plan.payments.is_empty()) || plan.change.is_some()); @@ -397,7 +400,7 @@ impl Coin for Monero { } // Check a fork hasn't occurred which this processor hasn't been updated for - assert_eq!(protocol, self.rpc.get_protocol().await.map_err(|_| CoinError::ConnectionError)?); + assert_eq!(protocol, self.rpc.get_protocol().await.map_err(|_| NetworkError::ConnectionError)?); let spendable_outputs = plan.inputs.iter().cloned().map(|input| input.0).collect::>(); @@ -413,7 +416,7 @@ impl Coin for Monero { &spendable_outputs, ) .await - .map_err(|_| CoinError::ConnectionError) + .map_err(|_| NetworkError::ConnectionError) .unwrap(); let inputs = spendable_outputs.into_iter().zip(decoys.into_iter()).collect::>(); @@ -428,7 +431,7 @@ impl Coin for Monero { plan.payments.push(Payment { address: Address::new( ViewPair::new(EdwardsPoint::generator().0, Zeroizing::new(Scalar::ONE.0)) - .address(Network::Mainnet, AddressSpec::Standard), + .address(MoneroNetwork::Mainnet, AddressSpec::Standard), ) .unwrap(), amount: 0, @@ -492,7 +495,7 @@ impl Coin for Monero { } TransactionError::RpcError(e) => { 
log::error!("RpcError when preparing transaction: {e:?}"); - Err(CoinError::ConnectionError) + Err(NetworkError::ConnectionError) } }, } @@ -520,25 +523,25 @@ impl Coin for Monero { async fn attempt_send( &self, transaction: SignableTransaction, - ) -> Result { + ) -> Result { match transaction.actual.clone().multisig(transaction.keys.clone(), transaction.transcript) { Ok(machine) => Ok(machine), Err(e) => panic!("failed to create a multisig machine for TX: {e}"), } } - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), CoinError> { + async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError> { match self.rpc.publish_transaction(tx).await { Ok(_) => Ok(()), - Err(RpcError::ConnectionError) => Err(CoinError::ConnectionError)?, + Err(RpcError::ConnectionError) => Err(NetworkError::ConnectionError)?, // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs // invalid transaction Err(e) => panic!("failed to publish TX {}: {e}", hex::encode(tx.hash())), } } - async fn get_transaction(&self, id: &[u8; 32]) -> Result { - self.rpc.get_transaction(*id).await.map_err(|_| CoinError::ConnectionError) + async fn get_transaction(&self, id: &[u8; 32]) -> Result { + self.rpc.get_transaction(*id).await.map_err(|_| NetworkError::ConnectionError) } fn confirm_completion(&self, eventuality: &Eventuality, tx: &Transaction) -> bool { diff --git a/processor/src/plan.rs b/processor/src/plan.rs index 6a0f08aa..12996604 100644 --- a/processor/src/plan.rs +++ b/processor/src/plan.rs @@ -4,16 +4,16 @@ use transcript::{Transcript, RecommendedTranscript}; use ciphersuite::group::GroupEncoding; use frost::curve::Ciphersuite; -use crate::coins::{Output, Coin}; +use crate::networks::{Output, Network}; #[derive(Clone, PartialEq, Eq, Debug)] -pub struct Payment { - pub address: C::Address, +pub struct Payment { + pub address: N::Address, pub data: Option>, pub amount: u64, } -impl Payment { +impl Payment { pub fn 
transcript(&self, transcript: &mut T) { transcript.domain_separate(b"payment"); transcript.append_message(b"address", self.address.to_string().as_bytes()); @@ -46,7 +46,7 @@ impl Payment { reader.read_exact(&mut buf)?; let mut address = vec![0; usize::try_from(u32::from_le_bytes(buf)).unwrap()]; reader.read_exact(&mut address)?; - let address = C::Address::try_from(address) + let address = N::Address::try_from(address) .map_err(|_| io::Error::new(io::ErrorKind::Other, "invalid address"))?; let mut buf = [0; 1]; @@ -70,13 +70,13 @@ impl Payment { } #[derive(Clone, PartialEq, Eq)] -pub struct Plan { - pub key: ::G, - pub inputs: Vec, - pub payments: Vec>, - pub change: Option<::G>, +pub struct Plan { + pub key: ::G, + pub inputs: Vec, + pub payments: Vec>, + pub change: Option<::G>, } -impl core::fmt::Debug for Plan { +impl core::fmt::Debug for Plan { fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { fmt .debug_struct("Plan") @@ -88,11 +88,11 @@ impl core::fmt::Debug for Plan { } } -impl Plan { +impl Plan { pub fn transcript(&self) -> RecommendedTranscript { let mut transcript = RecommendedTranscript::new(b"Serai Processor Plan ID"); transcript.domain_separate(b"meta"); - transcript.append_message(b"network", C::ID); + transcript.append_message(b"network", N::ID); transcript.append_message(b"key", self.key.to_bytes()); transcript.domain_separate(b"inputs"); @@ -141,24 +141,24 @@ impl Plan { } pub fn read(reader: &mut R) -> io::Result { - let key = C::Curve::read_G(reader)?; + let key = N::Curve::read_G(reader)?; let mut inputs = vec![]; let mut buf = [0; 4]; reader.read_exact(&mut buf)?; for _ in 0 .. u32::from_le_bytes(buf) { - inputs.push(C::Output::read(reader)?); + inputs.push(N::Output::read(reader)?); } let mut payments = vec![]; reader.read_exact(&mut buf)?; for _ in 0 .. 
u32::from_le_bytes(buf) { - payments.push(Payment::::read(reader)?); + payments.push(Payment::::read(reader)?); } let mut buf = [0; 1]; reader.read_exact(&mut buf)?; - let change = if buf[0] == 1 { Some(C::Curve::read_G(reader)?) } else { None }; + let change = if buf[0] == 1 { Some(N::Curve::read_G(reader)?) } else { None }; Ok(Plan { key, inputs, payments, change }) } diff --git a/processor/src/scanner.rs b/processor/src/scanner.rs index 5ef52730..6ebde415 100644 --- a/processor/src/scanner.rs +++ b/processor/src/scanner.rs @@ -18,27 +18,27 @@ use serai_client::primitives::BlockHash; use crate::{ Get, DbTxn, Db, - coins::{Output, Transaction, EventualitiesTracker, Block, Coin}, + networks::{Output, Transaction, EventualitiesTracker, Block, Network}, }; #[derive(Clone, Debug)] -pub enum ScannerEvent { +pub enum ScannerEvent { // Block scanned Block { - key: ::G, - block: >::Id, + key: ::G, + block: >::Id, batch: u32, - outputs: Vec, + outputs: Vec, }, // Eventuality completion found on-chain - Completed([u8; 32], >::Id), + Completed([u8; 32], >::Id), } -pub type ScannerEventChannel = mpsc::UnboundedReceiver>; +pub type ScannerEventChannel = mpsc::UnboundedReceiver>; #[derive(Clone, Debug)] -struct ScannerDb(PhantomData, PhantomData); -impl ScannerDb { +struct ScannerDb(PhantomData, PhantomData); +impl ScannerDb { fn scanner_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec { D::key(b"SCANNER", dst, key) } @@ -46,21 +46,21 @@ impl ScannerDb { fn block_key(number: usize) -> Vec { Self::scanner_key(b"block_id", u64::try_from(number).unwrap().to_le_bytes()) } - fn block_number_key(id: &>::Id) -> Vec { + fn block_number_key(id: &>::Id) -> Vec { Self::scanner_key(b"block_number", id) } - fn save_block(txn: &mut D::Transaction<'_>, number: usize, id: &>::Id) { + fn save_block(txn: &mut D::Transaction<'_>, number: usize, id: &>::Id) { txn.put(Self::block_number_key(id), u64::try_from(number).unwrap().to_le_bytes()); txn.put(Self::block_key(number), id); } - fn 
block(getter: &G, number: usize) -> Option<>::Id> { + fn block(getter: &G, number: usize) -> Option<>::Id> { getter.get(Self::block_key(number)).map(|id| { - let mut res = >::Id::default(); + let mut res = >::Id::default(); res.as_mut().copy_from_slice(&id); res }) } - fn block_number(getter: &G, id: &>::Id) -> Option { + fn block_number(getter: &G, id: &>::Id) -> Option { getter .get(Self::block_number_key(id)) .map(|number| u64::from_le_bytes(number.try_into().unwrap()).try_into().unwrap()) @@ -69,7 +69,7 @@ impl ScannerDb { fn active_keys_key() -> Vec { Self::scanner_key(b"active_keys", b"") } - fn add_active_key(txn: &mut D::Transaction<'_>, key: ::G) { + fn add_active_key(txn: &mut D::Transaction<'_>, key: ::G) { let mut keys = txn.get(Self::active_keys_key()).unwrap_or(vec![]); let key_bytes = key.to_bytes(); @@ -90,7 +90,7 @@ impl ScannerDb { keys.extend(key_bytes.as_ref()); txn.put(Self::active_keys_key(), keys); } - fn active_keys(getter: &G) -> Vec<::G> { + fn active_keys(getter: &G) -> Vec<::G> { let bytes_vec = getter.get(Self::active_keys_key()).unwrap_or(vec![]); let mut bytes: &[u8] = bytes_vec.as_ref(); @@ -100,35 +100,35 @@ impl ScannerDb { // Either are fine let mut res = Vec::with_capacity(bytes.len() / 32); while !bytes.is_empty() { - res.push(C::Curve::read_G(&mut bytes).unwrap()); + res.push(N::Curve::read_G(&mut bytes).unwrap()); } res } - fn seen_key(id: &::Id) -> Vec { + fn seen_key(id: &::Id) -> Vec { Self::scanner_key(b"seen", id) } - fn seen(getter: &G, id: &::Id) -> bool { + fn seen(getter: &G, id: &::Id) -> bool { getter.get(Self::seen_key(id)).is_some() } fn next_batch_key() -> Vec { Self::scanner_key(b"next_batch", []) } - fn batch_key(key: &::G, block: &>::Id) -> Vec { + fn batch_key(key: &::G, block: &>::Id) -> Vec { Self::scanner_key(b"batch", [key.to_bytes().as_ref(), block.as_ref()].concat()) } fn outputs_key( - key: &::G, - block: &>::Id, + key: &::G, + block: &>::Id, ) -> Vec { Self::scanner_key(b"outputs", 
[key.to_bytes().as_ref(), block.as_ref()].concat()) } fn save_outputs( txn: &mut D::Transaction<'_>, - key: &::G, - block: &>::Id, - outputs: &[C::Output], + key: &::G, + block: &>::Id, + outputs: &[N::Output], ) -> u32 { let batch_key = Self::batch_key(key, block); if let Some(batch) = txn.get(batch_key) { @@ -160,29 +160,29 @@ impl ScannerDb { } fn outputs( txn: &D::Transaction<'_>, - key: &::G, - block: &>::Id, - ) -> Option> { + key: &::G, + block: &>::Id, + ) -> Option> { let bytes_vec = txn.get(Self::outputs_key(key, block))?; let mut bytes: &[u8] = bytes_vec.as_ref(); let mut res = vec![]; while !bytes.is_empty() { - res.push(C::Output::read(&mut bytes).unwrap()); + res.push(N::Output::read(&mut bytes).unwrap()); } Some(res) } - fn scanned_block_key(key: &::G) -> Vec { + fn scanned_block_key(key: &::G) -> Vec { Self::scanner_key(b"scanned_block", key.to_bytes()) } #[allow(clippy::type_complexity)] fn save_scanned_block( txn: &mut D::Transaction<'_>, - key: &::G, + key: &::G, block: usize, - ) -> (Option<>::Id>, Vec) { + ) -> (Option<>::Id>, Vec) { let id = Self::block(txn, block); // It may be None for the first key rotated to let outputs = if let Some(id) = id.as_ref() { Self::outputs(txn, key, id).unwrap_or(vec![]) @@ -200,7 +200,7 @@ impl ScannerDb { // Return this block's outputs so they can be pruned from the RAM cache (id, outputs) } - fn latest_scanned_block(getter: &G, key: ::G) -> usize { + fn latest_scanned_block(getter: &G, key: ::G) -> usize { let bytes = getter .get(Self::scanned_block_key(&key)) .expect("asking for latest scanned block of key which wasn't rotated to"); @@ -212,26 +212,26 @@ impl ScannerDb { /// It WILL NOT fail to emit an event, even if it reboots at selected moments. /// It MAY fire the same event multiple times. 
#[derive(Debug)] -pub struct Scanner { - coin: C, +pub struct Scanner { + network: N, db: D, - keys: Vec<::G>, + keys: Vec<::G>, - eventualities: EventualitiesTracker, + eventualities: EventualitiesTracker, ram_scanned: HashMap, usize>, ram_outputs: HashSet>, - events: mpsc::UnboundedSender>, + events: mpsc::UnboundedSender>, } #[derive(Debug)] -pub struct ScannerHandle { - scanner: Arc>>, - pub events: ScannerEventChannel, +pub struct ScannerHandle { + scanner: Arc>>, + pub events: ScannerEventChannel, } -impl ScannerHandle { +impl ScannerHandle { pub async fn ram_scanned(&self) -> usize { let mut res = None; for scanned in self.scanner.read().await.ram_scanned.values() { @@ -249,7 +249,7 @@ impl ScannerHandle { &mut self, block_number: usize, id: [u8; 32], - eventuality: C::Eventuality, + eventuality: N::Eventuality, ) { self.scanner.write().await.eventualities.register(block_number, id, eventuality) } @@ -269,7 +269,7 @@ impl ScannerHandle { &mut self, txn: &mut D::Transaction<'_>, activation_number: usize, - key: ::G, + key: ::G, ) { let mut scanner = self.scanner.write().await; if !scanner.keys.is_empty() { @@ -280,41 +280,41 @@ impl ScannerHandle { info!("Rotating scanner to key {} at {activation_number}", hex::encode(key.to_bytes())); - let (_, outputs) = ScannerDb::::save_scanned_block(txn, &key, activation_number); + let (_, outputs) = ScannerDb::::save_scanned_block(txn, &key, activation_number); scanner.ram_scanned.insert(key.to_bytes().as_ref().to_vec(), activation_number); assert!(outputs.is_empty()); - ScannerDb::::add_active_key(txn, key); + ScannerDb::::add_active_key(txn, key); scanner.keys.push(key); } // This perform a database read which isn't safe with regards to if the value is set or not // It may be set, when it isn't expected to be set, or not set, when it is expected to be set // Since the value is static, if it's set, it's correctly set - pub async fn block_number(&self, id: &>::Id) -> Option { - 
ScannerDb::::block_number(&self.scanner.read().await.db, id) + pub async fn block_number(&self, id: &>::Id) -> Option { + ScannerDb::::block_number(&self.scanner.read().await.db, id) } /// Acknowledge having handled a block for a key. pub async fn ack_up_to_block( &mut self, txn: &mut D::Transaction<'_>, - key: ::G, - id: >::Id, - ) -> (Vec, Vec) { + key: ::G, + id: >::Id, + ) -> (Vec, Vec) { let mut scanner = self.scanner.write().await; debug!("Block {} acknowledged", hex::encode(&id)); // Get the number for this block - let number = ScannerDb::::block_number(txn, &id) + let number = ScannerDb::::block_number(txn, &id) .expect("main loop trying to operate on data we haven't scanned"); // Get the number of the last block we acknowledged - let prior = ScannerDb::::latest_scanned_block(txn, key); + let prior = ScannerDb::::latest_scanned_block(txn, key); let mut blocks = vec![]; let mut outputs = vec![]; for number in (prior + 1) ..= number { - let (block, these_outputs) = ScannerDb::::save_scanned_block(txn, &key, number); + let (block, these_outputs) = ScannerDb::::save_scanned_block(txn, &key, number); let block = BlockHash(block.unwrap().as_ref().try_into().unwrap()); blocks.push(block); outputs.extend(these_outputs); @@ -329,22 +329,22 @@ impl ScannerHandle { } } -impl Scanner { +impl Scanner { #[allow(clippy::new_ret_no_self)] - pub fn new(coin: C, db: D) -> (ScannerHandle, Vec<::G>) { + pub fn new(network: N, db: D) -> (ScannerHandle, Vec<::G>) { let (events_send, events_recv) = mpsc::unbounded_channel(); - let keys = ScannerDb::::active_keys(&db); + let keys = ScannerDb::::active_keys(&db); let mut ram_scanned = HashMap::new(); for key in keys.clone() { ram_scanned.insert( key.to_bytes().as_ref().to_vec(), - ScannerDb::::latest_scanned_block(&db, key), + ScannerDb::::latest_scanned_block(&db, key), ); } let scanner = Arc::new(RwLock::new(Scanner { - coin, + network, db, keys: keys.clone(), @@ -360,7 +360,7 @@ impl Scanner { (ScannerHandle { scanner, events: 
events_recv }, keys) } - fn emit(&mut self, event: ScannerEvent) -> bool { + fn emit(&mut self, event: ScannerEvent) -> bool { if self.events.send(event).is_err() { info!("Scanner handler was dropped. Shutting down?"); return false; @@ -377,11 +377,11 @@ impl Scanner { // Scan new blocks { let mut scanner = scanner.write().await; - let latest = scanner.coin.get_latest_block_number().await; + let latest = scanner.network.get_latest_block_number().await; let latest = match latest { // Only scan confirmed blocks, which we consider effectively finalized // CONFIRMATIONS - 1 as whatever's in the latest block already has 1 confirm - Ok(latest) => latest.saturating_sub(C::CONFIRMATIONS.saturating_sub(1)), + Ok(latest) => latest.saturating_sub(N::CONFIRMATIONS.saturating_sub(1)), Err(_) => { warn!("couldn't get latest block number"); sleep(Duration::from_secs(60)).await; @@ -396,7 +396,7 @@ impl Scanner { for i in (latest_scanned + 1) ..= latest { // TODO2: Check for key deprecation - let block = match scanner.coin.get_block(i).await { + let block = match scanner.network.get_block(i).await { Ok(block) => block, Err(_) => { warn!("couldn't get block {i}"); @@ -409,14 +409,14 @@ impl Scanner { // only written to/read by this thread // There's also no error caused by them being unexpectedly written (if the commit is // made and then the processor suddenly reboots) - if let Some(id) = ScannerDb::::block(&scanner.db, i) { + if let Some(id) = ScannerDb::::block(&scanner.db, i) { if id != block_id { panic!("reorg'd from finalized {} to {}", hex::encode(id), hex::encode(block_id)); } } else { info!("Found new block: {}", hex::encode(&block_id)); - if let Some(id) = ScannerDb::::block(&scanner.db, i.saturating_sub(1)) { + if let Some(id) = ScannerDb::::block(&scanner.db, i.saturating_sub(1)) { if id != block.parent() { panic!( "block {} doesn't build off expected parent {}", @@ -427,15 +427,16 @@ impl Scanner { } let mut txn = scanner.db.txn(); - ScannerDb::::save_block(&mut txn, 
i, &block_id); + ScannerDb::::save_block(&mut txn, i, &block_id); txn.commit(); } - // Clone coin because we can't borrow it while also mutably borrowing the eventualities - // Thankfully, coin is written to be a cheap clone - let coin = scanner.coin.clone(); + // Clone network because we can't borrow it while also mutably borrowing the + // eventualities + // Thankfully, network is written to be a cheap clone + let network = scanner.network.clone(); for (id, tx) in - coin.get_eventuality_completions(&mut scanner.eventualities, &block).await + network.get_eventuality_completions(&mut scanner.eventualities, &block).await { // This should only happen if there's a P2P net desync or there's a malicious // validator @@ -450,7 +451,7 @@ impl Scanner { } } - let outputs = match scanner.coin.get_outputs(&block, key).await { + let outputs = match scanner.network.get_outputs(&block, key).await { Ok(outputs) => outputs, Err(_) => { warn!("Couldn't scan block {i}"); @@ -499,7 +500,7 @@ impl Scanner { TODO: Only update ram_outputs after committing the TXN in question. */ - let seen = ScannerDb::::seen(&scanner.db, &id); + let seen = ScannerDb::::seen(&scanner.db, &id); let id = id.as_ref().to_vec(); if seen || scanner.ram_outputs.contains(&id) { panic!("scanned an output multiple times"); @@ -513,7 +514,7 @@ impl Scanner { // Save the outputs to disk let mut txn = scanner.db.txn(); - let batch = ScannerDb::::save_outputs(&mut txn, &key, &block_id, &outputs); + let batch = ScannerDb::::save_outputs(&mut txn, &key, &block_id, &outputs); txn.commit(); // Send all outputs diff --git a/processor/src/scheduler.rs b/processor/src/scheduler.rs index faa9e44e..e441a71e 100644 --- a/processor/src/scheduler.rs +++ b/processor/src/scheduler.rs @@ -6,14 +6,14 @@ use std::{ use ciphersuite::{group::GroupEncoding, Ciphersuite}; use crate::{ - coins::{Output, Coin}, + networks::{Output, Network}, DbTxn, Db, Payment, Plan, }; /// Stateless, deterministic output/payment manager. 
#[derive(PartialEq, Eq, Debug)] -pub struct Scheduler { - key: ::G, +pub struct Scheduler { + key: ::G, // Serai, when it has more outputs expected than it can handle in a single tranaction, will // schedule the outputs to be handled later. Immediately, it just creates additional outputs @@ -31,22 +31,22 @@ pub struct Scheduler { // output actually has, and it'll be moved into plans // // TODO2: Consider edge case where branch/change isn't mined yet keys are deprecated - queued_plans: HashMap>>>, - plans: HashMap>>>, + queued_plans: HashMap>>>, + plans: HashMap>>>, // UTXOs available - utxos: Vec, + utxos: Vec, // Payments awaiting scheduling due to the output availability problem - payments: VecDeque>, + payments: VecDeque>, } fn scheduler_key(key: &G) -> Vec { D::key(b"SCHEDULER", b"scheduler", key.to_bytes()) } -impl Scheduler { - fn read(key: ::G, reader: &mut R) -> io::Result { +impl Scheduler { + fn read(key: ::G, reader: &mut R) -> io::Result { let mut read_plans = || -> io::Result<_> { let mut all_plans = HashMap::new(); let mut all_plans_len = [0; 4]; @@ -80,7 +80,7 @@ impl Scheduler { let mut utxos_len = [0; 4]; reader.read_exact(&mut utxos_len)?; for _ in 0 .. 
u32::from_le_bytes(utxos_len) { - utxos.push(C::Output::read(reader)?); + utxos.push(N::Output::read(reader)?); } let mut payments = VecDeque::new(); @@ -99,7 +99,7 @@ impl Scheduler { fn serialize(&self) -> Vec { let mut res = Vec::with_capacity(4096); - let mut write_plans = |plans: &HashMap>>>| { + let mut write_plans = |plans: &HashMap>>>| { res.extend(u32::try_from(plans.len()).unwrap().to_le_bytes()); for (amount, list_of_plans) in plans { res.extend(amount.to_le_bytes()); @@ -129,7 +129,7 @@ impl Scheduler { res } - pub fn new(txn: &mut D::Transaction<'_>, key: ::G) -> Self { + pub fn new(txn: &mut D::Transaction<'_>, key: ::G) -> Self { let res = Scheduler { key, queued_plans: HashMap::new(), @@ -142,7 +142,7 @@ impl Scheduler { res } - pub fn from_db(db: &D, key: ::G) -> io::Result { + pub fn from_db(db: &D, key: ::G) -> io::Result { let scheduler = db.get(scheduler_key::(&key)).unwrap_or_else(|| { panic!("loading scheduler from DB without scheduler for {}", hex::encode(key.to_bytes())) }); @@ -152,10 +152,10 @@ impl Scheduler { Self::read(key, reader) } - fn execute(&mut self, inputs: Vec, mut payments: Vec>) -> Plan { - // This must be equal to plan.key due to how coins detect they created outputs which are to + fn execute(&mut self, inputs: Vec, mut payments: Vec>) -> Plan { + // This must be equal to plan.key due to how networks detect they created outputs which are to // the branch address - let branch_address = C::branch_address(self.key); + let branch_address = N::branch_address(self.key); // created_output will be called any time we send to a branch address // If it's called, and it wasn't expecting to be called, that's almost certainly an error // The only way it wouldn't be is if someone on Serai triggered a burn to a branch, which is @@ -166,10 +166,10 @@ impl Scheduler { payments.drain(..).filter(|payment| payment.address != branch_address).collect::>(); let mut change = false; - let mut max = C::MAX_OUTPUTS; + let mut max = N::MAX_OUTPUTS; let 
payment_amounts = - |payments: &Vec>| payments.iter().map(|payment| payment.amount).sum::(); + |payments: &Vec>| payments.iter().map(|payment| payment.amount).sum::(); // Requires a change output if inputs.iter().map(Output::amount).sum::() != payment_amounts(&payments) { @@ -192,9 +192,9 @@ impl Scheduler { // If this was perfect, the heaviest branch would have 1 branch of 3 leaves and 15 leaves while payments.len() > max { // The resulting TX will have the remaining payments and a new branch payment - let to_remove = (payments.len() + 1) - C::MAX_OUTPUTS; + let to_remove = (payments.len() + 1) - N::MAX_OUTPUTS; // Don't remove more than possible - let to_remove = to_remove.min(C::MAX_OUTPUTS); + let to_remove = to_remove.min(N::MAX_OUTPUTS); // Create the plan let removed = payments.drain((payments.len() - to_remove) ..).collect::>(); @@ -211,7 +211,7 @@ impl Scheduler { Plan { key: self.key, inputs, payments, change: Some(self.key).filter(|_| change) } } - fn add_outputs(&mut self, mut utxos: Vec) -> Vec> { + fn add_outputs(&mut self, mut utxos: Vec) -> Vec> { log::info!("adding {} outputs", utxos.len()); let mut txs = vec![]; @@ -247,9 +247,9 @@ impl Scheduler { pub fn schedule( &mut self, txn: &mut D::Transaction<'_>, - utxos: Vec, - payments: Vec>, - ) -> Vec> { + utxos: Vec, + payments: Vec>, + ) -> Vec> { let mut plans = self.add_outputs(utxos); log::info!("scheduling {} new payments", payments.len()); @@ -275,7 +275,7 @@ impl Scheduler { // Since we do multiple aggregation TXs at once, this will execute in logarithmic time let utxos = self.utxos.drain(..).collect::>(); let mut utxo_chunks = - utxos.chunks(C::MAX_INPUTS).map(|chunk| chunk.to_vec()).collect::>(); + utxos.chunks(N::MAX_INPUTS).map(|chunk| chunk.to_vec()).collect::>(); // Use the first chunk for any scheduled payments, since it has the most value let utxos = utxo_chunks.remove(0); @@ -294,7 +294,7 @@ impl Scheduler { // TODO: While payments have their TXs' fees deducted from themselves, that 
doesn't hold here // We need to charge a fee before reporting incoming UTXOs to Substrate to cover aggregation // TXs - log::debug!("aggregating a chunk of {} inputs", C::MAX_INPUTS); + log::debug!("aggregating a chunk of {} inputs", N::MAX_INPUTS); plans.push(Plan { key: self.key, inputs: chunk, payments: vec![], change: Some(self.key) }) } @@ -303,7 +303,7 @@ impl Scheduler { // If we can't fulfill the next payment, we have encountered an instance of the UTXO // availability problem - // This shows up in coins like Monero, where because we spent outputs, our change has yet to + // This shows up in networks like Monero, where because we spent outputs, our change has yet to // re-appear. Since it has yet to re-appear, we only operate with a balance which is a subset // of our total balance // Despite this, we may be order to fulfill a payment which is our total balance @@ -369,7 +369,7 @@ impl Scheduler { }; // Amortize the fee amongst all payments - // While some coins, like Ethereum, may have some payments take notably more gas, those + // While some networks, like Ethereum, may have some payments take notably more gas, those // payments will have their own gas deducted when they're created. 
The difference in output // value present here is solely the cost of the branch, which is used for all of these // payments, regardless of how much they'll end up costing @@ -387,7 +387,7 @@ impl Scheduler { // Drop payments now below the dust threshold let payments = - payments.drain(..).filter(|payment| payment.amount >= C::DUST).collect::>(); + payments.drain(..).filter(|payment| payment.amount >= N::DUST).collect::>(); // Sanity check this was done properly assert!(actual >= payments.iter().map(|payment| payment.amount).sum::()); if payments.is_empty() { diff --git a/processor/src/signer.rs b/processor/src/signer.rs index 0199a36e..673a4d45 100644 --- a/processor/src/signer.rs +++ b/processor/src/signer.rs @@ -14,18 +14,18 @@ use log::{info, debug, warn, error}; use messages::sign::*; use crate::{ Get, DbTxn, Db, - coins::{Transaction, Eventuality, Coin}, + networks::{Transaction, Eventuality, Network}, }; #[derive(Debug)] -pub enum SignerEvent { - SignedTransaction { id: [u8; 32], tx: >::Id }, +pub enum SignerEvent { + SignedTransaction { id: [u8; 32], tx: >::Id }, ProcessorMessage(ProcessorMessage), } #[derive(Debug)] -struct SignerDb(D, PhantomData); -impl SignerDb { +struct SignerDb(D, PhantomData); +impl SignerDb { fn sign_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec { D::key(b"SIGNER", dst, key) } @@ -36,7 +36,7 @@ impl SignerDb { fn complete( txn: &mut D::Transaction<'_>, id: [u8; 32], - tx: &>::Id, + tx: &>::Id, ) { // Transactions can be completed by multiple signatures // Save every solution in order to be robust @@ -64,12 +64,12 @@ impl SignerDb { fn eventuality_key(id: [u8; 32]) -> Vec { Self::sign_key(b"eventuality", id) } - fn save_eventuality(txn: &mut D::Transaction<'_>, id: [u8; 32], eventuality: C::Eventuality) { + fn save_eventuality(txn: &mut D::Transaction<'_>, id: [u8; 32], eventuality: N::Eventuality) { txn.put(Self::eventuality_key(id), eventuality.serialize()); } - fn eventuality(getter: &G, id: [u8; 32]) -> Option { + fn 
eventuality(getter: &G, id: [u8; 32]) -> Option { Some( - C::Eventuality::read::<&[u8]>(&mut getter.get(Self::eventuality_key(id))?.as_ref()).unwrap(), + N::Eventuality::read::<&[u8]>(&mut getter.get(Self::eventuality_key(id))?.as_ref()).unwrap(), ) } @@ -83,49 +83,49 @@ impl SignerDb { getter.get(Self::attempt_key(id)).is_some() } - fn save_transaction(txn: &mut D::Transaction<'_>, tx: &C::Transaction) { + fn save_transaction(txn: &mut D::Transaction<'_>, tx: &N::Transaction) { txn.put(Self::sign_key(b"tx", tx.id()), tx.serialize()); } } -pub struct Signer { +pub struct Signer { db: PhantomData, - coin: C, + network: N, - keys: ThresholdKeys, + keys: ThresholdKeys, - signable: HashMap<[u8; 32], C::SignableTransaction>, + signable: HashMap<[u8; 32], N::SignableTransaction>, attempt: HashMap<[u8; 32], u32>, - preprocessing: HashMap<[u8; 32], ::SignMachine>, + preprocessing: HashMap<[u8; 32], ::SignMachine>, #[allow(clippy::type_complexity)] signing: HashMap< [u8; 32], < - ::SignMachine as SignMachine + ::SignMachine as SignMachine >::SignatureMachine, >, - pub events: VecDeque>, + pub events: VecDeque>, } -impl fmt::Debug for Signer { +impl fmt::Debug for Signer { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt .debug_struct("Signer") - .field("coin", &self.coin) + .field("network", &self.network) .field("signable", &self.signable) .field("attempt", &self.attempt) .finish_non_exhaustive() } } -impl Signer { - pub fn new(coin: C, keys: ThresholdKeys) -> Signer { +impl Signer { + pub fn new(network: N, keys: ThresholdKeys) -> Signer { Signer { db: PhantomData, - coin, + network, keys, @@ -138,7 +138,7 @@ impl Signer { } } - pub fn keys(&self) -> ThresholdKeys { + pub fn keys(&self) -> ThresholdKeys { self.keys.clone() } @@ -173,7 +173,7 @@ impl Signer { } fn already_completed(&self, txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool { - if SignerDb::::completed(txn, id).is_some() { + if SignerDb::::completed(txn, id).is_some() { debug!( 
"SignTransaction/Reattempt order for {}, which we've already completed signing", hex::encode(id) @@ -185,7 +185,7 @@ impl Signer { } } - fn complete(&mut self, id: [u8; 32], tx_id: >::Id) { + fn complete(&mut self, id: [u8; 32], tx_id: >::Id) { // Assert we're actively signing for this TX assert!(self.signable.remove(&id).is_some(), "completed a TX we weren't signing for"); assert!(self.attempt.remove(&id).is_some(), "attempt had an ID signable didn't have"); @@ -205,14 +205,14 @@ impl Signer { &mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], - tx_id: &>::Id, + tx_id: &>::Id, ) { - if let Some(eventuality) = SignerDb::::eventuality(txn, id) { + if let Some(eventuality) = SignerDb::::eventuality(txn, id) { // Transaction hasn't hit our mempool/was dropped for a different signature // The latter can happen given certain latency conditions/a single malicious signer // In the case of a single malicious signer, they can drag multiple honest // validators down with them, so we unfortunately can't slash on this case - let Ok(tx) = self.coin.get_transaction(tx_id).await else { + let Ok(tx) = self.network.get_transaction(tx_id).await else { warn!( "a validator claimed {} completed {} yet we didn't have that TX in our mempool", hex::encode(tx_id), @@ -221,14 +221,14 @@ impl Signer { return; }; - if self.coin.confirm_completion(&eventuality, &tx) { + if self.network.confirm_completion(&eventuality, &tx) { info!("eventuality for {} resolved in TX {}", hex::encode(id), hex::encode(tx_id)); let first_completion = !self.already_completed(txn, id); // Save this completion to the DB - SignerDb::::save_transaction(txn, &tx); - SignerDb::::complete(txn, id, tx_id); + SignerDb::::save_transaction(txn, &tx); + SignerDb::::complete(txn, id, tx_id); if first_completion { self.complete(id, tx.id()); @@ -298,7 +298,7 @@ impl Signer { // branch again for something we've already attempted // // Only run if this hasn't already been attempted - if SignerDb::::has_attempt(txn, &id) { + if 
SignerDb::::has_attempt(txn, &id) { warn!( "already attempted {} #{}. this is an error if we didn't reboot", hex::encode(id.id), @@ -307,10 +307,10 @@ impl Signer { return; } - SignerDb::::attempt(txn, &id); + SignerDb::::attempt(txn, &id); // Attempt to create the TX - let machine = match self.coin.attempt_send(tx).await { + let machine = match self.network.attempt_send(tx).await { Err(e) => { error!("failed to attempt {}, #{}: {:?}", hex::encode(id.id), id.attempt, e); return; @@ -336,14 +336,14 @@ impl Signer { &mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], - tx: C::SignableTransaction, - eventuality: C::Eventuality, + tx: N::SignableTransaction, + eventuality: N::Eventuality, ) { if self.already_completed(txn, id) { return; } - SignerDb::::save_eventuality(txn, id, eventuality); + SignerDb::::save_eventuality(txn, id, eventuality); self.signable.insert(id, tx); self.attempt(txn, id, 0).await; @@ -445,12 +445,12 @@ impl Signer { }; // Save the transaction in case it's needed for recovery - SignerDb::::save_transaction(txn, &tx); + SignerDb::::save_transaction(txn, &tx); let tx_id = tx.id(); - SignerDb::::complete(txn, id.id, &tx_id); + SignerDb::::complete(txn, id.id, &tx_id); // Publish it - if let Err(e) = self.coin.publish_transaction(&tx).await { + if let Err(e) = self.network.publish_transaction(&tx).await { error!("couldn't publish {:?}: {:?}", tx, e); } else { info!("published {} for plan {}", hex::encode(&tx_id), hex::encode(id.id)); @@ -465,7 +465,7 @@ impl Signer { } CoordinatorMessage::Completed { key: _, id, tx: mut tx_vec } => { - let mut tx = >::Id::default(); + let mut tx = >::Id::default(); if tx.as_ref().len() != tx_vec.len() { let true_len = tx_vec.len(); tx_vec.truncate(2 * tx.as_ref().len()); diff --git a/processor/src/tests/addresses.rs b/processor/src/tests/addresses.rs index 4d114dec..bd134278 100644 --- a/processor/src/tests/addresses.rs +++ b/processor/src/tests/addresses.rs @@ -11,18 +11,18 @@ use serai_db::{DbTxn, MemDb}; use 
crate::{ Plan, Db, - coins::{OutputType, Output, Block, Coin}, + networks::{OutputType, Output, Block, Network}, scanner::{ScannerEvent, Scanner, ScannerHandle}, tests::sign, }; -async fn spend( - coin: &C, - keys: &HashMap>, - scanner: &mut ScannerHandle, +async fn spend( + network: &N, + keys: &HashMap>, + scanner: &mut ScannerHandle, batch: u32, - outputs: Vec, -) -> Vec { + outputs: Vec, +) -> Vec { let key = keys[&Participant::new(1).unwrap()].group_key(); let mut keys_txs = HashMap::new(); @@ -31,13 +31,13 @@ async fn spend( *i, ( keys.clone(), - coin + network .prepare_send( keys.clone(), - coin.get_latest_block_number().await.unwrap() - C::CONFIRMATIONS, + network.get_latest_block_number().await.unwrap() - N::CONFIRMATIONS, // Send to a change output Plan { key, inputs: outputs.clone(), payments: vec![], change: Some(key) }, - coin.get_fee().await, + network.get_fee().await, ) .await .unwrap() @@ -46,10 +46,10 @@ async fn spend( ), ); } - sign(coin.clone(), keys_txs).await; + sign(network.clone(), keys_txs).await; - for _ in 0 .. C::CONFIRMATIONS { - coin.mine_block().await; + for _ in 0 .. N::CONFIRMATIONS { + network.mine_block().await; } match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { key: this_key, block: _, batch: this_batch, outputs } => { @@ -66,27 +66,27 @@ async fn spend( } } -pub async fn test_addresses(coin: C) { - let mut keys = frost::tests::key_gen::<_, C::Curve>(&mut OsRng); +pub async fn test_addresses(network: N) { + let mut keys = frost::tests::key_gen::<_, N::Curve>(&mut OsRng); for (_, keys) in keys.iter_mut() { - C::tweak_keys(keys); + N::tweak_keys(keys); } let key = keys[&Participant::new(1).unwrap()].group_key(); // Mine blocks so there's a confirmed block - for _ in 0 .. C::CONFIRMATIONS { - coin.mine_block().await; + for _ in 0 .. 
N::CONFIRMATIONS { + network.mine_block().await; } let mut db = MemDb::new(); - let (mut scanner, active_keys) = Scanner::new(coin.clone(), db.clone()); + let (mut scanner, active_keys) = Scanner::new(network.clone(), db.clone()); assert!(active_keys.is_empty()); let mut txn = db.txn(); - scanner.rotate_key(&mut txn, coin.get_latest_block_number().await.unwrap(), key).await; + scanner.rotate_key(&mut txn, network.get_latest_block_number().await.unwrap(), key).await; txn.commit(); // Receive funds to the branch address and make sure it's properly identified - let block_id = coin.test_send(C::branch_address(key)).await.id(); + let block_id = network.test_send(N::branch_address(key)).await.id(); // Verify the Scanner picked them up let outputs = @@ -105,7 +105,7 @@ pub async fn test_addresses(coin: C) { }; // Spend the branch output, creating a change output and ensuring we actually get change - let outputs = spend(&coin, &keys, &mut scanner, 1, outputs).await; + let outputs = spend(&network, &keys, &mut scanner, 1, outputs).await; // Also test spending the change output - spend(&coin, &keys, &mut scanner, 2, outputs).await; + spend(&network, &keys, &mut scanner, 2, outputs).await; } diff --git a/processor/src/tests/key_gen.rs b/processor/src/tests/key_gen.rs index 52cd710e..2b083ad4 100644 --- a/processor/src/tests/key_gen.rs +++ b/processor/src/tests/key_gen.rs @@ -17,14 +17,14 @@ use serai_client::{ use messages::key_gen::*; use crate::{ - coins::Coin, + networks::Network, key_gen::{KeyConfirmed, KeyGen}, }; const ID: KeyGenId = KeyGenId { set: ValidatorSet { session: Session(1), network: NetworkId::Monero }, attempt: 3 }; -pub async fn test_key_gen() { +pub async fn test_key_gen() { let mut entropies = HashMap::new(); let mut dbs = HashMap::new(); let mut key_gens = HashMap::new(); @@ -34,7 +34,7 @@ pub async fn test_key_gen() { entropies.insert(i, entropy); let db = MemDb::new(); dbs.insert(i, db.clone()); - key_gens.insert(i, KeyGen::::new(db, 
entropies[&i].clone())); + key_gens.insert(i, KeyGen::::new(db, entropies[&i].clone())); } let mut all_commitments = HashMap::new(); @@ -65,7 +65,7 @@ pub async fn test_key_gen() { // 3 ... are rebuilt once, one at each of the following steps let rebuild = |key_gens: &mut HashMap<_, _>, dbs: &HashMap<_, MemDb>, i| { key_gens.remove(&i); - key_gens.insert(i, KeyGen::::new(dbs[&i].clone(), entropies[&i].clone())); + key_gens.insert(i, KeyGen::::new(dbs[&i].clone(), entropies[&i].clone())); }; rebuild(&mut key_gens, &dbs, 1); rebuild(&mut key_gens, &dbs, 2); @@ -102,7 +102,7 @@ pub async fn test_key_gen() { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); - if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, coin_key } = key_gen + if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } = key_gen .handle( &mut txn, CoordinatorMessage::Shares { @@ -117,9 +117,9 @@ pub async fn test_key_gen() { { assert_eq!(id, ID); if res.is_none() { - res = Some((substrate_key, coin_key.clone())); + res = Some((substrate_key, network_key.clone())); } - assert_eq!(res.as_ref().unwrap(), &(substrate_key, coin_key)); + assert_eq!(res.as_ref().unwrap(), &(substrate_key, network_key)); } else { panic!("didn't get key back"); } @@ -134,7 +134,7 @@ pub async fn test_key_gen() { for i in 1 ..= 5 { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); - let KeyConfirmed { substrate_keys, coin_keys } = key_gen + let KeyConfirmed { substrate_keys, network_keys } = key_gen .confirm(&mut txn, ID.set, (sr25519::Public(res.0), res.1.clone().try_into().unwrap())) .await; txn.commit(); @@ -142,9 +142,12 @@ pub async fn test_key_gen() { let params = ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()).unwrap(); assert_eq!(substrate_keys.params(), params); - assert_eq!(coin_keys.params(), params); + 
assert_eq!(network_keys.params(), params); assert_eq!( - (substrate_keys.group_key().to_bytes(), coin_keys.group_key().to_bytes().as_ref().to_vec()), + ( + substrate_keys.group_key().to_bytes(), + network_keys.group_key().to_bytes().as_ref().to_vec() + ), res ); } diff --git a/processor/src/tests/literal/mod.rs b/processor/src/tests/literal/mod.rs index 8b749f85..d9494361 100644 --- a/processor/src/tests/literal/mod.rs +++ b/processor/src/tests/literal/mod.rs @@ -1,6 +1,6 @@ #[cfg(feature = "bitcoin")] mod bitcoin { - use crate::coins::Bitcoin; + use crate::networks::Bitcoin; async fn bitcoin() -> Bitcoin { let bitcoin = Bitcoin::new("http://serai:seraidex@127.0.0.1:18443".to_string()).await; @@ -8,7 +8,7 @@ mod bitcoin { bitcoin } - test_coin!( + test_network!( Bitcoin, bitcoin, bitcoin_key_gen, @@ -21,7 +21,7 @@ mod bitcoin { #[cfg(feature = "monero")] mod monero { - use crate::coins::{Coin, Monero}; + use crate::networks::{Network, Monero}; async fn monero() -> Monero { let monero = Monero::new("http://127.0.0.1:18081".to_string()); @@ -31,7 +31,7 @@ mod monero { monero } - test_coin!( + test_network!( Monero, monero, monero_key_gen, diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index 5160d96b..084d60b3 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -50,10 +50,10 @@ macro_rules! async_sequential { } #[macro_export] -macro_rules! test_coin { +macro_rules! test_network { ( - $C: ident, - $coin: ident, + $N: ident, + $network: ident, $key_gen: ident, $scanner: ident, $signer: ident, @@ -65,32 +65,32 @@ macro_rules! test_coin { // This doesn't interact with a node and accordingly doesn't need to be run sequentially #[tokio::test] async fn $key_gen() { - test_key_gen::<$C>().await; + test_key_gen::<$N>().await; } sequential!(); async_sequential! { async fn $scanner() { - test_scanner($coin().await).await; + test_scanner($network().await).await; } } async_sequential! 
{ async fn $signer() { - test_signer($coin().await).await; + test_signer($network().await).await; } } async_sequential! { async fn $wallet() { - test_wallet($coin().await).await; + test_wallet($network().await).await; } } async_sequential! { async fn $addresses() { - test_addresses($coin().await).await; + test_addresses($network().await).await; } } }; diff --git a/processor/src/tests/scanner.rs b/processor/src/tests/scanner.rs index bdfb3782..799e365b 100644 --- a/processor/src/tests/scanner.rs +++ b/processor/src/tests/scanner.rs @@ -12,27 +12,27 @@ use serai_client::primitives::BlockHash; use serai_db::{DbTxn, Db, MemDb}; use crate::{ - coins::{OutputType, Output, Block, Coin}, + networks::{OutputType, Output, Block, Network}, scanner::{ScannerEvent, Scanner, ScannerHandle}, }; -pub async fn test_scanner(coin: C) { +pub async fn test_scanner(network: N) { let mut keys = - frost::tests::key_gen::<_, C::Curve>(&mut OsRng).remove(&Participant::new(1).unwrap()).unwrap(); - C::tweak_keys(&mut keys); + frost::tests::key_gen::<_, N::Curve>(&mut OsRng).remove(&Participant::new(1).unwrap()).unwrap(); + N::tweak_keys(&mut keys); let group_key = keys.group_key(); // Mine blocks so there's a confirmed block - for _ in 0 .. C::CONFIRMATIONS { - coin.mine_block().await; + for _ in 0 .. 
N::CONFIRMATIONS { + network.mine_block().await; } let first = Arc::new(Mutex::new(true)); - let activation_number = coin.get_latest_block_number().await.unwrap(); + let activation_number = network.get_latest_block_number().await.unwrap(); let db = MemDb::new(); let new_scanner = || async { let mut db = db.clone(); - let (mut scanner, active_keys) = Scanner::new(coin.clone(), db.clone()); + let (mut scanner, active_keys) = Scanner::new(network.clone(), db.clone()); let mut first = first.lock().unwrap(); if *first { assert!(active_keys.is_empty()); @@ -48,11 +48,11 @@ pub async fn test_scanner(coin: C) { let scanner = new_scanner().await; // Receive funds - let block = coin.test_send(C::address(keys.group_key())).await; + let block = network.test_send(N::address(keys.group_key())).await; let block_id = block.id(); // Verify the Scanner picked them up - let verify_event = |mut scanner: ScannerHandle| async { + let verify_event = |mut scanner: ScannerHandle| async { let outputs = match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { key, block, batch, outputs } => { @@ -80,7 +80,7 @@ pub async fn test_scanner(coin: C) { let mut blocks = vec![]; let mut curr_block = activation_number + 1; loop { - let block = coin.get_block(curr_block).await.unwrap().id(); + let block = network.get_block(curr_block).await.unwrap().id(); blocks.push(BlockHash(block.as_ref().try_into().unwrap())); if block == block_id { break; diff --git a/processor/src/tests/signer.rs b/processor/src/tests/signer.rs index 168c7a56..1e0c53b8 100644 --- a/processor/src/tests/signer.rs +++ b/processor/src/tests/signer.rs @@ -13,18 +13,18 @@ use serai_db::{DbTxn, Db, MemDb}; use messages::sign::*; use crate::{ Payment, Plan, - coins::{Output, Transaction, Coin}, + networks::{Output, Transaction, Network}, signer::{SignerEvent, Signer}, }; #[allow(clippy::type_complexity)] -pub async fn sign( - coin: C, +pub async fn sign( + network: N, mut keys_txs: 
HashMap< Participant, - (ThresholdKeys, (C::SignableTransaction, C::Eventuality)), + (ThresholdKeys, (N::SignableTransaction, N::Eventuality)), >, -) -> >::Id { +) -> >::Id { let actual_id = SignId { key: keys_txs[&Participant::new(1).unwrap()].0.group_key().to_bytes().as_ref().to_vec(), id: [0xaa; 32], @@ -45,7 +45,7 @@ pub async fn sign( let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); let keys = keys.remove(&i).unwrap(); t = keys.params().t(); - signers.insert(i, Signer::<_, MemDb>::new(coin.clone(), keys)); + signers.insert(i, Signer::<_, MemDb>::new(network.clone(), keys)); dbs.insert(i, MemDb::new()); } drop(keys); @@ -146,29 +146,29 @@ pub async fn sign( tx_id.unwrap() } -pub async fn test_signer(coin: C) { +pub async fn test_signer(network: N) { let mut keys = key_gen(&mut OsRng); for (_, keys) in keys.iter_mut() { - C::tweak_keys(keys); + N::tweak_keys(keys); } let key = keys[&Participant::new(1).unwrap()].group_key(); - let outputs = coin.get_outputs(&coin.test_send(C::address(key)).await, key).await.unwrap(); - let sync_block = coin.get_latest_block_number().await.unwrap() - C::CONFIRMATIONS; - let fee = coin.get_fee().await; + let outputs = network.get_outputs(&network.test_send(N::address(key)).await, key).await.unwrap(); + let sync_block = network.get_latest_block_number().await.unwrap() - N::CONFIRMATIONS; + let fee = network.get_fee().await; - let amount = 2 * C::DUST; + let amount = 2 * N::DUST; let mut keys_txs = HashMap::new(); let mut eventualities = vec![]; for (i, keys) in keys.drain() { - let (signable, eventuality) = coin + let (signable, eventuality) = network .prepare_send( keys.clone(), sync_block, Plan { key, inputs: outputs.clone(), - payments: vec![Payment { address: C::address(key), data: None, amount }], + payments: vec![Payment { address: N::address(key), data: None, amount }], change: Some(key), }, fee, @@ -184,23 +184,26 @@ pub async fn test_signer(coin: C) { // The signer may not publish the TX if it has a connection 
error // It doesn't fail in this case - let txid = sign(coin.clone(), keys_txs).await; - let tx = coin.get_transaction(&txid).await.unwrap(); + let txid = sign(network.clone(), keys_txs).await; + let tx = network.get_transaction(&txid).await.unwrap(); assert_eq!(tx.id(), txid); // Mine a block, and scan it, to ensure that the TX actually made it on chain - coin.mine_block().await; - let outputs = coin - .get_outputs(&coin.get_block(coin.get_latest_block_number().await.unwrap()).await.unwrap(), key) + network.mine_block().await; + let outputs = network + .get_outputs( + &network.get_block(network.get_latest_block_number().await.unwrap()).await.unwrap(), + key, + ) .await .unwrap(); assert_eq!(outputs.len(), 2); // Adjust the amount for the fees - let amount = amount - tx.fee(&coin).await; + let amount = amount - tx.fee(&network).await; // Check either output since Monero will randomize its output order assert!((outputs[0].amount() == amount) || (outputs[1].amount() == amount)); // Check the eventualities pass for eventuality in eventualities { - assert!(coin.confirm_completion(&eventuality, &tx)); + assert!(network.confirm_completion(&eventuality, &tx)); } } diff --git a/processor/src/tests/wallet.rs b/processor/src/tests/wallet.rs index 9bd1b157..324e631f 100644 --- a/processor/src/tests/wallet.rs +++ b/processor/src/tests/wallet.rs @@ -10,29 +10,29 @@ use serai_db::{DbTxn, Db, MemDb}; use crate::{ Payment, Plan, - coins::{Output, Transaction, Block, Coin}, + networks::{Output, Transaction, Block, Network}, scanner::{ScannerEvent, Scanner}, scheduler::Scheduler, tests::sign, }; // Tests the Scanner, Scheduler, and Signer together -pub async fn test_wallet(coin: C) { +pub async fn test_wallet(network: N) { let mut keys = key_gen(&mut OsRng); for (_, keys) in keys.iter_mut() { - C::tweak_keys(keys); + N::tweak_keys(keys); } let key = keys[&Participant::new(1).unwrap()].group_key(); let mut db = MemDb::new(); - let (mut scanner, active_keys) = 
Scanner::new(coin.clone(), db.clone()); + let (mut scanner, active_keys) = Scanner::new(network.clone(), db.clone()); assert!(active_keys.is_empty()); let (block_id, outputs) = { let mut txn = db.txn(); - scanner.rotate_key(&mut txn, coin.get_latest_block_number().await.unwrap(), key).await; + scanner.rotate_key(&mut txn, network.get_latest_block_number().await.unwrap(), key).await; txn.commit(); - let block = coin.test_send(C::address(key)).await; + let block = network.test_send(N::address(key)).await; let block_id = block.id(); match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { @@ -51,11 +51,11 @@ pub async fn test_wallet(coin: C) { let mut txn = db.txn(); let mut scheduler = Scheduler::new::(&mut txn, key); - let amount = 2 * C::DUST; + let amount = 2 * N::DUST; let plans = scheduler.schedule::( &mut txn, outputs.clone(), - vec![Payment { address: C::address(key), data: None, amount }], + vec![Payment { address: N::address(key), data: None, amount }], ); txn.commit(); assert_eq!( @@ -63,7 +63,7 @@ pub async fn test_wallet(coin: C) { vec![Plan { key, inputs: outputs.clone(), - payments: vec![Payment { address: C::address(key), data: None, amount }], + payments: vec![Payment { address: N::address(key), data: None, amount }], change: Some(key), }] ); @@ -71,16 +71,16 @@ pub async fn test_wallet(coin: C) { { let mut buf = vec![]; plans[0].write(&mut buf).unwrap(); - assert_eq!(plans[0], Plan::::read::<&[u8]>(&mut buf.as_ref()).unwrap()); + assert_eq!(plans[0], Plan::::read::<&[u8]>(&mut buf.as_ref()).unwrap()); } // Execute the plan - let fee = coin.get_fee().await; + let fee = network.get_fee().await; let mut keys_txs = HashMap::new(); let mut eventualities = vec![]; for (i, keys) in keys.drain() { - let (signable, eventuality) = coin - .prepare_send(keys.clone(), coin.get_block_number(&block_id).await, plans[0].clone(), fee) + let (signable, eventuality) = network + .prepare_send(keys.clone(), 
network.get_block_number(&block_id).await, plans[0].clone(), fee) .await .unwrap() .0 @@ -90,23 +90,23 @@ pub async fn test_wallet(coin: C) { keys_txs.insert(i, (keys, (signable, eventuality))); } - let txid = sign(coin.clone(), keys_txs).await; - let tx = coin.get_transaction(&txid).await.unwrap(); - coin.mine_block().await; - let block_number = coin.get_latest_block_number().await.unwrap(); - let block = coin.get_block(block_number).await.unwrap(); + let txid = sign(network.clone(), keys_txs).await; + let tx = network.get_transaction(&txid).await.unwrap(); + network.mine_block().await; + let block_number = network.get_latest_block_number().await.unwrap(); + let block = network.get_block(block_number).await.unwrap(); let first_outputs = outputs; - let outputs = coin.get_outputs(&block, key).await.unwrap(); + let outputs = network.get_outputs(&block, key).await.unwrap(); assert_eq!(outputs.len(), 2); - let amount = amount - tx.fee(&coin).await; + let amount = amount - tx.fee(&network).await; assert!((outputs[0].amount() == amount) || (outputs[1].amount() == amount)); for eventuality in eventualities { - assert!(coin.confirm_completion(&eventuality, &tx)); + assert!(network.confirm_completion(&eventuality, &tx)); } - for _ in 1 .. C::CONFIRMATIONS { - coin.mine_block().await; + for _ in 1 .. 
N::CONFIRMATIONS { + network.mine_block().await; } match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { diff --git a/substrate/client/Cargo.toml b/substrate/client/Cargo.toml index 03f59fbe..4a529b31 100644 --- a/substrate/client/Cargo.toml +++ b/substrate/client/Cargo.toml @@ -43,9 +43,9 @@ tokio = "1" [features] serai = ["thiserror", "scale-info", "subxt"] -coins = [] -bitcoin = ["coins", "dep:bitcoin"] -monero = ["coins", "ciphersuite/ed25519", "monero-serai"] +networks = [] +bitcoin = ["networks", "dep:bitcoin"] +monero = ["networks", "ciphersuite/ed25519", "monero-serai"] # Assumes the default usage is to use Serai as a DEX, which doesn't actually # require connecting to a Serai node diff --git a/substrate/client/src/lib.rs b/substrate/client/src/lib.rs index d7311d95..e80b183f 100644 --- a/substrate/client/src/lib.rs +++ b/substrate/client/src/lib.rs @@ -1,5 +1,5 @@ -#[cfg(feature = "coins")] -pub mod coins; +#[cfg(feature = "networks")] +pub mod networks; #[cfg(feature = "serai")] mod serai; diff --git a/substrate/client/src/coins/bitcoin.rs b/substrate/client/src/networks/bitcoin.rs similarity index 100% rename from substrate/client/src/coins/bitcoin.rs rename to substrate/client/src/networks/bitcoin.rs diff --git a/substrate/client/src/coins/mod.rs b/substrate/client/src/networks/mod.rs similarity index 100% rename from substrate/client/src/coins/mod.rs rename to substrate/client/src/networks/mod.rs diff --git a/substrate/client/src/coins/monero.rs b/substrate/client/src/networks/monero.rs similarity index 100% rename from substrate/client/src/coins/monero.rs rename to substrate/client/src/networks/monero.rs diff --git a/substrate/client/src/tests/mod.rs b/substrate/client/src/tests/mod.rs index 7cf73e9a..3ffa8630 100644 --- a/substrate/client/src/tests/mod.rs +++ b/substrate/client/src/tests/mod.rs @@ -1,2 +1,2 @@ -#[cfg(feature = "coins")] -mod coins; +#[cfg(feature = "networks")] +mod networks; diff --git 
a/substrate/client/src/tests/coins/bitcoin.rs b/substrate/client/src/tests/networks/bitcoin.rs similarity index 100% rename from substrate/client/src/tests/coins/bitcoin.rs rename to substrate/client/src/tests/networks/bitcoin.rs diff --git a/substrate/client/src/tests/coins/mod.rs b/substrate/client/src/tests/networks/mod.rs similarity index 100% rename from substrate/client/src/tests/coins/mod.rs rename to substrate/client/src/tests/networks/mod.rs diff --git a/substrate/client/src/tests/coins/monero.rs b/substrate/client/src/tests/networks/monero.rs similarity index 100% rename from substrate/client/src/tests/coins/monero.rs rename to substrate/client/src/tests/networks/monero.rs diff --git a/substrate/primitives/src/lib.rs b/substrate/primitives/src/lib.rs index 2fc97ff8..8c493138 100644 --- a/substrate/primitives/src/lib.rs +++ b/substrate/primitives/src/lib.rs @@ -20,8 +20,8 @@ pub use amount::*; mod block; pub use block::*; -mod coins; -pub use coins::*; +mod networks; +pub use networks::*; mod balance; pub use balance::*; diff --git a/substrate/primitives/src/coins.rs b/substrate/primitives/src/networks.rs similarity index 100% rename from substrate/primitives/src/coins.rs rename to substrate/primitives/src/networks.rs diff --git a/tests/processor/src/networks.rs b/tests/processor/src/networks.rs index 47514109..27deff1d 100644 --- a/tests/processor/src/networks.rs +++ b/tests/processor/src/networks.rs @@ -88,7 +88,7 @@ pub fn network_rpc(network: NetworkId, ops: &DockerOperations, handle: &str) -> } pub fn confirmations(network: NetworkId) -> usize { - use processor::coins::*; + use processor::networks::*; match network { NetworkId::Bitcoin => Bitcoin::CONFIRMATIONS, NetworkId::Ethereum => todo!(), @@ -313,7 +313,7 @@ impl Wallet { }, rpc::HttpRpc, }; - use processor::{additional_key, coins::Monero}; + use processor::{additional_key, networks::Monero}; let rpc_url = network_rpc(NetworkId::Monero, ops, handle); let rpc = 
HttpRpc::new(rpc_url).expect("couldn't connect to the Monero RPC"); @@ -384,23 +384,27 @@ impl Wallet { } pub fn address(&self) -> ExternalAddress { - use serai_client::coins; + use serai_client::networks; match self { Wallet::Bitcoin { public_key, .. } => { use bitcoin_serai::bitcoin::{Network, Address}; ExternalAddress::new( - coins::bitcoin::Address(Address::p2pkh(public_key, Network::Regtest)).try_into().unwrap(), + networks::bitcoin::Address(Address::p2pkh(public_key, Network::Regtest)) + .try_into() + .unwrap(), ) .unwrap() } Wallet::Monero { view_pair, .. } => { use monero_serai::wallet::address::{Network, AddressSpec}; ExternalAddress::new( - coins::monero::Address::new(view_pair.address(Network::Mainnet, AddressSpec::Standard)) - .unwrap() - .try_into() - .unwrap(), + networks::monero::Address::new( + view_pair.address(Network::Mainnet, AddressSpec::Standard), + ) + .unwrap() + .try_into() + .unwrap(), ) .unwrap() } diff --git a/tests/processor/src/tests/batch.rs b/tests/processor/src/tests/batch.rs index 76be87ea..cc07487f 100644 --- a/tests/processor/src/tests/batch.rs +++ b/tests/processor/src/tests/batch.rs @@ -273,7 +273,7 @@ fn batch_test() { messages::substrate::CoordinatorMessage::SubstrateBlock { context: SubstrateContext { serai_time, - coin_latest_finalized_block: batch.batch.block, + network_latest_finalized_block: batch.batch.block, }, network, block: substrate_block_num + u64::from(i), diff --git a/tests/processor/src/tests/key_gen.rs b/tests/processor/src/tests/key_gen.rs index 447d91cd..903b2efa 100644 --- a/tests/processor/src/tests/key_gen.rs +++ b/tests/processor/src/tests/key_gen.rs @@ -80,7 +80,7 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator], network: NetworkId // Send the shares let mut substrate_key = None; - let mut coin_key = None; + let mut network_key = None; interact_with_all( coordinators, |participant| messages::key_gen::CoordinatorMessage::Shares { @@ -96,15 +96,15 @@ pub(crate) async fn 
key_gen(coordinators: &mut [Coordinator], network: NetworkId messages::key_gen::ProcessorMessage::GeneratedKeyPair { id: this_id, substrate_key: this_substrate_key, - coin_key: this_coin_key, + network_key: this_network_key, } => { assert_eq!(this_id, id); if substrate_key.is_none() { substrate_key = Some(this_substrate_key); - coin_key = Some(this_coin_key.clone()); + network_key = Some(this_network_key.clone()); } assert_eq!(substrate_key.unwrap(), this_substrate_key); - assert_eq!(coin_key.as_ref().unwrap(), &this_coin_key); + assert_eq!(network_key.as_ref().unwrap(), &this_network_key); } _ => panic!("processor didn't return GeneratedKeyPair in response to GenerateKey"), }, @@ -112,15 +112,15 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator], network: NetworkId .await; // Confirm the key pair - // TODO: Beter document coin_latest_finalized_block's genesis state, and error if a set claims + // TODO: Better document network_latest_finalized_block's genesis state, and error if a set claims // [0; 32] was finalized let context = SubstrateContext { serai_time: SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(), - coin_latest_finalized_block: BlockHash([0; 32]), + network_latest_finalized_block: BlockHash([0; 32]), }; let key_pair = - (PublicKey::from_raw(substrate_key.unwrap()), coin_key.clone().unwrap().try_into().unwrap()); + (PublicKey::from_raw(substrate_key.unwrap()), network_key.clone().unwrap().try_into().unwrap()); for coordinator in coordinators { coordinator diff --git a/tests/processor/src/tests/send.rs b/tests/processor/src/tests/send.rs index bc2a4325..dcf6bd6b 100644 --- a/tests/processor/src/tests/send.rs +++ b/tests/processor/src/tests/send.rs @@ -158,7 +158,7 @@ fn send_test() { let key_pair = key_gen(&mut coordinators, network).await; // Now we we have to mine blocks to activate the key - // (the first key is activated when the coin's block time exceeds the Serai time it was + // (the first key is activated
when the network's block time exceeds the Serai time it was // confirmed at) for _ in 0 .. confirmations(network) { @@ -209,7 +209,7 @@ fn send_test() { messages::substrate::CoordinatorMessage::SubstrateBlock { context: SubstrateContext { serai_time, - coin_latest_finalized_block: batch.batch.block, + network_latest_finalized_block: batch.batch.block, }, network, block: substrate_block_num,