diff --git a/Cargo.lock b/Cargo.lock index 68a2657f..87d49f61 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5630,19 +5630,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "monero-seed" -version = "0.1.0" -dependencies = [ - "curve25519-dalek", - "hex", - "monero-primitives", - "rand_core", - "std-shims", - "thiserror 2.0.9", - "zeroize", -] - [[package]] name = "monero-serai" version = "0.1.4-alpha" @@ -5717,21 +5704,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "monero-wallet-util" -version = "0.1.0" -dependencies = [ - "curve25519-dalek", - "hex", - "monero-seed", - "monero-wallet", - "polyseed", - "rand_core", - "std-shims", - "thiserror 2.0.9", - "zeroize", -] - [[package]] name = "multiaddr" version = "0.18.1" @@ -6478,17 +6450,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7924d1d0ad836f665c9065e26d016c673ece3993f30d340068b16f282afc1156" -[[package]] -name = "password-hash" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" -dependencies = [ - "base64ct", - "rand_core", - "subtle", -] - [[package]] name = "pasta_curves" version = "0.5.1" @@ -6533,9 +6494,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ "digest 0.10.7", - "hmac", - "password-hash", - "sha2", ] [[package]] @@ -6658,20 +6616,6 @@ dependencies = [ "universal-hash", ] -[[package]] -name = "polyseed" -version = "0.1.0" -dependencies = [ - "hex", - "pbkdf2 0.12.2", - "rand_core", - "sha3", - "std-shims", - "subtle", - "thiserror 2.0.9", - "zeroize", -] - [[package]] name = "polyval" version = "0.6.2" @@ -9006,6 +8950,7 @@ dependencies = [ "serai-coins-primitives", "serai-primitives", "sp-core", + "sp-io", "sp-runtime", "sp-std", ] @@ -9445,7 +9390,7 @@ dependencies = [ 
"generalized-bulletproofs-circuit-abstraction", "generalized-bulletproofs-ec-gadgets", "minimal-ed448", - "monero-wallet-util", + "monero-wallet", "multiexp", "schnorr-signatures", "secq256k1", @@ -9531,6 +9476,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", + "sp-std", "zeroize", ] diff --git a/Cargo.toml b/Cargo.toml index c50c7c10..7ac71666 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -191,9 +191,6 @@ parking_lot = { path = "patches/parking_lot" } zstd = { path = "patches/zstd" } # Needed for WAL compression rocksdb = { path = "patches/rocksdb" } -# 1.0.1 was yanked due to a breaking change (an extra field) -# 2.0 has fewer dependencies and still works within our tree -tiny-bip39 = { path = "patches/tiny-bip39" } # is-terminal now has an std-based solution with an equivalent API is-terminal = { path = "patches/is-terminal" } diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs index c42c2d12..08643aad 100644 --- a/coordinator/cosign/src/intend.rs +++ b/coordinator/cosign/src/intend.rs @@ -3,7 +3,7 @@ use std::{sync::Arc, collections::HashMap}; use serai_client::{ primitives::{SeraiAddress, Amount}, - validator_sets::primitives::ValidatorSet, + validator_sets::primitives::ExternalValidatorSet, Serai, }; @@ -28,7 +28,7 @@ db_channel! { CosignIntendChannels { GlobalSessionsChannel: () -> ([u8; 32], GlobalSession), BlockEvents: () -> BlockEventData, - IntendedCosigns: (set: ValidatorSet) -> CosignIntent, + IntendedCosigns: (set: ExternalValidatorSet) -> CosignIntent, } } @@ -110,7 +110,7 @@ impl ContinuallyRan for CosignIntendTask { keys.insert(set.network, SeraiAddress::from(*key)); let stake = serai .validator_sets() - .total_allocated_stake(set.network) + .total_allocated_stake(set.network.into()) .await .map_err(|e| format!("{e:?}"))? 
.unwrap_or(Amount(0)) diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs index 3d476c3d..e98127b4 100644 --- a/coordinator/cosign/src/lib.rs +++ b/coordinator/cosign/src/lib.rs @@ -11,8 +11,8 @@ use scale::{Encode, Decode}; use borsh::{BorshSerialize, BorshDeserialize}; use serai_client::{ - primitives::{NetworkId, SeraiAddress}, - validator_sets::primitives::{Session, ValidatorSet, KeyPair}, + primitives::{ExternalNetworkId, SeraiAddress}, + validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair}, Public, Block, Serai, TemporalSerai, }; @@ -52,13 +52,13 @@ pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign"; #[derive(Debug, BorshSerialize, BorshDeserialize)] pub(crate) struct GlobalSession { pub(crate) start_block_number: u64, - pub(crate) sets: Vec, - pub(crate) keys: HashMap, - pub(crate) stakes: HashMap, + pub(crate) sets: Vec, + pub(crate) keys: HashMap, + pub(crate) stakes: HashMap, pub(crate) total_stake: u64, } impl GlobalSession { - fn id(mut cosigners: Vec) -> [u8; 32] { + fn id(mut cosigners: Vec) -> [u8; 32] { cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap()); Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into() } @@ -101,12 +101,12 @@ pub struct Cosign { /// The hash of the block to cosign. pub block_hash: [u8; 32], /// The actual cosigner. - pub cosigner: NetworkId, + pub cosigner: ExternalNetworkId, } impl CosignIntent { /// Convert this into a `Cosign`. - pub fn into_cosign(self, cosigner: NetworkId) -> Cosign { + pub fn into_cosign(self, cosigner: ExternalNetworkId) -> Cosign { let CosignIntent { global_session, block_number, block_hash, notable: _ } = self; Cosign { global_session, block_number, block_hash, cosigner } } @@ -166,7 +166,10 @@ create_db! { // one notable block. 
All validator sets will explicitly produce a cosign for their notable // block, causing the latest cosigned block for a global session to either be the global // session's notable cosigns or the network's latest cosigns. - NetworksLatestCosignedBlock: (global_session: [u8; 32], network: NetworkId) -> SignedCosign, + NetworksLatestCosignedBlock: ( + global_session: [u8; 32], + network: ExternalNetworkId + ) -> SignedCosign, // Cosigns received for blocks not locally recognized as finalized. Faults: (global_session: [u8; 32]) -> Vec, // The global session which faulted. @@ -177,15 +180,10 @@ create_db! { /// Fetch the keys used for cosigning by a specific network. async fn keys_for_network( serai: &TemporalSerai<'_>, - network: NetworkId, + network: ExternalNetworkId, ) -> Result, String> { - // The Serai network never cosigns so it has no keys for cosigning - if network == NetworkId::Serai { - return Ok(None); - } - let Some(latest_session) = - serai.validator_sets().session(network).await.map_err(|e| format!("{e:?}"))? + serai.validator_sets().session(network.into()).await.map_err(|e| format!("{e:?}"))? else { // If this network hasn't had a session declared, move on return Ok(None); @@ -194,7 +192,7 @@ async fn keys_for_network( // Get the keys for the latest session if let Some(keys) = serai .validator_sets() - .keys(ValidatorSet { network, session: latest_session }) + .keys(ExternalValidatorSet { network, session: latest_session }) .await .map_err(|e| format!("{e:?}"))? { @@ -205,7 +203,7 @@ async fn keys_for_network( if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) { if let Some(keys) = serai .validator_sets() - .keys(ValidatorSet { network, session: prior_session }) + .keys(ExternalValidatorSet { network, session: prior_session }) .await .map_err(|e| format!("{e:?}"))? { @@ -216,16 +214,19 @@ async fn keys_for_network( Ok(None) } -/// Fetch the `ValidatorSet`s, and their associated keys, used for cosigning as of this block. 
-async fn cosigning_sets(serai: &TemporalSerai<'_>) -> Result, String> { - let mut sets = Vec::with_capacity(serai_client::primitives::NETWORKS.len()); - for network in serai_client::primitives::NETWORKS { +/// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this +/// block. +async fn cosigning_sets( + serai: &TemporalSerai<'_>, +) -> Result, String> { + let mut sets = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len()); + for network in serai_client::primitives::EXTERNAL_NETWORKS { let Some((session, keys)) = keys_for_network(serai, network).await? else { // If this network doesn't have usable keys, move on continue; }; - sets.push((ValidatorSet { network, session }, keys.0)); + sets.push((ExternalValidatorSet { network, session }, keys.0)); } Ok(sets) } @@ -345,8 +346,8 @@ impl Cosigning { /// If this global session hasn't produced any notable cosigns, this will return the latest /// cosigns for this session. pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec { - let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len()); - for network in serai_client::primitives::NETWORKS { + let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len()); + for network in serai_client::primitives::EXTERNAL_NETWORKS { if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) { cosigns.push(cosign); } @@ -363,7 +364,7 @@ impl Cosigning { let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults"); // Also include all of our recognized-as-honest cosigns in an attempt to induce fault // identification in those who see the faulty cosigns as honest - for network in serai_client::primitives::NETWORKS { + for network in serai_client::primitives::EXTERNAL_NETWORKS { if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) { if cosign.cosign.global_session == faulted { 
cosigns.push(cosign); @@ -375,8 +376,8 @@ impl Cosigning { let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else { return vec![]; }; - let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len()); - for network in serai_client::primitives::NETWORKS { + let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len()); + for network in serai_client::primitives::EXTERNAL_NETWORKS { if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) { cosigns.push(cosign); } @@ -487,12 +488,12 @@ impl Cosigning { Ok(()) } - /// Receive intended cosigns to produce for this ValidatorSet. + /// Receive intended cosigns to produce for this ExternalValidatorSet. /// /// All cosigns intended, up to and including the next notable cosign, are returned. /// /// This will drain the internal channel and not re-yield these intentions again. - pub fn intended_cosigns(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec { + pub fn intended_cosigns(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec { let mut res: Vec = vec![]; // While we have yet to find a notable cosign... while !res.last().map(|cosign| cosign.notable).unwrap_or(false) { diff --git a/coordinator/p2p/libp2p/src/lib.rs b/coordinator/p2p/libp2p/src/lib.rs index 91f66a2d..8d60b32b 100644 --- a/coordinator/p2p/libp2p/src/lib.rs +++ b/coordinator/p2p/libp2p/src/lib.rs @@ -14,8 +14,8 @@ use zeroize::Zeroizing; use schnorrkel::Keypair; use serai_client::{ - primitives::{NetworkId, PublicKey}, - validator_sets::primitives::ValidatorSet, + primitives::{ExternalNetworkId, PublicKey}, + validator_sets::primitives::ExternalValidatorSet, Serai, }; @@ -104,7 +104,7 @@ impl serai_coordinator_p2p::Peer<'_> for Peer<'_> { #[derive(Clone)] struct Peers { - peers: Arc>>>, + peers: Arc>>>, } // Consider adding identify/kad/autonat/rendevous/(relay + dcutr). 
While we currently use the Serai @@ -135,7 +135,8 @@ struct Libp2pInner { signed_cosigns: Mutex>, signed_cosigns_send: mpsc::UnboundedSender, - heartbeat_requests: Mutex>, + heartbeat_requests: + Mutex>, notable_cosign_requests: Mutex>, inbound_request_responses: mpsc::UnboundedSender<(InboundRequestId, Response)>, } @@ -312,7 +313,7 @@ impl serai_cosign::RequestNotableCosigns for Libp2p { impl serai_coordinator_p2p::P2p for Libp2p { type Peer<'a> = Peer<'a>; - fn peers(&self, network: NetworkId) -> impl Send + Future>> { + fn peers(&self, network: ExternalNetworkId) -> impl Send + Future>> { async move { let Some(peer_ids) = self.0.peers.peers.read().await.get(&network).cloned() else { return vec![]; diff --git a/coordinator/p2p/libp2p/src/swarm.rs b/coordinator/p2p/libp2p/src/swarm.rs index 0d06c171..94a7cb03 100644 --- a/coordinator/p2p/libp2p/src/swarm.rs +++ b/coordinator/p2p/libp2p/src/swarm.rs @@ -6,7 +6,7 @@ use std::{ use borsh::BorshDeserialize; -use serai_client::validator_sets::primitives::ValidatorSet; +use serai_client::validator_sets::primitives::ExternalValidatorSet; use tokio::sync::{mpsc, oneshot, RwLock}; @@ -68,7 +68,7 @@ pub(crate) struct SwarmTask { outbound_request_responses: HashMap>, inbound_request_response_channels: HashMap>, - heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ValidatorSet, [u8; 32])>, + heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>, notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>, inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>, } @@ -324,7 +324,7 @@ impl SwarmTask { outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender)>, - heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ValidatorSet, [u8; 32])>, + heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>, notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 
32])>, inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>, ) { diff --git a/coordinator/p2p/libp2p/src/validators.rs b/coordinator/p2p/libp2p/src/validators.rs index 6b93cf4d..25fabacd 100644 --- a/coordinator/p2p/libp2p/src/validators.rs +++ b/coordinator/p2p/libp2p/src/validators.rs @@ -4,7 +4,9 @@ use std::{ collections::{HashSet, HashMap}, }; -use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, SeraiError, Serai}; +use serai_client::{ + primitives::ExternalNetworkId, validator_sets::primitives::Session, SeraiError, Serai, +}; use serai_task::{Task, ContinuallyRan}; @@ -24,11 +26,11 @@ pub(crate) struct Validators { serai: Arc, // A cache for which session we're populated with the validators of - sessions: HashMap, + sessions: HashMap, // The validators by network - by_network: HashMap>, + by_network: HashMap>, // The validators and their networks - validators: HashMap>, + validators: HashMap>, // The channel to send the changes down changes: mpsc::UnboundedSender, @@ -49,8 +51,8 @@ impl Validators { async fn session_changes( serai: impl Borrow, - sessions: impl Borrow>, - ) -> Result)>, SeraiError> { + sessions: impl Borrow>, + ) -> Result)>, SeraiError> { /* This uses the latest finalized block, not the latest cosigned block, which should be fine as in the worst case, we'd connect to unexpected validators. 
They still shouldn't be able to @@ -67,13 +69,10 @@ impl Validators { // FuturesUnordered can be bad practice as it'll cause timeouts if infrequently polled, but // we poll it till it yields all futures with the most minimal processing possible let mut futures = FuturesUnordered::new(); - for network in serai_client::primitives::NETWORKS { - if network == NetworkId::Serai { - continue; - } + for network in serai_client::primitives::EXTERNAL_NETWORKS { let sessions = sessions.borrow(); futures.push(async move { - let session = match temporal_serai.session(network).await { + let session = match temporal_serai.session(network.into()).await { Ok(Some(session)) => session, Ok(None) => return Ok(None), Err(e) => return Err(e), @@ -82,7 +81,7 @@ impl Validators { if sessions.get(&network) == Some(&session) { Ok(None) } else { - match temporal_serai.active_network_validators(network).await { + match temporal_serai.active_network_validators(network.into()).await { Ok(validators) => Ok(Some(( network, session, @@ -105,7 +104,7 @@ impl Validators { fn incorporate_session_changes( &mut self, - session_changes: Vec<(NetworkId, Session, HashSet)>, + session_changes: Vec<(ExternalNetworkId, Session, HashSet)>, ) { let mut removed = HashSet::new(); let mut added = HashSet::new(); @@ -160,11 +159,11 @@ impl Validators { Ok(()) } - pub(crate) fn by_network(&self) -> &HashMap> { + pub(crate) fn by_network(&self) -> &HashMap> { &self.by_network } - pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet> { + pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet> { self.validators.get(peer_id) } } diff --git a/coordinator/p2p/src/heartbeat.rs b/coordinator/p2p/src/heartbeat.rs index f13a0e5c..7691abbd 100644 --- a/coordinator/p2p/src/heartbeat.rs +++ b/coordinator/p2p/src/heartbeat.rs @@ -1,7 +1,7 @@ use core::future::Future; use std::time::{Duration, SystemTime}; -use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet}; +use 
serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet}; use futures_lite::FutureExt; @@ -38,7 +38,7 @@ pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH * /// If the other validator has more blocks then we do, they're expected to inform us. This forms /// the sync protocol for our Tributaries. pub(crate) struct HeartbeatTask { - pub(crate) set: ValidatorSet, + pub(crate) set: ExternalValidatorSet, pub(crate) tributary: Tributary, pub(crate) reader: TributaryReader, pub(crate) p2p: P, diff --git a/coordinator/p2p/src/lib.rs b/coordinator/p2p/src/lib.rs index 9bf245ca..68536b9d 100644 --- a/coordinator/p2p/src/lib.rs +++ b/coordinator/p2p/src/lib.rs @@ -7,7 +7,7 @@ use std::collections::HashMap; use borsh::{BorshSerialize, BorshDeserialize}; -use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet}; +use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet}; use serai_db::Db; use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader}; @@ -25,7 +25,7 @@ use crate::heartbeat::HeartbeatTask; #[derive(Clone, Copy, BorshSerialize, BorshDeserialize, Debug)] pub struct Heartbeat { /// The Tributary this is the heartbeat of. - pub set: ValidatorSet, + pub set: ExternalValidatorSet, /// The hash of the latest block added to the Tributary. pub latest_block_hash: [u8; 32], } @@ -56,7 +56,7 @@ pub trait P2p: type Peer<'a>: Peer<'a>; /// Fetch the peers for this network. - fn peers(&self, network: NetworkId) -> impl Send + Future>>; + fn peers(&self, network: ExternalNetworkId) -> impl Send + Future>>; /// Broadcast a cosign. 
fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future; @@ -131,13 +131,13 @@ fn handle_heartbeat( pub async fn run( db: impl Db, p2p: P, - mut add_tributary: mpsc::UnboundedReceiver<(ValidatorSet, Tributary)>, - mut retire_tributary: mpsc::UnboundedReceiver, + mut add_tributary: mpsc::UnboundedReceiver<(ExternalValidatorSet, Tributary)>, + mut retire_tributary: mpsc::UnboundedReceiver, send_cosigns: mpsc::UnboundedSender, ) { - let mut readers = HashMap::>::new(); + let mut readers = HashMap::>::new(); let mut tributaries = HashMap::<[u8; 32], mpsc::UnboundedSender>>::new(); - let mut heartbeat_tasks = HashMap::::new(); + let mut heartbeat_tasks = HashMap::::new(); loop { tokio::select! { diff --git a/coordinator/src/db.rs b/coordinator/src/db.rs index 631c6d4b..108e0f32 100644 --- a/coordinator/src/db.rs +++ b/coordinator/src/db.rs @@ -6,8 +6,8 @@ use serai_db::{create_db, db_channel}; use dkg::Participant; use serai_client::{ - primitives::NetworkId, - validator_sets::primitives::{Session, ValidatorSet, KeyPair}, + primitives::ExternalNetworkId, + validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair}, }; use serai_cosign::SignedCosign; @@ -43,22 +43,21 @@ pub(crate) fn coordinator_db() -> Db { db(&format!("{root_path}/coordinator/db")) } -fn tributary_db_folder(set: ValidatorSet) -> String { +fn tributary_db_folder(set: ExternalValidatorSet) -> String { let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified"); let network = match set.network { - NetworkId::Serai => panic!("creating Tributary for the Serai network"), - NetworkId::Bitcoin => "Bitcoin", - NetworkId::Ethereum => "Ethereum", - NetworkId::Monero => "Monero", + ExternalNetworkId::Bitcoin => "Bitcoin", + ExternalNetworkId::Ethereum => "Ethereum", + ExternalNetworkId::Monero => "Monero", }; format!("{root_path}/tributary-{network}-{}", set.session.0) } -pub(crate) fn tributary_db(set: ValidatorSet) -> Db { +pub(crate) fn tributary_db(set: 
ExternalValidatorSet) -> Db { db(&format!("{}/db", tributary_db_folder(set))) } -pub(crate) fn prune_tributary_db(set: ValidatorSet) { +pub(crate) fn prune_tributary_db(set: ExternalValidatorSet) { log::info!("pruning data directory for tributary {set:?}"); let db = tributary_db_folder(set); if fs::exists(&db).expect("couldn't check if tributary DB exists") { @@ -73,15 +72,15 @@ create_db! { // The latest Tributary to have been retired for a network // Since Tributaries are retired sequentially, this is informative to if any Tributary has been // retired - RetiredTributary: (network: NetworkId) -> Session, + RetiredTributary: (network: ExternalNetworkId) -> Session, // The last handled message from a Processor - LastProcessorMessage: (network: NetworkId) -> u64, + LastProcessorMessage: (network: ExternalNetworkId) -> u64, // Cosigns we produced and tried to intake yet incurred an error while doing so ErroneousCosigns: () -> Vec, // The keys to confirm and set on the Serai network - KeysToConfirm: (set: ValidatorSet) -> KeyPair, + KeysToConfirm: (set: ExternalValidatorSet) -> KeyPair, // The key was set on the Serai network - KeySet: (set: ValidatorSet) -> (), + KeySet: (set: ExternalValidatorSet) -> (), } } @@ -90,7 +89,7 @@ db_channel! { // Cosigns we produced SignedCosigns: () -> SignedCosign, // Tributaries to clean up upon reboot - TributaryCleanup: () -> ValidatorSet, + TributaryCleanup: () -> ExternalValidatorSet, } } @@ -100,50 +99,50 @@ mod _internal_db { db_channel! 
{ Coordinator { // Tributary transactions to publish from the Processor messages - TributaryTransactionsFromProcessorMessages: (set: ValidatorSet) -> Transaction, + TributaryTransactionsFromProcessorMessages: (set: ExternalValidatorSet) -> Transaction, // Tributary transactions to publish from the DKG confirmation task - TributaryTransactionsFromDkgConfirmation: (set: ValidatorSet) -> Transaction, + TributaryTransactionsFromDkgConfirmation: (set: ExternalValidatorSet) -> Transaction, // Participants to remove - RemoveParticipant: (set: ValidatorSet) -> Participant, + RemoveParticipant: (set: ExternalValidatorSet) -> Participant, } } } pub(crate) struct TributaryTransactionsFromProcessorMessages; impl TributaryTransactionsFromProcessorMessages { - pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, tx: &Transaction) { + pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) { // If this set has yet to be retired, send this transaction if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) { _internal_db::TributaryTransactionsFromProcessorMessages::send(txn, set, tx); } } - pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option { + pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option { _internal_db::TributaryTransactionsFromProcessorMessages::try_recv(txn, set) } } pub(crate) struct TributaryTransactionsFromDkgConfirmation; impl TributaryTransactionsFromDkgConfirmation { - pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, tx: &Transaction) { + pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) { // If this set has yet to be retired, send this transaction if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) { _internal_db::TributaryTransactionsFromDkgConfirmation::send(txn, set, tx); } } - pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option { + 
pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option { _internal_db::TributaryTransactionsFromDkgConfirmation::try_recv(txn, set) } } pub(crate) struct RemoveParticipant; impl RemoveParticipant { - pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, participant: Participant) { + pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, participant: Participant) { // If this set has yet to be retired, send this transaction if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) { _internal_db::RemoveParticipant::send(txn, set, &participant); } } - pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option { + pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option { _internal_db::RemoveParticipant::try_recv(txn, set) } } diff --git a/coordinator/src/dkg_confirmation.rs b/coordinator/src/dkg_confirmation.rs index b9af0ec7..a28fb40f 100644 --- a/coordinator/src/dkg_confirmation.rs +++ b/coordinator/src/dkg_confirmation.rs @@ -17,7 +17,7 @@ use serai_db::{DbTxn, Db as DbTrait}; use serai_client::{ primitives::SeraiAddress, - validator_sets::primitives::{ValidatorSet, musig_context, set_keys_message}, + validator_sets::primitives::{ExternalValidatorSet, musig_context, set_keys_message}, }; use serai_task::{DoesNotError, ContinuallyRan}; @@ -141,7 +141,7 @@ impl ConfirmDkgTask { Self { db, set, tributary_db, key, signer: None } } - fn slash(db: &mut CD, set: ValidatorSet, validator: SeraiAddress) { + fn slash(db: &mut CD, set: ExternalValidatorSet, validator: SeraiAddress) { let mut txn = db.txn(); TributaryTransactionsFromDkgConfirmation::send( &mut txn, @@ -153,7 +153,7 @@ impl ConfirmDkgTask { fn preprocess( db: &mut CD, - set: ValidatorSet, + set: ExternalValidatorSet, attempt: u32, key: &Zeroizing<::F>, signer: &mut Option, @@ -162,7 +162,9 @@ impl ConfirmDkgTask { let (machine, preprocess) = AlgorithmMachine::new( schnorrkel(), // We use a 
1-of-1 Musig here as we don't know who will actually be in this Musig yet - musig(&musig_context(set), key, &[Ristretto::generator() * key.deref()]).unwrap().into(), + musig(&musig_context(set.into()), key, &[Ristretto::generator() * key.deref()]) + .unwrap() + .into(), ) .preprocess(&mut OsRng); // We take the preprocess so we can use it in a distinct machine with the actual Musig @@ -256,8 +258,9 @@ impl ContinuallyRan for ConfirmDkgTask { }) .collect::>(); - let keys = - musig(&musig_context(self.set.set), &self.key, &musig_public_keys).unwrap().into(); + let keys = musig(&musig_context(self.set.set.into()), &self.key, &musig_public_keys) + .unwrap() + .into(); // Rebuild the machine let (machine, preprocess_from_cache) = diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 4d48a317..d63b79a2 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -14,8 +14,8 @@ use borsh::BorshDeserialize; use tokio::sync::mpsc; use serai_client::{ - primitives::{NetworkId, PublicKey, SeraiAddress, Signature}, - validator_sets::primitives::{ValidatorSet, KeyPair}, + primitives::{ExternalNetworkId, PublicKey, SeraiAddress, Signature}, + validator_sets::primitives::{ExternalValidatorSet, KeyPair}, Serai, }; use message_queue::{Service, client::MessageQueue}; @@ -153,14 +153,13 @@ async fn handle_network( mut db: impl serai_db::Db, message_queue: Arc, serai: Arc, - network: NetworkId, + network: ExternalNetworkId, ) { // Spawn the task to publish batches for this network { let (publish_batch_task_def, publish_batch_task) = Task::new(); tokio::spawn( PublishBatchTask::new(db.clone(), serai.clone(), network) - .unwrap() .continually_run(publish_batch_task_def, vec![]), ); // Forget its handle so it always runs in the background @@ -197,7 +196,7 @@ async fn handle_network( match msg { messages::ProcessorMessage::KeyGen(msg) => match msg { messages::key_gen::ProcessorMessage::Participation { session, participation } => { - let set = ValidatorSet { 
network, session }; + let set = ExternalValidatorSet { network, session }; TributaryTransactionsFromProcessorMessages::send( &mut txn, set, @@ -211,7 +210,7 @@ async fn handle_network( } => { KeysToConfirm::set( &mut txn, - ValidatorSet { network, session }, + ExternalValidatorSet { network, session }, &KeyPair( PublicKey::from_raw(substrate_key), network_key @@ -221,15 +220,15 @@ async fn handle_network( ); } messages::key_gen::ProcessorMessage::Blame { session, participant } => { - RemoveParticipant::send(&mut txn, ValidatorSet { network, session }, participant); + RemoveParticipant::send(&mut txn, ExternalValidatorSet { network, session }, participant); } }, messages::ProcessorMessage::Sign(msg) => match msg { messages::sign::ProcessorMessage::InvalidParticipant { session, participant } => { - RemoveParticipant::send(&mut txn, ValidatorSet { network, session }, participant); + RemoveParticipant::send(&mut txn, ExternalValidatorSet { network, session }, participant); } messages::sign::ProcessorMessage::Preprocesses { id, preprocesses } => { - let set = ValidatorSet { network, session: id.session }; + let set = ExternalValidatorSet { network, session: id.session }; if id.attempt == 0 { // Batches are declared by their intent to be signed if let messages::sign::VariantSignId::Batch(hash) = id.id { @@ -254,7 +253,7 @@ async fn handle_network( ); } messages::sign::ProcessorMessage::Shares { id, shares } => { - let set = ValidatorSet { network, session: id.session }; + let set = ExternalValidatorSet { network, session: id.session }; TributaryTransactionsFromProcessorMessages::send( &mut txn, set, @@ -282,7 +281,7 @@ async fn handle_network( } => { SlashReports::set( &mut txn, - ValidatorSet { network, session }, + ExternalValidatorSet { network, session }, slash_report, Signature(signature), ); @@ -298,7 +297,7 @@ async fn handle_network( .push(plan.transaction_plan_id); } for (session, plans) in by_session { - let set = ValidatorSet { network, session }; + let set = 
ExternalValidatorSet { network, session }; SubstrateBlockPlans::set(&mut txn, set, block, &plans); TributaryTransactionsFromProcessorMessages::send( &mut txn, @@ -481,10 +480,7 @@ async fn main() { ); // Handle each of the networks - for network in serai_client::primitives::NETWORKS { - if network == NetworkId::Serai { - continue; - } + for network in serai_client::primitives::EXTERNAL_NETWORKS { tokio::spawn(handle_network(db.clone(), message_queue.clone(), serai.clone(), network)); } diff --git a/coordinator/src/substrate.rs b/coordinator/src/substrate.rs index 7a78e512..4a70ee6b 100644 --- a/coordinator/src/substrate.rs +++ b/coordinator/src/substrate.rs @@ -9,7 +9,7 @@ use tokio::sync::mpsc; use serai_db::{DbTxn, Db as DbTrait}; -use serai_client::validator_sets::primitives::{Session, ValidatorSet}; +use serai_client::validator_sets::primitives::{Session, ExternalValidatorSet}; use message_queue::{Service, Metadata, client::MessageQueue}; use tributary_sdk::Tributary; @@ -27,8 +27,8 @@ pub(crate) struct SubstrateTask { pub(crate) message_queue: Arc, pub(crate) p2p: P, pub(crate) p2p_add_tributary: - mpsc::UnboundedSender<(ValidatorSet, Tributary)>, - pub(crate) p2p_retire_tributary: mpsc::UnboundedSender, + mpsc::UnboundedSender<(ExternalValidatorSet, Tributary)>, + pub(crate) p2p_retire_tributary: mpsc::UnboundedSender, } impl ContinuallyRan for SubstrateTask

{ @@ -38,7 +38,7 @@ impl ContinuallyRan for SubstrateTask

{ let mut made_progress = false; // Handle the Canonical events - for network in serai_client::primitives::NETWORKS { + for network in serai_client::primitives::EXTERNAL_NETWORKS { loop { let mut txn = self.db.txn(); let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network) @@ -48,7 +48,7 @@ impl ContinuallyRan for SubstrateTask

{ match msg { messages::substrate::CoordinatorMessage::SetKeys { session, .. } => { - KeySet::set(&mut txn, ValidatorSet { network, session }, &()); + KeySet::set(&mut txn, ExternalValidatorSet { network, session }, &()); } messages::substrate::CoordinatorMessage::SlashesReported { session } => { let prior_retired = crate::db::RetiredTributary::get(&txn, network); @@ -58,7 +58,7 @@ impl ContinuallyRan for SubstrateTask

{ crate::db::RetiredTributary::set(&mut txn, network, &session); self .p2p_retire_tributary - .send(ValidatorSet { network, session }) + .send(ExternalValidatorSet { network, session }) .expect("p2p retire_tributary channel dropped?"); } messages::substrate::CoordinatorMessage::Block { .. } => {} @@ -108,7 +108,10 @@ impl ContinuallyRan for SubstrateTask

{ */ crate::db::TributaryCleanup::send( &mut txn, - &ValidatorSet { network: new_set.set.network, session: Session(historic_session) }, + &ExternalValidatorSet { + network: new_set.set.network, + session: Session(historic_session), + }, ); } diff --git a/coordinator/src/tributary.rs b/coordinator/src/tributary.rs index 5f935f68..7f45797d 100644 --- a/coordinator/src/tributary.rs +++ b/coordinator/src/tributary.rs @@ -11,7 +11,7 @@ use tokio::sync::mpsc; use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel}; use scale::Encode; -use serai_client::validator_sets::primitives::ValidatorSet; +use serai_client::validator_sets::primitives::ExternalValidatorSet; use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary}; @@ -33,13 +33,13 @@ use crate::{ create_db! { Coordinator { - PublishOnRecognition: (set: ValidatorSet, topic: Topic) -> Transaction, + PublishOnRecognition: (set: ExternalValidatorSet, topic: Topic) -> Transaction, } } db_channel! { Coordinator { - PendingCosigns: (set: ValidatorSet) -> CosignIntent, + PendingCosigns: (set: ExternalValidatorSet) -> CosignIntent, } } @@ -48,7 +48,7 @@ db_channel! { /// This is not a well-designed function. This is specific to the context in which its called, /// within this file. It should only be considered an internal helper for this domain alone. async fn provide_transaction( - set: ValidatorSet, + set: ExternalValidatorSet, tributary: &Tributary, tx: Transaction, ) { @@ -211,7 +211,7 @@ async fn add_signed_unsigned_transaction( } async fn add_with_recognition_check( - set: ValidatorSet, + set: ExternalValidatorSet, tributary_db: &mut TD, tributary: &Tributary, key: &Zeroizing<::F>, @@ -350,7 +350,7 @@ impl ContinuallyRan for AddTributaryTransactio /// Takes the messages from ScanTributaryTask and publishes them to the message-queue. 
pub(crate) struct TributaryProcessorMessagesTask { tributary_db: TD, - set: ValidatorSet, + set: ExternalValidatorSet, message_queue: Arc, } impl ContinuallyRan for TributaryProcessorMessagesTask { @@ -430,7 +430,7 @@ impl ContinuallyRan for SignSlashReportTask( db: CD, - set: ValidatorSet, + set: ExternalValidatorSet, tributary: Tributary, scan_tributary_task: TaskHandle, tasks_to_keep_alive: Vec, @@ -469,7 +469,7 @@ pub(crate) async fn spawn_tributary( db: Db, message_queue: Arc, p2p: P, - p2p_add_tributary: &mpsc::UnboundedSender<(ValidatorSet, Tributary)>, + p2p_add_tributary: &mpsc::UnboundedSender<(ExternalValidatorSet, Tributary)>, set: NewSetInformation, serai_key: Zeroizing<::F>, ) { diff --git a/coordinator/substrate/src/canonical.rs b/coordinator/substrate/src/canonical.rs index bc6db5ca..81a8ccce 100644 --- a/coordinator/substrate/src/canonical.rs +++ b/coordinator/substrate/src/canonical.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use futures::stream::{StreamExt, FuturesOrdered}; -use serai_client::Serai; +use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai}; use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage}; @@ -152,6 +152,7 @@ impl ContinuallyRan for CanonicalEventStream { else { panic!("SetRetired event wasn't a SetRetired event: {set_retired:?}"); }; + let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; crate::Canonical::send( &mut txn, set.network, @@ -159,7 +160,7 @@ impl ContinuallyRan for CanonicalEventStream { ); } - for network in serai_client::primitives::NETWORKS { + for network in serai_client::primitives::EXTERNAL_NETWORKS { let mut batch = None; for this_batch in &block.batch_events { let serai_client::in_instructions::InInstructionsEvent::Batch { @@ -201,7 +202,7 @@ impl ContinuallyRan for CanonicalEventStream { let serai_client::coins::CoinsEvent::BurnWithInstruction { from: _, instruction } = &burn else { - panic!("Burn event wasn't a Burn.in event: {burn:?}"); + 
panic!("BurnWithInstruction event wasn't a BurnWithInstruction event: {burn:?}"); }; if instruction.balance.coin.network() == network { burns.push(instruction.clone()); diff --git a/coordinator/substrate/src/ephemeral.rs b/coordinator/substrate/src/ephemeral.rs index 18c11d00..cb6e14cd 100644 --- a/coordinator/substrate/src/ephemeral.rs +++ b/coordinator/substrate/src/ephemeral.rs @@ -4,8 +4,8 @@ use std::sync::Arc; use futures::stream::{StreamExt, FuturesOrdered}; use serai_client::{ - primitives::{NetworkId, SeraiAddress, EmbeddedEllipticCurve}, - validator_sets::primitives::MAX_KEY_SHARES_PER_SET, + primitives::{SeraiAddress, EmbeddedEllipticCurve}, + validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet}, Serai, }; @@ -130,16 +130,13 @@ impl ContinuallyRan for EphemeralEventStream { let serai_client::validator_sets::ValidatorSetsEvent::NewSet { set } = &new_set else { panic!("NewSet event wasn't a NewSet event: {new_set:?}"); }; - // We only coordinate over external networks - if set.network == NetworkId::Serai { - continue; - } + let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; let serai = self.serai.as_of(block.block_hash); let serai = serai.validator_sets(); let Some(validators) = - serai.participants(set.network).await.map_err(|e| format!("{e:?}"))? + serai.participants(set.network.into()).await.map_err(|e| format!("{e:?}"))? else { Err(format!( "block #{block_number} declared a new set but didn't have the participants" @@ -222,11 +219,11 @@ impl ContinuallyRan for EphemeralEventStream { } let mut new_set = NewSetInformation { - set: *set, + set, serai_block: block.block_hash, declaration_time: block.time, - // TODO: Why do we have this as an explicit field here? - // Shouldn't this be inlined into the Processor's key gen code, where it's used? 
+ // TODO: This should be inlined into the Processor's key gen code + // It's legacy from when we removed participants from the key gen threshold: ((total_weight * 2) / 3) + 1, validators, evrf_public_keys, @@ -246,7 +243,8 @@ impl ContinuallyRan for EphemeralEventStream { else { panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}"); }; - crate::SignSlashReport::send(&mut txn, *set); + let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; + crate::SignSlashReport::send(&mut txn, set); } txn.commit(); diff --git a/coordinator/substrate/src/lib.rs b/coordinator/substrate/src/lib.rs index 68566ff4..902234dc 100644 --- a/coordinator/substrate/src/lib.rs +++ b/coordinator/substrate/src/lib.rs @@ -10,8 +10,8 @@ use borsh::{BorshSerialize, BorshDeserialize}; use dkg::Participant; use serai_client::{ - primitives::{NetworkId, SeraiAddress, Signature}, - validator_sets::primitives::{Session, ValidatorSet, KeyPair, SlashReport}, + primitives::{ExternalNetworkId, SeraiAddress, Signature}, + validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair, SlashReport}, in_instructions::primitives::SignedBatch, Transaction, }; @@ -35,7 +35,7 @@ pub use publish_slash_report::PublishSlashReportTask; #[borsh(init = init_participant_indexes)] pub struct NewSetInformation { /// The set. - pub set: ValidatorSet, + pub set: ExternalValidatorSet, /// The Serai block which declared it. pub serai_block: [u8; 32], /// The time of the block which declared it, in seconds. 
@@ -82,24 +82,24 @@ mod _public_db { db_channel!( CoordinatorSubstrate { // Canonical messages to send to the processor - Canonical: (network: NetworkId) -> messages::substrate::CoordinatorMessage, + Canonical: (network: ExternalNetworkId) -> messages::substrate::CoordinatorMessage, // Relevant new set, from an ephemeral event stream NewSet: () -> NewSetInformation, // Potentially relevant sign slash report, from an ephemeral event stream - SignSlashReport: (set: ValidatorSet) -> (), + SignSlashReport: (set: ExternalValidatorSet) -> (), // Signed batches to publish onto the Serai network - SignedBatches: (network: NetworkId) -> SignedBatch, + SignedBatches: (network: ExternalNetworkId) -> SignedBatch, } ); create_db!( CoordinatorSubstrate { // Keys to set on the Serai network - Keys: (network: NetworkId) -> (Session, Vec), + Keys: (network: ExternalNetworkId) -> (Session, Vec), // Slash reports to publish onto the Serai network - SlashReports: (network: NetworkId) -> (Session, Vec), + SlashReports: (network: ExternalNetworkId) -> (Session, Vec), } ); } @@ -109,7 +109,7 @@ pub struct Canonical; impl Canonical { pub(crate) fn send( txn: &mut impl DbTxn, - network: NetworkId, + network: ExternalNetworkId, msg: &messages::substrate::CoordinatorMessage, ) { _public_db::Canonical::send(txn, network, msg); @@ -117,7 +117,7 @@ impl Canonical { /// Try to receive a canonical event, returning `None` if there is none to receive. pub fn try_recv( txn: &mut impl DbTxn, - network: NetworkId, + network: ExternalNetworkId, ) -> Option { _public_db::Canonical::try_recv(txn, network) } @@ -141,12 +141,12 @@ impl NewSet { /// notifications for all relevant validator sets will be included. 
pub struct SignSlashReport; impl SignSlashReport { - pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet) { + pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet) { _public_db::SignSlashReport::send(txn, set, &()); } /// Try to receive a notification to sign a slash report, returning `None` if there is none to /// receive. - pub fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<()> { + pub fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<()> { _public_db::SignSlashReport::try_recv(txn, set) } } @@ -160,7 +160,7 @@ impl Keys { /// reported at once. pub fn set( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, key_pair: KeyPair, signature_participants: bitvec::vec::BitVec, signature: Signature, @@ -180,7 +180,10 @@ impl Keys { ); _public_db::Keys::set(txn, set.network, &(set.session, tx.encode())); } - pub(crate) fn take(txn: &mut impl DbTxn, network: NetworkId) -> Option<(Session, Transaction)> { + pub(crate) fn take( + txn: &mut impl DbTxn, + network: ExternalNetworkId, + ) -> Option<(Session, Transaction)> { let (session, tx) = _public_db::Keys::take(txn, network)?; Some((session, <_>::decode(&mut tx.as_slice()).unwrap())) } @@ -193,7 +196,7 @@ impl SignedBatches { pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) { _public_db::SignedBatches::send(txn, batch.batch.network, batch); } - pub(crate) fn try_recv(txn: &mut impl DbTxn, network: NetworkId) -> Option { + pub(crate) fn try_recv(txn: &mut impl DbTxn, network: ExternalNetworkId) -> Option { _public_db::SignedBatches::try_recv(txn, network) } } @@ -207,7 +210,7 @@ impl SlashReports { /// slashes reported at once. 
pub fn set( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, slash_report: SlashReport, signature: Signature, ) { @@ -225,7 +228,10 @@ impl SlashReports { ); _public_db::SlashReports::set(txn, set.network, &(set.session, tx.encode())); } - pub(crate) fn take(txn: &mut impl DbTxn, network: NetworkId) -> Option<(Session, Transaction)> { + pub(crate) fn take( + txn: &mut impl DbTxn, + network: ExternalNetworkId, + ) -> Option<(Session, Transaction)> { let (session, tx) = _public_db::SlashReports::take(txn, network)?; Some((session, <_>::decode(&mut tx.as_slice()).unwrap())) } diff --git a/coordinator/substrate/src/publish_batch.rs b/coordinator/substrate/src/publish_batch.rs index 83aa0718..ff4b46de 100644 --- a/coordinator/substrate/src/publish_batch.rs +++ b/coordinator/substrate/src/publish_batch.rs @@ -2,7 +2,7 @@ use core::future::Future; use std::sync::Arc; #[rustfmt::skip] -use serai_client::{primitives::NetworkId, in_instructions::primitives::SignedBatch, SeraiError, Serai}; +use serai_client::{primitives::ExternalNetworkId, in_instructions::primitives::SignedBatch, SeraiError, Serai}; use serai_db::{Get, DbTxn, Db, create_db}; use serai_task::ContinuallyRan; @@ -11,8 +11,8 @@ use crate::SignedBatches; create_db!( CoordinatorSubstrate { - LastPublishedBatch: (network: NetworkId) -> u32, - BatchesToPublish: (network: NetworkId, batch: u32) -> SignedBatch, + LastPublishedBatch: (network: ExternalNetworkId) -> u32, + BatchesToPublish: (network: ExternalNetworkId, batch: u32) -> SignedBatch, } ); @@ -20,19 +20,13 @@ create_db!( pub struct PublishBatchTask { db: D, serai: Arc, - network: NetworkId, + network: ExternalNetworkId, } impl PublishBatchTask { /// Create a task to publish `SignedBatch`s onto Serai. - /// - /// Returns None if `network == NetworkId::Serai`. - // TODO: ExternalNetworkId - pub fn new(db: D, serai: Arc, network: NetworkId) -> Option { - if network == NetworkId::Serai { - None? 
- }; - Some(Self { db, serai, network }) + pub fn new(db: D, serai: Arc, network: ExternalNetworkId) -> Self { + Self { db, serai, network } } } diff --git a/coordinator/substrate/src/publish_slash_report.rs b/coordinator/substrate/src/publish_slash_report.rs index 9be94f60..7b90d53d 100644 --- a/coordinator/substrate/src/publish_slash_report.rs +++ b/coordinator/substrate/src/publish_slash_report.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use serai_db::{DbTxn, Db}; -use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, Serai}; +use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::Session, Serai}; use serai_task::ContinuallyRan; @@ -24,7 +24,7 @@ impl PublishSlashReportTask { impl PublishSlashReportTask { // Returns if a slash report was successfully published - async fn publish(&mut self, network: NetworkId) -> Result { + async fn publish(&mut self, network: ExternalNetworkId) -> Result { let mut txn = self.db.txn(); let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else { // No slash report to publish @@ -36,7 +36,7 @@ impl PublishSlashReportTask { let serai = self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?; let serai = serai.validator_sets(); let session_after_slash_report = Session(session.0 + 1); - let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?; + let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?; let current_session = current_session.map(|session| session.0); // Only attempt to publish the slash report for session #n while session #n+1 is still // active @@ -84,11 +84,7 @@ impl ContinuallyRan for PublishSlashReportTask { async move { let mut made_progress = false; let mut error = None; - for network in serai_client::primitives::NETWORKS { - if network == NetworkId::Serai { - continue; - }; - + for network in serai_client::primitives::EXTERNAL_NETWORKS { let network_res = 
self.publish(network).await; // We made progress if any network successfully published their slash report made_progress |= network_res == Ok(true); diff --git a/coordinator/substrate/src/set_keys.rs b/coordinator/substrate/src/set_keys.rs index a63e0923..b8bf2ad1 100644 --- a/coordinator/substrate/src/set_keys.rs +++ b/coordinator/substrate/src/set_keys.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use serai_db::{DbTxn, Db}; -use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai}; +use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai}; use serai_task::ContinuallyRan; @@ -28,11 +28,7 @@ impl ContinuallyRan for SetKeysTask { fn run_iteration(&mut self) -> impl Send + Future> { async move { let mut made_progress = false; - for network in serai_client::primitives::NETWORKS { - if network == NetworkId::Serai { - continue; - }; - + for network in serai_client::primitives::EXTERNAL_NETWORKS { let mut txn = self.db.txn(); let Some((session, keys)) = Keys::take(&mut txn, network) else { // No keys to set @@ -44,7 +40,7 @@ impl ContinuallyRan for SetKeysTask { let serai = self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?; let serai = serai.validator_sets(); - let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?; + let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?; let current_session = current_session.map(|session| session.0); // Only attempt to set these keys if this isn't a retired session if Some(session.0) < current_session { @@ -62,7 +58,7 @@ impl ContinuallyRan for SetKeysTask { // If this session already has had its keys set, move on if serai - .keys(ValidatorSet { network, session }) + .keys(ExternalValidatorSet { network, session }) .await .map_err(|e| format!("{e:?}"))? 
.is_some() diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index ef4199b8..7d5857eb 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use scale::Encode; use borsh::{BorshSerialize, BorshDeserialize}; -use serai_client::{primitives::SeraiAddress, validator_sets::primitives::ValidatorSet}; +use serai_client::{primitives::SeraiAddress, validator_sets::primitives::ExternalValidatorSet}; use messages::sign::{VariantSignId, SignId}; @@ -97,7 +97,7 @@ impl Topic { /// The SignId for this topic /// /// Returns None if Topic isn't Topic::Sign - pub(crate) fn sign_id(self, set: ValidatorSet) -> Option { + pub(crate) fn sign_id(self, set: ExternalValidatorSet) -> Option { #[allow(clippy::match_same_arms)] match self { Topic::RemoveParticipant { .. } => None, @@ -115,7 +115,7 @@ impl Topic { /// Returns None if Topic isn't Topic::DkgConfirmation. pub(crate) fn dkg_confirmation_sign_id( self, - set: ValidatorSet, + set: ExternalValidatorSet, ) -> Option { #[allow(clippy::match_same_arms)] match self { @@ -227,41 +227,48 @@ pub(crate) enum DataSet { create_db!( CoordinatorTributary { // The last handled tributary block's (number, hash) - LastHandledTributaryBlock: (set: ValidatorSet) -> (u64, [u8; 32]), + LastHandledTributaryBlock: (set: ExternalValidatorSet) -> (u64, [u8; 32]), // The slash points a validator has accrued, with u32::MAX representing a fatal slash. - SlashPoints: (set: ValidatorSet, validator: SeraiAddress) -> u32, + SlashPoints: (set: ExternalValidatorSet, validator: SeraiAddress) -> u32, // The cosign intent for a Substrate block - CosignIntents: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> CosignIntent, + CosignIntents: (set: ExternalValidatorSet, substrate_block_hash: [u8; 32]) -> CosignIntent, // The latest Substrate block to cosign. 
- LatestSubstrateBlockToCosign: (set: ValidatorSet) -> [u8; 32], + LatestSubstrateBlockToCosign: (set: ExternalValidatorSet) -> [u8; 32], // The hash of the block we're actively cosigning. - ActivelyCosigning: (set: ValidatorSet) -> [u8; 32], + ActivelyCosigning: (set: ExternalValidatorSet) -> [u8; 32], // If this block has already been cosigned. - Cosigned: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> (), + Cosigned: (set: ExternalValidatorSet, substrate_block_hash: [u8; 32]) -> (), // The plans to recognize upon a `Transaction::SubstrateBlock` being included on-chain. - SubstrateBlockPlans: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> Vec<[u8; 32]>, + SubstrateBlockPlans: ( + set: ExternalValidatorSet, + substrate_block_hash: [u8; 32] + ) -> Vec<[u8; 32]>, // The weight accumulated for a topic. - AccumulatedWeight: (set: ValidatorSet, topic: Topic) -> u16, + AccumulatedWeight: (set: ExternalValidatorSet, topic: Topic) -> u16, // The entries accumulated for a topic, by validator. - Accumulated: (set: ValidatorSet, topic: Topic, validator: SeraiAddress) -> D, + Accumulated: ( + set: ExternalValidatorSet, + topic: Topic, + validator: SeraiAddress + ) -> D, // Topics to be recognized as of a certain block number due to the reattempt protocol. 
- Reattempt: (set: ValidatorSet, block_number: u64) -> Vec, + Reattempt: (set: ExternalValidatorSet, block_number: u64) -> Vec, } ); db_channel!( CoordinatorTributary { // Messages to send to the processor - ProcessorMessages: (set: ValidatorSet) -> messages::CoordinatorMessage, + ProcessorMessages: (set: ExternalValidatorSet) -> messages::CoordinatorMessage, // Messages for the DKG confirmation - DkgConfirmationMessages: (set: ValidatorSet) -> messages::sign::CoordinatorMessage, + DkgConfirmationMessages: (set: ExternalValidatorSet) -> messages::sign::CoordinatorMessage, // Topics which have been explicitly recognized - RecognizedTopics: (set: ValidatorSet) -> Topic, + RecognizedTopics: (set: ExternalValidatorSet) -> Topic, } ); @@ -269,13 +276,13 @@ pub(crate) struct TributaryDb; impl TributaryDb { pub(crate) fn last_handled_tributary_block( getter: &impl Get, - set: ValidatorSet, + set: ExternalValidatorSet, ) -> Option<(u64, [u8; 32])> { LastHandledTributaryBlock::get(getter, set) } pub(crate) fn set_last_handled_tributary_block( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, block_number: u64, block_hash: [u8; 32], ) { @@ -284,23 +291,26 @@ impl TributaryDb { pub(crate) fn latest_substrate_block_to_cosign( getter: &impl Get, - set: ValidatorSet, + set: ExternalValidatorSet, ) -> Option<[u8; 32]> { LatestSubstrateBlockToCosign::get(getter, set) } pub(crate) fn set_latest_substrate_block_to_cosign( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, substrate_block_hash: [u8; 32], ) { LatestSubstrateBlockToCosign::set(txn, set, &substrate_block_hash); } - pub(crate) fn actively_cosigning(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<[u8; 32]> { + pub(crate) fn actively_cosigning( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + ) -> Option<[u8; 32]> { ActivelyCosigning::get(txn, set) } pub(crate) fn start_cosigning( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, 
substrate_block_hash: [u8; 32], substrate_block_number: u64, ) { @@ -320,33 +330,33 @@ impl TributaryDb { }, ); } - pub(crate) fn finish_cosigning(txn: &mut impl DbTxn, set: ValidatorSet) { + pub(crate) fn finish_cosigning(txn: &mut impl DbTxn, set: ExternalValidatorSet) { assert!(ActivelyCosigning::take(txn, set).is_some(), "finished cosigning but not cosigning"); } pub(crate) fn mark_cosigned( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, substrate_block_hash: [u8; 32], ) { Cosigned::set(txn, set, substrate_block_hash, &()); } pub(crate) fn cosigned( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, substrate_block_hash: [u8; 32], ) -> bool { Cosigned::get(txn, set, substrate_block_hash).is_some() } - pub(crate) fn recognize_topic(txn: &mut impl DbTxn, set: ValidatorSet, topic: Topic) { + pub(crate) fn recognize_topic(txn: &mut impl DbTxn, set: ExternalValidatorSet, topic: Topic) { AccumulatedWeight::set(txn, set, topic, &0); RecognizedTopics::send(txn, set, &topic); } - pub(crate) fn recognized(getter: &impl Get, set: ValidatorSet, topic: Topic) -> bool { + pub(crate) fn recognized(getter: &impl Get, set: ExternalValidatorSet, topic: Topic) -> bool { AccumulatedWeight::get(getter, set, topic).is_some() } - pub(crate) fn start_of_block(txn: &mut impl DbTxn, set: ValidatorSet, block_number: u64) { + pub(crate) fn start_of_block(txn: &mut impl DbTxn, set: ExternalValidatorSet, block_number: u64) { for topic in Reattempt::take(txn, set, block_number).unwrap_or(vec![]) { /* TODO: Slash all people who preprocessed but didn't share, and add a delay to their @@ -376,7 +386,7 @@ impl TributaryDb { pub(crate) fn fatal_slash( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, validator: SeraiAddress, reason: &str, ) { @@ -386,7 +396,7 @@ impl TributaryDb { pub(crate) fn is_fatally_slashed( getter: &impl Get, - set: ValidatorSet, + set: ExternalValidatorSet, validator: SeraiAddress, ) -> bool { 
SlashPoints::get(getter, set, validator).unwrap_or(0) == u32::MAX @@ -395,7 +405,7 @@ impl TributaryDb { #[allow(clippy::too_many_arguments)] pub(crate) fn accumulate( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, validators: &[SeraiAddress], total_weight: u16, block_number: u64, @@ -511,7 +521,7 @@ impl TributaryDb { pub(crate) fn send_message( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, message: impl Into, ) { ProcessorMessages::send(txn, set, &message.into()); diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 1e1235ad..1c82d5b9 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -10,7 +10,7 @@ use dkg::Participant; use serai_client::{ primitives::SeraiAddress, - validator_sets::primitives::{ValidatorSet, Slash}, + validator_sets::primitives::{ExternalValidatorSet, Slash}, }; use serai_db::*; @@ -41,7 +41,10 @@ pub use db::Topic; pub struct ProcessorMessages; impl ProcessorMessages { /// Try to receive a message to send to a Processor. - pub fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option { + pub fn try_recv( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + ) -> Option { db::ProcessorMessages::try_recv(txn, set) } } @@ -58,7 +61,7 @@ impl DkgConfirmationMessages { /// across validator sets, with no guarantees of uniqueness across contexts. pub fn try_recv( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, ) -> Option { db::DkgConfirmationMessages::try_recv(txn, set) } @@ -70,12 +73,12 @@ impl CosignIntents { /// Provide a CosignIntent for this Tributary. /// /// This must be done before the associated `Transaction::Cosign` is provided. 
- pub fn provide(txn: &mut impl DbTxn, set: ValidatorSet, intent: &CosignIntent) { + pub fn provide(txn: &mut impl DbTxn, set: ExternalValidatorSet, intent: &CosignIntent) { db::CosignIntents::set(txn, set, intent.block_hash, intent); } fn take( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, substrate_block_hash: [u8; 32], ) -> Option { db::CosignIntents::take(txn, set, substrate_block_hash) @@ -88,13 +91,13 @@ impl RecognizedTopics { /// If this topic has been recognized by this Tributary. /// /// This will either be by explicit recognition or participation. - pub fn recognized(getter: &impl Get, set: ValidatorSet, topic: Topic) -> bool { + pub fn recognized(getter: &impl Get, set: ExternalValidatorSet, topic: Topic) -> bool { TributaryDb::recognized(getter, set, topic) } /// The next topic requiring recognition which has been recognized by this Tributary. pub fn try_recv_topic_requiring_recognition( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, ) -> Option { db::RecognizedTopics::try_recv(txn, set) } @@ -109,7 +112,7 @@ impl SubstrateBlockPlans { /// This must be done before the associated `Transaction::Cosign` is provided. 
pub fn set( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, substrate_block_hash: [u8; 32], plans: &Vec<[u8; 32]>, ) { @@ -117,7 +120,7 @@ impl SubstrateBlockPlans { } fn take( txn: &mut impl DbTxn, - set: ValidatorSet, + set: ExternalValidatorSet, substrate_block_hash: [u8; 32], ) -> Option> { db::SubstrateBlockPlans::take(txn, set, substrate_block_hash) diff --git a/patches/tiny-bip39/Cargo.toml b/patches/tiny-bip39/Cargo.toml deleted file mode 100644 index ff9b8a61..00000000 --- a/patches/tiny-bip39/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "tiny-bip39" -version = "1.0.2" -description = "tiny-bip39 which patches to the latest update" -license = "MIT" -repository = "https://github.com/serai-dex/serai/tree/develop/patches/tiny-bip39" -authors = ["Luke Parker "] -keywords = [] -edition = "2021" -rust-version = "1.70" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[package.metadata.cargo-machete] -ignored = ["tiny-bip39"] - -[lib] -name = "bip39" -path = "src/lib.rs" - -[dependencies] -tiny-bip39 = "2" diff --git a/patches/tiny-bip39/src/lib.rs b/patches/tiny-bip39/src/lib.rs deleted file mode 100644 index 3890f5ae..00000000 --- a/patches/tiny-bip39/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -pub use bip39::*; diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs index 5feb3e25..302f670b 100644 --- a/processor/bitcoin/src/main.rs +++ b/processor/bitcoin/src/main.rs @@ -90,7 +90,7 @@ use bitcoin_serai::bitcoin::{ }; use serai_client::{ - primitives::{MAX_DATA_LEN, Coin, NetworkId, Amount, Balance}, + primitives::{MAX_DATA_LEN, ExternalNetworkId, ExternalCoin, Amount, Balance}, networks::bitcoin::Address, }; */ diff --git a/processor/bitcoin/src/primitives/output.rs b/processor/bitcoin/src/primitives/output.rs index f1a1dc7a..44f422c2 100644 --- a/processor/bitcoin/src/primitives/output.rs +++ b/processor/bitcoin/src/primitives/output.rs @@ -14,7 +14,7 @@ use 
borsh::{BorshSerialize, BorshDeserialize}; use serai_db::Get; use serai_client::{ - primitives::{Coin, Amount, Balance, ExternalAddress}, + primitives::{ExternalCoin, Amount, ExternalBalance, ExternalAddress}, networks::bitcoin::Address, }; @@ -127,8 +127,8 @@ impl ReceivedOutput<::G, Address> for Output { self.presumed_origin.clone() } - fn balance(&self) -> Balance { - Balance { coin: Coin::Bitcoin, amount: Amount(self.output.value()) } + fn balance(&self) -> ExternalBalance { + ExternalBalance { coin: ExternalCoin::Bitcoin, amount: Amount(self.output.value()) } } fn data(&self) -> &[u8] { diff --git a/processor/bitcoin/src/rpc.rs b/processor/bitcoin/src/rpc.rs index acd3be85..4289c714 100644 --- a/processor/bitcoin/src/rpc.rs +++ b/processor/bitcoin/src/rpc.rs @@ -2,7 +2,7 @@ use core::future::Future; use bitcoin_serai::rpc::{RpcError, Rpc as BRpc}; -use serai_client::primitives::{NetworkId, Coin, Amount}; +use serai_client::primitives::{ExternalNetworkId, ExternalCoin, Amount}; use serai_db::Db; use scanner::ScannerFeed; @@ -21,7 +21,7 @@ pub(crate) struct Rpc { } impl ScannerFeed for Rpc { - const NETWORK: NetworkId = NetworkId::Bitcoin; + const NETWORK: ExternalNetworkId = ExternalNetworkId::Bitcoin; // 6 confirmations is widely accepted as secure and shouldn't occur const CONFIRMATIONS: u64 = 6; // The window length should be roughly an hour @@ -118,8 +118,8 @@ impl ScannerFeed for Rpc { } } - fn dust(coin: Coin) -> Amount { - assert_eq!(coin, Coin::Bitcoin); + fn dust(coin: ExternalCoin) -> Amount { + assert_eq!(coin, ExternalCoin::Bitcoin); /* A Taproot input is: @@ -158,11 +158,11 @@ impl ScannerFeed for Rpc { fn cost_to_aggregate( &self, - coin: Coin, + coin: ExternalCoin, _reference_block: &Self::Block, ) -> impl Send + Future> { async move { - assert_eq!(coin, Coin::Bitcoin); + assert_eq!(coin, ExternalCoin::Bitcoin); // TODO Ok(Amount(0)) } diff --git a/processor/bitcoin/src/scheduler.rs b/processor/bitcoin/src/scheduler.rs index 08dc508c..00f4a072 
100644 --- a/processor/bitcoin/src/scheduler.rs +++ b/processor/bitcoin/src/scheduler.rs @@ -8,7 +8,7 @@ use bitcoin_serai::{ }; use serai_client::{ - primitives::{Coin, Amount}, + primitives::{ExternalCoin, Amount}, networks::bitcoin::Address, }; @@ -59,7 +59,7 @@ fn signable_transaction( .map(|payment| { (ScriptBuf::from(payment.address().clone()), { let balance = payment.balance(); - assert_eq!(balance.coin, Coin::Bitcoin); + assert_eq!(balance.coin, ExternalCoin::Bitcoin); balance.amount.0 }) }) diff --git a/processor/ethereum/src/primitives/output.rs b/processor/ethereum/src/primitives/output.rs index 99ffc880..797b528d 100644 --- a/processor/ethereum/src/primitives/output.rs +++ b/processor/ethereum/src/primitives/output.rs @@ -8,7 +8,7 @@ use scale::{Encode, Decode}; use borsh::{BorshSerialize, BorshDeserialize}; use serai_client::{ - primitives::{NetworkId, Coin, Amount, Balance}, + primitives::{ExternalNetworkId, ExternalCoin, Amount, ExternalBalance}, networks::ethereum::Address, }; @@ -17,20 +17,20 @@ use ethereum_router::{Coin as EthereumCoin, InInstruction as EthereumInInstructi use crate::{DAI, ETHER_DUST}; -fn coin_to_serai_coin(coin: &EthereumCoin) -> Option { +fn coin_to_serai_coin(coin: &EthereumCoin) -> Option { match coin { - EthereumCoin::Ether => Some(Coin::Ether), + EthereumCoin::Ether => Some(ExternalCoin::Ether), EthereumCoin::Erc20(token) => { if *token == DAI { - return Some(Coin::Dai); + return Some(ExternalCoin::Dai); } None } } } -fn amount_to_serai_amount(coin: Coin, amount: U256) -> Amount { - assert_eq!(coin.network(), NetworkId::Ethereum); +fn amount_to_serai_amount(coin: ExternalCoin, amount: U256) -> Amount { + assert_eq!(coin.network(), ExternalNetworkId::Ethereum); assert_eq!(coin.decimals(), 8); // Remove 10 decimals so we go from 18 decimals to 8 decimals let divisor = U256::from(10_000_000_000u64); @@ -119,7 +119,7 @@ impl ReceivedOutput<::G, Address> for Output { } } - fn balance(&self) -> Balance { + fn balance(&self) -> 
ExternalBalance { match self { Output::Output { key: _, instruction } => { let coin = coin_to_serai_coin(&instruction.coin).unwrap_or_else(|| { @@ -128,9 +128,11 @@ impl ReceivedOutput<::G, Address> for Output { "this never should have been yielded" ) }); - Balance { coin, amount: amount_to_serai_amount(coin, instruction.amount) } + ExternalBalance { coin, amount: amount_to_serai_amount(coin, instruction.amount) } + } + Output::Eventuality { .. } => { + ExternalBalance { coin: ExternalCoin::Ether, amount: ETHER_DUST } } - Output::Eventuality { .. } => Balance { coin: Coin::Ether, amount: ETHER_DUST }, } } fn data(&self) -> &[u8] { diff --git a/processor/ethereum/src/rpc.rs b/processor/ethereum/src/rpc.rs index b5b50cfa..57c14f59 100644 --- a/processor/ethereum/src/rpc.rs +++ b/processor/ethereum/src/rpc.rs @@ -7,7 +7,7 @@ use alloy_transport::{RpcError, TransportErrorKind}; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::{Provider, RootProvider}; -use serai_client::primitives::{NetworkId, Coin, Amount}; +use serai_client::primitives::{ExternalNetworkId, ExternalCoin, Amount}; use tokio::task::JoinSet; @@ -30,7 +30,7 @@ pub(crate) struct Rpc { } impl ScannerFeed for Rpc { - const NETWORK: NetworkId = NetworkId::Ethereum; + const NETWORK: ExternalNetworkId = ExternalNetworkId::Ethereum; // We only need one confirmation as Ethereum properly finalizes const CONFIRMATIONS: u64 = 1; @@ -209,22 +209,22 @@ impl ScannerFeed for Rpc { } } - fn dust(coin: Coin) -> Amount { - assert_eq!(coin.network(), NetworkId::Ethereum); + fn dust(coin: ExternalCoin) -> Amount { + assert_eq!(coin.network(), ExternalNetworkId::Ethereum); match coin { - Coin::Ether => ETHER_DUST, - Coin::Dai => DAI_DUST, + ExternalCoin::Ether => ETHER_DUST, + ExternalCoin::Dai => DAI_DUST, _ => unreachable!(), } } fn cost_to_aggregate( &self, - coin: Coin, + coin: ExternalCoin, _reference_block: &Self::Block, ) -> impl Send + Future> { async move { - assert_eq!(coin.network(), 
NetworkId::Ethereum); + assert_eq!(coin.network(), ExternalNetworkId::Ethereum); // There is no cost to aggregate as we receive to an account Ok(Amount(0)) } diff --git a/processor/ethereum/src/scheduler.rs b/processor/ethereum/src/scheduler.rs index e8a437c1..207792ec 100644 --- a/processor/ethereum/src/scheduler.rs +++ b/processor/ethereum/src/scheduler.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use alloy_core::primitives::U256; use serai_client::{ - primitives::{NetworkId, Coin, Balance}, + primitives::{ExternalNetworkId, ExternalCoin, ExternalBalance}, networks::ethereum::Address, }; @@ -17,17 +17,17 @@ use ethereum_router::Coin as EthereumCoin; use crate::{DAI, transaction::Action, rpc::Rpc}; -fn coin_to_ethereum_coin(coin: Coin) -> EthereumCoin { - assert_eq!(coin.network(), NetworkId::Ethereum); +fn coin_to_ethereum_coin(coin: ExternalCoin) -> EthereumCoin { + assert_eq!(coin.network(), ExternalNetworkId::Ethereum); match coin { - Coin::Ether => EthereumCoin::Ether, - Coin::Dai => EthereumCoin::Erc20(DAI), + ExternalCoin::Ether => EthereumCoin::Ether, + ExternalCoin::Dai => EthereumCoin::Erc20(DAI), _ => unreachable!(), } } -fn balance_to_ethereum_amount(balance: Balance) -> U256 { - assert_eq!(balance.coin.network(), NetworkId::Ethereum); +fn balance_to_ethereum_amount(balance: ExternalBalance) -> U256 { + assert_eq!(balance.coin.network(), ExternalNetworkId::Ethereum); assert_eq!(balance.coin.decimals(), 8); // Restore 10 decimals so we go from 8 decimals to 18 decimals // TODO: Document the expectation all integrated coins have 18 decimals @@ -73,17 +73,17 @@ impl smart_contract_scheduler::SmartContract> for SmartContract { } let mut res = vec![]; - for coin in [Coin::Ether, Coin::Dai] { + for coin in [ExternalCoin::Ether, ExternalCoin::Dai] { let Some(outs) = outs.remove(&coin) else { continue }; assert!(!outs.is_empty()); let fee_per_gas = match coin { // 10 gwei - Coin::Ether => { + ExternalCoin::Ether => { U256::try_from(10u64).unwrap() * 
alloy_core::primitives::utils::Unit::GWEI.wei() } // 0.0003 DAI - Coin::Dai => { + ExternalCoin::Dai => { U256::try_from(30u64).unwrap() * alloy_core::primitives::utils::Unit::TWEI.wei() } _ => unreachable!(), diff --git a/processor/monero/src/primitives/output.rs b/processor/monero/src/primitives/output.rs index 201e75c9..b2b87a5c 100644 --- a/processor/monero/src/primitives/output.rs +++ b/processor/monero/src/primitives/output.rs @@ -8,7 +8,7 @@ use scale::{Encode, Decode}; use borsh::{BorshSerialize, BorshDeserialize}; use serai_client::{ - primitives::{Coin, Amount, Balance}, + primitives::{ExternalCoin, Amount, ExternalBalance}, networks::monero::Address, }; @@ -76,8 +76,8 @@ impl ReceivedOutput<::G, Address> for Output { None } - fn balance(&self) -> Balance { - Balance { coin: Coin::Monero, amount: Amount(self.0.commitment().amount) } + fn balance(&self) -> ExternalBalance { + ExternalBalance { coin: ExternalCoin::Monero, amount: Amount(self.0.commitment().amount) } } fn data(&self) -> &[u8] { diff --git a/processor/monero/src/rpc.rs b/processor/monero/src/rpc.rs index 9244b23f..5ca74d02 100644 --- a/processor/monero/src/rpc.rs +++ b/processor/monero/src/rpc.rs @@ -3,7 +3,7 @@ use core::future::Future; use monero_wallet::rpc::{RpcError, Rpc as RpcTrait}; use monero_simple_request_rpc::SimpleRequestRpc; -use serai_client::primitives::{NetworkId, Coin, Amount}; +use serai_client::primitives::{ExternalNetworkId, ExternalCoin, Amount}; use scanner::ScannerFeed; use signers::TransactionPublisher; @@ -19,7 +19,7 @@ pub(crate) struct Rpc { } impl ScannerFeed for Rpc { - const NETWORK: NetworkId = NetworkId::Monero; + const NETWORK: ExternalNetworkId = ExternalNetworkId::Monero; // Outputs aren't spendable until 10 blocks later due to the 10-block lock // Since we assumed scanned outputs are spendable, that sets a minimum confirmation depth of 10 // A 10-block reorganization hasn't been observed in years and shouldn't occur @@ -107,8 +107,8 @@ impl ScannerFeed for 
Rpc { } } - fn dust(coin: Coin) -> Amount { - assert_eq!(coin, Coin::Monero); + fn dust(coin: ExternalCoin) -> Amount { + assert_eq!(coin, ExternalCoin::Monero); // 0.01 XMR Amount(10_000_000_000) @@ -116,11 +116,11 @@ impl ScannerFeed for Rpc { fn cost_to_aggregate( &self, - coin: Coin, + coin: ExternalCoin, _reference_block: &Self::Block, ) -> impl Send + Future> { async move { - assert_eq!(coin, Coin::Bitcoin); + assert_eq!(coin, ExternalCoin::Bitcoin); // TODO Ok(Amount(0)) } diff --git a/processor/monero/src/scheduler.rs b/processor/monero/src/scheduler.rs index 489db810..9043f888 100644 --- a/processor/monero/src/scheduler.rs +++ b/processor/monero/src/scheduler.rs @@ -9,7 +9,7 @@ use ciphersuite::{Ciphersuite, Ed25519}; use monero_wallet::rpc::{FeeRate, RpcError}; use serai_client::{ - primitives::{Coin, Amount}, + primitives::{ExternalCoin, Amount}, networks::monero::Address, }; @@ -106,7 +106,7 @@ async fn signable_transaction( .map(|payment| { (MoneroAddress::from(*payment.address()), { let balance = payment.balance(); - assert_eq!(balance.coin, Coin::Monero); + assert_eq!(balance.coin, ExternalCoin::Monero); balance.amount.0 }) }) diff --git a/processor/primitives/src/output.rs b/processor/primitives/src/output.rs index 76acde60..e45b7344 100644 --- a/processor/primitives/src/output.rs +++ b/processor/primitives/src/output.rs @@ -5,7 +5,7 @@ use group::GroupEncoding; use borsh::{BorshSerialize, BorshDeserialize}; -use serai_primitives::{ExternalAddress, Balance}; +use serai_primitives::{ExternalAddress, ExternalBalance}; use crate::Id; @@ -133,7 +133,7 @@ pub trait ReceivedOutput: fn presumed_origin(&self) -> Option; /// The balance associated with this output. - fn balance(&self) -> Balance; + fn balance(&self) -> ExternalBalance; /// The arbitrary data (presumably an InInstruction) associated with this output. 
fn data(&self) -> &[u8]; diff --git a/processor/primitives/src/payment.rs b/processor/primitives/src/payment.rs index 59b10f7f..b892b2b4 100644 --- a/processor/primitives/src/payment.rs +++ b/processor/primitives/src/payment.rs @@ -3,7 +3,7 @@ use std::io; use scale::{Encode, Decode, IoReader}; use borsh::{BorshSerialize, BorshDeserialize}; -use serai_primitives::Balance; +use serai_primitives::ExternalBalance; use serai_coins_primitives::OutInstructionWithBalance; use crate::Address; @@ -12,7 +12,7 @@ use crate::Address; #[derive(Clone, BorshSerialize, BorshDeserialize)] pub struct Payment { address: A, - balance: Balance, + balance: ExternalBalance, } impl TryFrom for Payment { @@ -27,7 +27,7 @@ impl TryFrom for Payment { impl Payment { /// Create a new Payment. - pub fn new(address: A, balance: Balance) -> Self { + pub fn new(address: A, balance: ExternalBalance) -> Self { Payment { address, balance } } @@ -36,7 +36,7 @@ impl Payment { &self.address } /// The balance to transfer. - pub fn balance(&self) -> Balance { + pub fn balance(&self) -> ExternalBalance { self.balance } @@ -44,7 +44,7 @@ impl Payment { pub fn read(reader: &mut impl io::Read) -> io::Result { let address = A::deserialize_reader(reader)?; let reader = &mut IoReader(reader); - let balance = Balance::decode(reader).map_err(io::Error::other)?; + let balance = ExternalBalance::decode(reader).map_err(io::Error::other)?; Ok(Self { address, balance }) } /// Write the Payment. 
diff --git a/processor/scanner/src/batch/db.rs b/processor/scanner/src/batch/db.rs index 88ca2882..015b661b 100644 --- a/processor/scanner/src/batch/db.rs +++ b/processor/scanner/src/batch/db.rs @@ -7,7 +7,7 @@ use scale::{Encode, Decode, IoReader}; use borsh::{BorshSerialize, BorshDeserialize}; use serai_db::{Get, DbTxn, create_db}; -use serai_primitives::Balance; +use serai_primitives::ExternalBalance; use serai_validator_sets_primitives::Session; use primitives::EncodableG; @@ -39,7 +39,7 @@ create_db!( pub(crate) struct ReturnInformation { pub(crate) address: AddressFor, - pub(crate) balance: Balance, + pub(crate) balance: ExternalBalance, } pub(crate) struct BatchDb(PhantomData); @@ -116,7 +116,7 @@ impl BatchDb { res.push((opt[0] == 1).then(|| { let address = AddressFor::::deserialize_reader(&mut buf).unwrap(); - let balance = Balance::decode(&mut IoReader(&mut buf)).unwrap(); + let balance = ExternalBalance::decode(&mut IoReader(&mut buf)).unwrap(); ReturnInformation { address, balance } })); } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 510af61b..d3e24183 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -10,7 +10,7 @@ use group::GroupEncoding; use borsh::{BorshSerialize, BorshDeserialize}; use serai_db::{Get, DbTxn, Db}; -use serai_primitives::{NetworkId, Coin, Amount}; +use serai_primitives::{ExternalNetworkId, ExternalCoin, Amount}; use serai_coins_primitives::OutInstructionWithBalance; use messages::substrate::ExecutedBatch; @@ -64,7 +64,7 @@ impl BlockExt for B { /// This defines the primitive types used, along with various getters necessary for indexing. pub trait ScannerFeed: 'static + Send + Sync + Clone { /// The ID of the network being scanned for. - const NETWORK: NetworkId; + const NETWORK: ExternalNetworkId; /// The amount of confirmations a block must have to be considered finalized. 
/// @@ -175,14 +175,14 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { /// /// This MUST be constant. Serai MUST NOT create internal outputs worth less than this. This /// SHOULD be a value worth handling at a human level. - fn dust(coin: Coin) -> Amount; + fn dust(coin: ExternalCoin) -> Amount; /// The cost to aggregate an input as of the specified block. /// /// This is defined as the transaction fee for a 2-input, 1-output transaction. fn cost_to_aggregate( &self, - coin: Coin, + coin: ExternalCoin, reference_block: &Self::Block, ) -> impl Send + Future>; } diff --git a/processor/scheduler/utxo/primitives/src/tree.rs b/processor/scheduler/utxo/primitives/src/tree.rs index d5b47309..565706a3 100644 --- a/processor/scheduler/utxo/primitives/src/tree.rs +++ b/processor/scheduler/utxo/primitives/src/tree.rs @@ -1,6 +1,6 @@ use borsh::{BorshSerialize, BorshDeserialize}; -use serai_primitives::{Coin, Amount, Balance}; +use serai_primitives::{ExternalCoin, Amount, ExternalBalance}; use primitives::{Address, Payment}; use scanner::ScannerFeed; @@ -52,7 +52,7 @@ impl TreeTransaction { /// payments should be made. 
pub fn payments( &self, - coin: Coin, + coin: ExternalCoin, branch_address: &A, input_value: u64, ) -> Option>> { @@ -115,7 +115,10 @@ impl TreeTransaction { .filter_map(|(payment, amount)| { amount.map(|amount| { // The existing payment, with the new amount - Payment::new(payment.address().clone(), Balance { coin, amount: Amount(amount) }) + Payment::new( + payment.address().clone(), + ExternalBalance { coin, amount: Amount(amount) }, + ) }) }) .collect() @@ -126,7 +129,7 @@ impl TreeTransaction { .filter_map(|amount| { amount.map(|amount| { // A branch output with the new amount - Payment::new(branch_address.clone(), Balance { coin, amount: Amount(amount) }) + Payment::new(branch_address.clone(), ExternalBalance { coin, amount: Amount(amount) }) }) }) .collect() diff --git a/processor/scheduler/utxo/standard/src/db.rs b/processor/scheduler/utxo/standard/src/db.rs index 00761595..128c5df6 100644 --- a/processor/scheduler/utxo/standard/src/db.rs +++ b/processor/scheduler/utxo/standard/src/db.rs @@ -2,7 +2,7 @@ use core::marker::PhantomData; use group::GroupEncoding; -use serai_primitives::{Coin, Amount, Balance}; +use serai_primitives::{ExternalCoin, Amount, ExternalBalance}; use borsh::BorshDeserialize; use serai_db::{Get, DbTxn, create_db, db_channel}; @@ -13,31 +13,31 @@ use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor}; create_db! { UtxoScheduler { - OperatingCosts: (coin: Coin) -> Amount, - SerializedOutputs: (key: &[u8], coin: Coin) -> Vec, - SerializedQueuedPayments: (key: &[u8], coin: Coin) -> Vec, + OperatingCosts: (coin: ExternalCoin) -> Amount, + SerializedOutputs: (key: &[u8], coin: ExternalCoin) -> Vec, + SerializedQueuedPayments: (key: &[u8], coin: ExternalCoin) -> Vec, } } db_channel! 
{ UtxoScheduler { - PendingBranch: (key: &[u8], balance: Balance) -> Vec, + PendingBranch: (key: &[u8], balance: ExternalBalance) -> Vec, } } pub(crate) struct Db(PhantomData); impl Db { - pub(crate) fn operating_costs(getter: &impl Get, coin: Coin) -> Amount { + pub(crate) fn operating_costs(getter: &impl Get, coin: ExternalCoin) -> Amount { OperatingCosts::get(getter, coin).unwrap_or(Amount(0)) } - pub(crate) fn set_operating_costs(txn: &mut impl DbTxn, coin: Coin, amount: Amount) { + pub(crate) fn set_operating_costs(txn: &mut impl DbTxn, coin: ExternalCoin, amount: Amount) { OperatingCosts::set(txn, coin, &amount) } pub(crate) fn outputs( getter: &impl Get, key: KeyFor, - coin: Coin, + coin: ExternalCoin, ) -> Option>> { let buf = SerializedOutputs::get(getter, key.to_bytes().as_ref(), coin)?; let mut buf = buf.as_slice(); @@ -51,7 +51,7 @@ impl Db { pub(crate) fn set_outputs( txn: &mut impl DbTxn, key: KeyFor, - coin: Coin, + coin: ExternalCoin, outputs: &[OutputFor], ) { let mut buf = Vec::with_capacity(outputs.len() * 128); @@ -60,14 +60,14 @@ impl Db { } SerializedOutputs::set(txn, key.to_bytes().as_ref(), coin, &buf); } - pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor, coin: Coin) { + pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor, coin: ExternalCoin) { SerializedOutputs::del(txn, key.to_bytes().as_ref(), coin); } pub(crate) fn queued_payments( getter: &impl Get, key: KeyFor, - coin: Coin, + coin: ExternalCoin, ) -> Option>>> { let buf = SerializedQueuedPayments::get(getter, key.to_bytes().as_ref(), coin)?; let mut buf = buf.as_slice(); @@ -81,7 +81,7 @@ impl Db { pub(crate) fn set_queued_payments( txn: &mut impl DbTxn, key: KeyFor, - coin: Coin, + coin: ExternalCoin, queued: &[Payment>], ) { let mut buf = Vec::with_capacity(queued.len() * 128); @@ -90,14 +90,14 @@ impl Db { } SerializedQueuedPayments::set(txn, key.to_bytes().as_ref(), coin, &buf); } - pub(crate) fn del_queued_payments(txn: &mut impl DbTxn, key: KeyFor, coin: 
Coin) { + pub(crate) fn del_queued_payments(txn: &mut impl DbTxn, key: KeyFor, coin: ExternalCoin) { SerializedQueuedPayments::del(txn, key.to_bytes().as_ref(), coin); } pub(crate) fn queue_pending_branch( txn: &mut impl DbTxn, key: KeyFor, - balance: Balance, + balance: ExternalBalance, child: &TreeTransaction>, ) { PendingBranch::send(txn, key.to_bytes().as_ref(), balance, &borsh::to_vec(child).unwrap()) @@ -105,7 +105,7 @@ impl Db { pub(crate) fn take_pending_branch( txn: &mut impl DbTxn, key: KeyFor, - balance: Balance, + balance: ExternalBalance, ) -> Option>> { PendingBranch::try_recv(txn, key.to_bytes().as_ref(), balance) .map(|bytes| TreeTransaction::>::deserialize(&mut bytes.as_slice()).unwrap()) diff --git a/processor/scheduler/utxo/standard/src/lib.rs b/processor/scheduler/utxo/standard/src/lib.rs index e826c300..cc2e2d35 100644 --- a/processor/scheduler/utxo/standard/src/lib.rs +++ b/processor/scheduler/utxo/standard/src/lib.rs @@ -7,7 +7,7 @@ use std::collections::HashMap; use group::GroupEncoding; -use serai_primitives::{Coin, Amount, Balance}; +use serai_primitives::{ExternalCoin, Amount, ExternalBalance}; use serai_db::DbTxn; @@ -42,7 +42,7 @@ impl> Scheduler { block: &BlockFor, key_for_change: KeyFor, key: KeyFor, - coin: Coin, + coin: ExternalCoin, ) -> Result>, >::EphemeralError> { let mut eventualities = vec![]; @@ -79,7 +79,7 @@ impl> Scheduler { txn: &mut impl DbTxn, operating_costs: &mut u64, key: KeyFor, - coin: Coin, + coin: ExternalCoin, value_of_outputs: u64, ) -> Vec>> { // Fetch all payments for this key @@ -133,7 +133,7 @@ impl> Scheduler { fn queue_branches( txn: &mut impl DbTxn, key: KeyFor, - coin: Coin, + coin: ExternalCoin, effected_payments: Vec, tx: TreeTransaction>, ) { @@ -149,7 +149,7 @@ impl> Scheduler { children thanks to our sort. 
*/ for (amount, child) in effected_payments.into_iter().zip(children) { - Db::::queue_pending_branch(txn, key, Balance { coin, amount }, &child); + Db::::queue_pending_branch(txn, key, ExternalBalance { coin, amount }, &child); } } } @@ -216,8 +216,6 @@ impl> Scheduler { let branch_address = P::branch_address(key); 'coin: for coin in S::NETWORK.coins() { - let coin = *coin; - // Perform any input aggregation we should eventualities .append(&mut self.aggregate_inputs(txn, block, key_for_change, key, coin).await?); @@ -308,7 +306,7 @@ impl> Scheduler { block: &BlockFor, from: KeyFor, to: KeyFor, - coin: Coin, + coin: ExternalCoin, ) -> Result<(), >::EphemeralError> { let from_bytes = from.to_bytes().as_ref().to_vec(); // Ensure our inputs are aggregated @@ -349,10 +347,10 @@ impl> SchedulerTrait for Schedul fn activate_key(txn: &mut impl DbTxn, key: KeyFor) { for coin in S::NETWORK.coins() { - assert!(Db::::outputs(txn, key, *coin).is_none()); - Db::::set_outputs(txn, key, *coin, &[]); - assert!(Db::::queued_payments(txn, key, *coin).is_none()); - Db::::set_queued_payments(txn, key, *coin, &[]); + assert!(Db::::outputs(txn, key, coin).is_none()); + Db::::set_outputs(txn, key, coin, &[]); + assert!(Db::::queued_payments(txn, key, coin).is_none()); + Db::::set_queued_payments(txn, key, coin, &[]); } } @@ -368,18 +366,18 @@ impl> SchedulerTrait for Schedul for coin in S::NETWORK.coins() { // Move the payments to the new key { - let still_queued = Db::::queued_payments(txn, retiring_key, *coin).unwrap(); - let mut new_queued = Db::::queued_payments(txn, new_key, *coin).unwrap(); + let still_queued = Db::::queued_payments(txn, retiring_key, coin).unwrap(); + let mut new_queued = Db::::queued_payments(txn, new_key, coin).unwrap(); let mut queued = still_queued; queued.append(&mut new_queued); - Db::::set_queued_payments(txn, retiring_key, *coin, &[]); - Db::::set_queued_payments(txn, new_key, *coin, &queued); + Db::::set_queued_payments(txn, retiring_key, coin, &[]); + 
Db::::set_queued_payments(txn, new_key, coin, &queued); } // Move the outputs to the new key - self.flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin).await?; + self.flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, coin).await?; } Ok(eventualities) } @@ -387,10 +385,10 @@ impl> SchedulerTrait for Schedul fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { for coin in S::NETWORK.coins() { - assert!(Db::::outputs(txn, key, *coin).unwrap().is_empty()); - Db::::del_outputs(txn, key, *coin); - assert!(Db::::queued_payments(txn, key, *coin).unwrap().is_empty()); - Db::::del_queued_payments(txn, key, *coin); + assert!(Db::::outputs(txn, key, coin).unwrap().is_empty()); + Db::::del_outputs(txn, key, coin); + assert!(Db::::queued_payments(txn, key, coin).unwrap().is_empty()); + Db::::del_queued_payments(txn, key, coin); } } @@ -463,7 +461,7 @@ impl> SchedulerTrait for Schedul block, active_keys[0].0, active_keys[1].0, - *coin, + coin, ) .await?; } @@ -552,10 +550,10 @@ impl> SchedulerTrait for Schedul // Queue the payments for this key for coin in S::NETWORK.coins() { - let mut queued_payments = Db::::queued_payments(txn, fulfillment_key, *coin).unwrap(); + let mut queued_payments = Db::::queued_payments(txn, fulfillment_key, coin).unwrap(); queued_payments - .extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned()); - Db::::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments); + .extend(payments.iter().filter(|payment| payment.balance().coin == coin).cloned()); + Db::::set_queued_payments(txn, fulfillment_key, coin, &queued_payments); } // Handle the queued payments diff --git a/processor/scheduler/utxo/transaction-chaining/src/db.rs b/processor/scheduler/utxo/transaction-chaining/src/db.rs index 11bcd78d..68558e6f 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/db.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/db.rs @@ -2,7 +2,7 @@ use core::marker::PhantomData; 
use group::GroupEncoding; -use serai_primitives::{Coin, Amount}; +use serai_primitives::{ExternalCoin, Amount}; use serai_db::{Get, DbTxn, create_db}; @@ -11,28 +11,28 @@ use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor}; create_db! { TransactionChainingScheduler { - OperatingCosts: (coin: Coin) -> Amount, - SerializedOutputs: (key: &[u8], coin: Coin) -> Vec, + OperatingCosts: (coin: ExternalCoin) -> Amount, + SerializedOutputs: (key: &[u8], coin: ExternalCoin) -> Vec, AlreadyAccumulatedOutput: (id: &[u8]) -> (), // We should be immediately able to schedule the fulfillment of payments, yet this may not be // possible if we're in the middle of a multisig rotation (as our output set will be split) - SerializedQueuedPayments: (key: &[u8], coin: Coin) -> Vec, + SerializedQueuedPayments: (key: &[u8], coin: ExternalCoin) -> Vec, } } pub(crate) struct Db(PhantomData); impl Db { - pub(crate) fn operating_costs(getter: &impl Get, coin: Coin) -> Amount { + pub(crate) fn operating_costs(getter: &impl Get, coin: ExternalCoin) -> Amount { OperatingCosts::get(getter, coin).unwrap_or(Amount(0)) } - pub(crate) fn set_operating_costs(txn: &mut impl DbTxn, coin: Coin, amount: Amount) { + pub(crate) fn set_operating_costs(txn: &mut impl DbTxn, coin: ExternalCoin, amount: Amount) { OperatingCosts::set(txn, coin, &amount) } pub(crate) fn outputs( getter: &impl Get, key: KeyFor, - coin: Coin, + coin: ExternalCoin, ) -> Option>> { let buf = SerializedOutputs::get(getter, key.to_bytes().as_ref(), coin)?; let mut buf = buf.as_slice(); @@ -46,7 +46,7 @@ impl Db { pub(crate) fn set_outputs( txn: &mut impl DbTxn, key: KeyFor, - coin: Coin, + coin: ExternalCoin, outputs: &[OutputFor], ) { let mut buf = Vec::with_capacity(outputs.len() * 128); @@ -55,7 +55,7 @@ impl Db { } SerializedOutputs::set(txn, key.to_bytes().as_ref(), coin, &buf); } - pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor, coin: Coin) { + pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor, coin: 
ExternalCoin) { SerializedOutputs::del(txn, key.to_bytes().as_ref(), coin); } @@ -75,7 +75,7 @@ impl Db { pub(crate) fn queued_payments( getter: &impl Get, key: KeyFor, - coin: Coin, + coin: ExternalCoin, ) -> Option>>> { let buf = SerializedQueuedPayments::get(getter, key.to_bytes().as_ref(), coin)?; let mut buf = buf.as_slice(); @@ -89,7 +89,7 @@ impl Db { pub(crate) fn set_queued_payments( txn: &mut impl DbTxn, key: KeyFor, - coin: Coin, + coin: ExternalCoin, queued: &[Payment>], ) { let mut buf = Vec::with_capacity(queued.len() * 128); @@ -98,7 +98,7 @@ impl Db { } SerializedQueuedPayments::set(txn, key.to_bytes().as_ref(), coin, &buf); } - pub(crate) fn del_queued_payments(txn: &mut impl DbTxn, key: KeyFor, coin: Coin) { + pub(crate) fn del_queued_payments(txn: &mut impl DbTxn, key: KeyFor, coin: ExternalCoin) { SerializedQueuedPayments::del(txn, key.to_bytes().as_ref(), coin); } } diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index bb39dcd3..5f7275ce 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -7,7 +7,7 @@ use std::collections::HashMap; use group::GroupEncoding; -use serai_primitives::{Coin, Amount}; +use serai_primitives::{ExternalCoin, Amount}; use serai_db::DbTxn; @@ -72,7 +72,7 @@ impl>> Sched block: &BlockFor, key_for_change: KeyFor, key: KeyFor, - coin: Coin, + coin: ExternalCoin, ) -> Result>, >::EphemeralError> { let mut eventualities = vec![]; @@ -112,7 +112,7 @@ impl>> Sched txn: &mut impl DbTxn, operating_costs: &mut u64, key: KeyFor, - coin: Coin, + coin: ExternalCoin, value_of_outputs: u64, ) -> Vec>> { // Fetch all payments for this key @@ -184,8 +184,6 @@ impl>> Sched let branch_address = P::branch_address(key); 'coin: for coin in S::NETWORK.coins() { - let coin = *coin; - // Perform any input aggregation we should eventualities .append(&mut self.aggregate_inputs(txn, 
block, key_for_change, key, coin).await?); @@ -360,7 +358,7 @@ impl>> Sched block: &BlockFor, from: KeyFor, to: KeyFor, - coin: Coin, + coin: ExternalCoin, ) -> Result<(), >::EphemeralError> { let from_bytes = from.to_bytes().as_ref().to_vec(); // Ensure our inputs are aggregated @@ -404,10 +402,10 @@ impl>> Sched fn activate_key(txn: &mut impl DbTxn, key: KeyFor) { for coin in S::NETWORK.coins() { - assert!(Db::::outputs(txn, key, *coin).is_none()); - Db::::set_outputs(txn, key, *coin, &[]); - assert!(Db::::queued_payments(txn, key, *coin).is_none()); - Db::::set_queued_payments(txn, key, *coin, &[]); + assert!(Db::::outputs(txn, key, coin).is_none()); + Db::::set_outputs(txn, key, coin, &[]); + assert!(Db::::queued_payments(txn, key, coin).is_none()); + Db::::set_queued_payments(txn, key, coin, &[]); } } @@ -423,18 +421,18 @@ impl>> Sched for coin in S::NETWORK.coins() { // Move the payments to the new key { - let still_queued = Db::::queued_payments(txn, retiring_key, *coin).unwrap(); - let mut new_queued = Db::::queued_payments(txn, new_key, *coin).unwrap(); + let still_queued = Db::::queued_payments(txn, retiring_key, coin).unwrap(); + let mut new_queued = Db::::queued_payments(txn, new_key, coin).unwrap(); let mut queued = still_queued; queued.append(&mut new_queued); - Db::::set_queued_payments(txn, retiring_key, *coin, &[]); - Db::::set_queued_payments(txn, new_key, *coin, &queued); + Db::::set_queued_payments(txn, retiring_key, coin, &[]); + Db::::set_queued_payments(txn, new_key, coin, &queued); } // Move the outputs to the new key - self.flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin).await?; + self.flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, coin).await?; } Ok(eventualities) } @@ -442,10 +440,10 @@ impl>> Sched fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { for coin in S::NETWORK.coins() { - assert!(Db::::outputs(txn, key, *coin).unwrap().is_empty()); - Db::::del_outputs(txn, key, *coin); - 
assert!(Db::::queued_payments(txn, key, *coin).unwrap().is_empty()); - Db::::del_queued_payments(txn, key, *coin); + assert!(Db::::outputs(txn, key, coin).unwrap().is_empty()); + Db::::del_outputs(txn, key, coin); + assert!(Db::::queued_payments(txn, key, coin).unwrap().is_empty()); + Db::::del_queued_payments(txn, key, coin); } } @@ -481,7 +479,7 @@ impl>> Sched block, active_keys[0].0, active_keys[1].0, - *coin, + coin, ) .await?; } @@ -570,10 +568,10 @@ impl>> Sched // Queue the payments for this key for coin in S::NETWORK.coins() { - let mut queued_payments = Db::::queued_payments(txn, fulfillment_key, *coin).unwrap(); + let mut queued_payments = Db::::queued_payments(txn, fulfillment_key, coin).unwrap(); queued_payments - .extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned()); - Db::::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments); + .extend(payments.iter().filter(|payment| payment.balance().coin == coin).cloned()); + Db::::set_queued_payments(txn, fulfillment_key, coin, &queued_payments); } // Handle the queued payments diff --git a/substrate/client/src/serai/in_instructions.rs b/substrate/client/src/serai/in_instructions.rs index 7b0629f0..db9a4f78 100644 --- a/substrate/client/src/serai/in_instructions.rs +++ b/substrate/client/src/serai/in_instructions.rs @@ -1,10 +1,7 @@ pub use serai_abi::in_instructions::primitives; use primitives::SignedBatch; -use crate::{ - primitives::{BlockHash, ExternalNetworkId}, - Transaction, SeraiError, Serai, TemporalSerai, -}; +use crate::{primitives::ExternalNetworkId, Transaction, SeraiError, Serai, TemporalSerai}; pub type InInstructionsEvent = serai_abi::in_instructions::Event; @@ -12,6 +9,7 @@ const PALLET: &str = "InInstructions"; #[derive(Clone, Copy)] pub struct SeraiInInstructions<'a>(pub(crate) &'a TemporalSerai<'a>); +impl SeraiInInstructions<'_> { pub async fn last_batch_for_network( &self, network: ExternalNetworkId, diff --git a/substrate/client/src/serai/mod.rs 
b/substrate/client/src/serai/mod.rs index fda876b6..532c9cf2 100644 --- a/substrate/client/src/serai/mod.rs +++ b/substrate/client/src/serai/mod.rs @@ -16,7 +16,7 @@ pub use abi::{primitives, Transaction}; use abi::*; pub use primitives::{SeraiAddress, Signature, Amount}; -use primitives::{Header, NetworkId}; +use primitives::{Header, ExternalNetworkId}; pub mod coins; pub use coins::SeraiCoins; @@ -313,7 +313,7 @@ impl Serai { /// Return the P2P Multiaddrs for the validators of the specified network. pub async fn p2p_validators( &self, - network: NetworkId, + network: ExternalNetworkId, ) -> Result, SeraiError> { self.call("p2p_validators", network).await } diff --git a/substrate/client/src/serai/validator_sets.rs b/substrate/client/src/serai/validator_sets.rs index b475730d..3eddc943 100644 --- a/substrate/client/src/serai/validator_sets.rs +++ b/substrate/client/src/serai/validator_sets.rs @@ -5,10 +5,10 @@ use sp_runtime::BoundedVec; use serai_abi::{primitives::Amount, validator_sets::primitives::ExternalValidatorSet}; pub use serai_abi::validator_sets::primitives; -use primitives::{MAX_KEY_LEN, Session, ValidatorSet, KeyPair, SlashReport}; +use primitives::{MAX_KEY_LEN, Session, KeyPair, SlashReport}; use crate::{ - primitives::{NetworkId, ExternalNetworkId, EmbeddedEllipticCurve, SeraiAddress}, + primitives::{NetworkId, ExternalNetworkId, EmbeddedEllipticCurve}, Transaction, Serai, TemporalSerai, SeraiError, }; @@ -203,7 +203,7 @@ impl SeraiValidatorSets<'_> { } pub fn set_keys( - network: NetworkId, + network: ExternalNetworkId, key_pair: KeyPair, signature_participants: bitvec::vec::BitVec, signature: Signature, @@ -237,7 +237,7 @@ impl SeraiValidatorSets<'_> { } pub fn report_slashes( - network: NetworkId, + network: ExternalNetworkId, slashes: SlashReport, signature: Signature, ) -> Transaction { diff --git a/substrate/client/tests/batch.rs b/substrate/client/tests/batch.rs index 0edfaf2f..2d32462f 100644 --- a/substrate/client/tests/batch.rs +++ 
b/substrate/client/tests/batch.rs @@ -8,7 +8,7 @@ use blake2::{ use scale::Encode; use serai_client::{ - primitives::{BlockHash, NetworkId, ExternalCoin, Amount, ExternalBalance, SeraiAddress}, + primitives::{BlockHash, ExternalCoin, Amount, ExternalBalance, SeraiAddress}, coins::CoinsEvent, validator_sets::primitives::Session, in_instructions::{ diff --git a/substrate/client/tests/burn.rs b/substrate/client/tests/burn.rs index cba8e480..8351781e 100644 --- a/substrate/client/tests/burn.rs +++ b/substrate/client/tests/burn.rs @@ -11,7 +11,7 @@ use sp_core::Pair; use serai_client::{ primitives::{ - BlockHash, ExternalNetworkId, ExternalCoin, Amount, ExternalBalance, SeraiAddress, ExternalAddress, + BlockHash, ExternalCoin, Amount, ExternalBalance, SeraiAddress, ExternalAddress, insecure_pair_from_name, }, coins::{ diff --git a/substrate/client/tests/common/genesis_liquidity.rs b/substrate/client/tests/common/genesis_liquidity.rs index c602b2e6..cba6bdea 100644 --- a/substrate/client/tests/common/genesis_liquidity.rs +++ b/substrate/client/tests/common/genesis_liquidity.rs @@ -11,10 +11,11 @@ use sp_core::{sr25519::Signature, Pair as PairTrait}; use serai_abi::{ primitives::{ - BlockHash, ExternalNetworkId, ExternalCoin, Amount, ExternalBalance, SeraiAddress, insecure_pair_from_name, + EXTERNAL_COINS, BlockHash, ExternalNetworkId, NetworkId, ExternalCoin, Amount, ExternalBalance, + SeraiAddress, insecure_pair_from_name, }, - validator_sets::primitives::{musig_context, Session, ValidatorSet}, - genesis_liquidity::primitives::{oraclize_values_message, Values}, + validator_sets::primitives::{Session, ValidatorSet, musig_context}, + genesis_liquidity::primitives::{Values, oraclize_values_message}, in_instructions::primitives::{InInstruction, InInstructionWithBalance, Batch}, }; diff --git a/substrate/client/tests/common/in_instructions.rs b/substrate/client/tests/common/in_instructions.rs index 115b75d0..87e26c5d 100644 --- 
a/substrate/client/tests/common/in_instructions.rs +++ b/substrate/client/tests/common/in_instructions.rs @@ -9,7 +9,7 @@ use scale::Encode; use sp_core::Pair; use serai_client::{ - primitives::{BlockHash, NetworkId, ExternalBalance, SeraiAddress, insecure_pair_from_name}, + primitives::{BlockHash, ExternalBalance, SeraiAddress, insecure_pair_from_name}, validator_sets::primitives::{ExternalValidatorSet, KeyPair}, in_instructions::{ primitives::{Batch, SignedBatch, batch_message, InInstruction, InInstructionWithBalance}, diff --git a/substrate/client/tests/common/validator_sets.rs b/substrate/client/tests/common/validator_sets.rs index 4771b5ed..008cb3fc 100644 --- a/substrate/client/tests/common/validator_sets.rs +++ b/substrate/client/tests/common/validator_sets.rs @@ -16,12 +16,12 @@ use frost::dkg::musig::musig; use schnorrkel::Schnorrkel; use serai_client::{ - primitives::EmbeddedEllipticCurve, + primitives::{EmbeddedEllipticCurve, Amount}, validator_sets::{ primitives::{MAX_KEY_LEN, ExternalValidatorSet, KeyPair, musig_context, set_keys_message}, ValidatorSetsEvent, }, - Amount, Serai, SeraiValidatorSets, + SeraiValidatorSets, Serai, }; use crate::common::tx::publish_tx; diff --git a/substrate/client/tests/dex.rs b/substrate/client/tests/dex.rs index 06bf42f9..93422f5e 100644 --- a/substrate/client/tests/dex.rs +++ b/substrate/client/tests/dex.rs @@ -6,7 +6,7 @@ use serai_abi::in_instructions::primitives::DexCall; use serai_client::{ primitives::{ - BlockHash, NetworkId, Coin, Amount, Balance, SeraiAddress, ExternalAddress, + BlockHash, ExternalCoin, Coin, Amount, ExternalBalance, Balance, SeraiAddress, ExternalAddress, insecure_pair_from_name, }, in_instructions::primitives::{ diff --git a/substrate/client/tests/dht.rs b/substrate/client/tests/dht.rs index 0d27c91e..8b8a078b 100644 --- a/substrate/client/tests/dht.rs +++ b/substrate/client/tests/dht.rs @@ -44,7 +44,7 @@ async fn dht() { assert!(!Serai::new(serai_rpc.clone()) .await .unwrap() - 
.p2p_validators(ExternalNetworkId::Bitcoin.into()) + .p2p_validators(ExternalNetworkId::Bitcoin) .await .unwrap() .is_empty()); diff --git a/substrate/client/tests/emissions.rs b/substrate/client/tests/emissions.rs index 6c131567..7ee843cb 100644 --- a/substrate/client/tests/emissions.rs +++ b/substrate/client/tests/emissions.rs @@ -5,8 +5,8 @@ use serai_client::TemporalSerai; use serai_abi::{ primitives::{ - NETWORKS, COINS, TARGET_BLOCK_TIME, FAST_EPOCH_DURATION, FAST_EPOCH_INITIAL_PERIOD, BlockHash, - Coin, + EXTERNAL_NETWORKS, NETWORKS, TARGET_BLOCK_TIME, FAST_EPOCH_DURATION, FAST_EPOCH_INITIAL_PERIOD, + BlockHash, ExternalNetworkId, NetworkId, ExternalCoin, Amount, ExternalBalance, }, validator_sets::primitives::Session, emissions::primitives::{INITIAL_REWARD_PER_BLOCK, SECURE_BY}, @@ -38,17 +38,16 @@ async fn send_batches(serai: &Serai, ids: &mut HashMap<NetworkId, u32>) let mut block = BlockHash([0; 32]); OsRng.fill_bytes(&mut block.0); - provide_batch( - serai, - Batch { - network, - id: ids[&network], - external_network_block_hash: block, - instructions: vec![], - }, - ) - .await; - } + provide_batch( + serai, + Batch { + network, + id: ids[&network], + external_network_block_hash: block, + instructions: vec![], + }, + ) + .await; } } diff --git a/substrate/client/tests/validator_sets.rs b/substrate/client/tests/validator_sets.rs index 14a5552f..32f5d481 100644 --- a/substrate/client/tests/validator_sets.rs +++ b/substrate/client/tests/validator_sets.rs @@ -7,11 +7,11 @@ use sp_core::{ use serai_client::{ primitives::{ - FAST_EPOCH_DURATION, TARGET_BLOCK_TIME, NETWORKS, BlockHash, NetworkId, EmbeddedEllipticCurve, - insecure_pair_from_name, + FAST_EPOCH_DURATION, TARGET_BLOCK_TIME, NETWORKS, BlockHash, ExternalNetworkId, NetworkId, + EmbeddedEllipticCurve, Amount, insecure_pair_from_name, }, validator_sets::{ - primitives::{Session, ValidatorSet, ExternalValidatorSet, KeyPair}, + primitives::{Session, ExternalValidatorSet, ValidatorSet, KeyPair}, ValidatorSetsEvent, },
in_instructions::{ @@ -313,8 +313,12 @@ async fn validator_set_rotation() { // provide a batch to complete the handover and retire the previous set let mut block_hash = BlockHash([0; 32]); OsRng.fill_bytes(&mut block_hash.0); - let batch = - Batch { network: network.try_into().unwrap(), id: 0, external_network_block_hash: block_hash, instructions: vec![] }; + let batch = Batch { + network: network.try_into().unwrap(), + id: 0, + external_network_block_hash: block_hash, + instructions: vec![], + }; publish_tx( &serai, &SeraiInInstructions::execute_batch(SignedBatch { diff --git a/substrate/coins/pallet/src/tests.rs b/substrate/coins/pallet/src/tests.rs index a6d16afd..52b81d37 100644 --- a/substrate/coins/pallet/src/tests.rs +++ b/substrate/coins/pallet/src/tests.rs @@ -58,7 +58,7 @@ fn burn_with_instruction() { // we shouldn't be able to burn more than what we have let mut instruction = OutInstructionWithBalance { - instruction: OutInstruction { address: ExternalAddress::new(vec![]).unwrap(), data: None }, + instruction: OutInstruction { address: ExternalAddress::new(vec![]).unwrap() }, balance: ExternalBalance { coin: coin.try_into().unwrap(), amount: Amount(balance.amount.0 + 1), diff --git a/substrate/in-instructions/pallet/src/lib.rs b/substrate/in-instructions/pallet/src/lib.rs index 07fb2294..23d8a875 100644 --- a/substrate/in-instructions/pallet/src/lib.rs +++ b/substrate/in-instructions/pallet/src/lib.rs @@ -67,7 +67,7 @@ pub mod pallet { in_instruction_results: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>, }, Halt { - network: NetworkId, + network: ExternalNetworkId, }, } @@ -103,7 +103,7 @@ pub mod pallet { fn execute(instruction: &InInstructionWithBalance) -> Result<(), DispatchError> { match &instruction.instruction { InInstruction::Transfer(address) => { - Coins::<T>::mint(address.into(), instruction.balance.into())?; + Coins::<T>::mint((*address).into(), instruction.balance.into())?; } InInstruction::Dex(call) => { // This will only be initiated by external chain transactions.
That is why we only need @@ -222,11 +222,11 @@ pub mod pallet { } InInstruction::GenesisLiquidity(address) => { Coins::<T>::mint(GENESIS_LIQUIDITY_ACCOUNT.into(), instruction.balance.into())?; - GenesisLiq::<T>::add_coin_liquidity(address.into(), instruction.balance)?; + GenesisLiq::<T>::add_coin_liquidity((*address).into(), instruction.balance)?; } InInstruction::SwapToStakedSRI(address, network) => { Coins::<T>::mint(POL_ACCOUNT.into(), instruction.balance.into())?; - Emissions::<T>::swap_to_staked_sri(address.into(), network, instruction.balance)?; + Emissions::<T>::swap_to_staked_sri((*address).into(), *network, instruction.balance)?; } } Ok(()) @@ -319,7 +319,10 @@ pub mod pallet { // key is publishing `Batch`s. This should only happen once the current key has verified all // `Batch`s published by the prior key, meaning they are accepting the hand-over. if prior.is_some() && (!valid_by_prior) { - ValidatorSets::<T>::retire_set(ValidatorSet { network: network.into(), session: prior_session }); + ValidatorSets::<T>::retire_set(ValidatorSet { + network: network.into(), + session: prior_session, + }); } // check that this validator set isn't publishing a batch more than once per block diff --git a/substrate/in-instructions/primitives/src/lib.rs b/substrate/in-instructions/primitives/src/lib.rs index c3aaad1b..5c74bf55 100644 --- a/substrate/in-instructions/primitives/src/lib.rs +++ b/substrate/in-instructions/primitives/src/lib.rs @@ -20,7 +20,7 @@ use sp_std::vec::Vec; use sp_runtime::RuntimeDebug; #[rustfmt::skip] -use serai_primitives::{BlockHash, NetworkId, Balance, SeraiAddress, ExternalAddress, system_address}; +use serai_primitives::{BlockHash, ExternalNetworkId, NetworkId, ExternalBalance, Balance, SeraiAddress, ExternalAddress, system_address}; mod shorthand; pub use shorthand::*; diff --git a/substrate/node/src/chain_spec.rs b/substrate/node/src/chain_spec.rs index 7f153f1a..ebc47fcb 100644 --- a/substrate/node/src/chain_spec.rs +++ b/substrate/node/src/chain_spec.rs @@ -88,7
+88,10 @@ fn devnet_genesis( networks: key_shares.clone(), participants: validators.clone(), }, - emissions: EmissionsConfig { networks: key_shares, participants: validators.clone() }, + emissions: EmissionsConfig { + networks: key_shares, + participants: validators.iter().map(|(validator, _)| *validator).collect(), + }, signals: SignalsConfig::default(), babe: BabeConfig { authorities: validators.iter().map(|validator| (validator.0.into(), 1)).collect(), diff --git a/substrate/primitives/src/networks.rs b/substrate/primitives/src/networks.rs index d8c4047b..ace34127 100644 --- a/substrate/primitives/src/networks.rs +++ b/substrate/primitives/src/networks.rs @@ -26,9 +26,7 @@ pub enum EmbeddedEllipticCurve { } /// The type used to identify external networks. -#[derive( - Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, PartialOrd, Ord, MaxEncodedLen, TypeInfo, -)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TypeInfo)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum ExternalNetworkId { @@ -162,6 +160,17 @@ impl ExternalNetworkId { } impl NetworkId { + /// The embedded elliptic curve actively used for this network. + /// + /// This is guaranteed to return `[]`, `[Embedwards25519]`, or + /// `[Embedwards25519, *network specific curve*]`. 
+ pub fn embedded_elliptic_curves(&self) -> &'static [EmbeddedEllipticCurve] { + match self { + Self::Serai => &[], + Self::External(network) => network.embedded_elliptic_curves(), + } + } + pub fn coins(&self) -> Vec<Coin> { match self { Self::Serai => vec![Coin::Serai], diff --git a/substrate/validator-sets/pallet/src/lib.rs b/substrate/validator-sets/pallet/src/lib.rs index bc1452e0..755e980a 100644 --- a/substrate/validator-sets/pallet/src/lib.rs +++ b/substrate/validator-sets/pallet/src/lib.rs @@ -1154,8 +1154,8 @@ pub mod pallet { // session on this assumption assert_eq!(Pallet::<T>::latest_decided_session(network.into()), Some(current_session)); - let participants = - Participants::<T>::get(network).expect("session existed without participants"); + let participants = Participants::<T>::get(NetworkId::from(network)) + .expect("session existed without participants"); // Check the bitvec is of the proper length if participants.len() != signature_participants.len() { @@ -1189,7 +1189,7 @@ pub mod pallet { // Verify the signature with the MuSig key of the signers // We theoretically don't need set_keys_message to bind to removed_participants, as the // key we're signing with effectively already does so, yet there's no reason not to - if !musig_key(set, &signers).verify(&set_keys_message(&set, key_pair), signature) { + if !musig_key(set.into(), &signers).verify(&set_keys_message(&set, key_pair), signature) { Err(InvalidTransaction::BadProof)?; } @@ -1207,8 +1207,10 @@ pub mod pallet { }; // There must have been a previous session is PendingSlashReport is populated - let set = - ExternalValidatorSet { network, session: Session(Self::session(network).unwrap().0 - 1) }; + let set = ExternalValidatorSet { + network, + session: Session(Self::session(NetworkId::from(network)).unwrap().0 - 1), + }; if !key.verify(&slashes.report_slashes_message(), signature) { Err(InvalidTransaction::BadProof)?; } diff --git a/substrate/validator-sets/primitives/src/lib.rs
b/substrate/validator-sets/primitives/src/lib.rs index 21b628b9..04e3b548 100644 --- a/substrate/validator-sets/primitives/src/lib.rs +++ b/substrate/validator-sets/primitives/src/lib.rs @@ -140,7 +140,7 @@ pub fn musig_key(set: ValidatorSet, set_keys: &[Public]) -> Public { } /// The message for the `set_keys` signature. -pub fn set_keys_message(set: &ValidatorSet, key_pair: &KeyPair) -> Vec<u8> { +pub fn set_keys_message(set: &ExternalValidatorSet, key_pair: &KeyPair) -> Vec<u8> { (b"ValidatorSets-set_keys", set, key_pair).encode() }