From 54eefbde0c7596d02e4dafd9046ca463ae420c99 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 4 Aug 2024 04:48:12 -0400 Subject: [PATCH] Update the coordinator binary for the new DKG This does not yet update any tests. --- Cargo.lock | 5 + coordinator/Cargo.toml | 1 + coordinator/src/main.rs | 153 ++----- coordinator/src/substrate/mod.rs | 41 +- coordinator/src/tests/tributary/dkg.rs | 6 +- coordinator/src/tests/tributary/mod.rs | 4 +- coordinator/src/tests/tributary/sync.rs | 4 +- coordinator/src/tributary/db.rs | 23 +- coordinator/src/tributary/handle.rs | 419 +++++------------- coordinator/src/tributary/mod.rs | 37 -- coordinator/src/tributary/scanner.rs | 207 ++------- coordinator/src/tributary/signing_protocol.rs | 79 ++-- coordinator/src/tributary/spec.rs | 64 +-- coordinator/src/tributary/transaction.rs | 275 +++--------- substrate/client/src/serai/validator_sets.rs | 15 + substrate/primitives/src/networks.rs | 3 + substrate/validator-sets/pallet/src/lib.rs | 17 + 17 files changed, 401 insertions(+), 952 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4e7542b8..6c1e9be8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1069,6 +1069,7 @@ checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", + "serde", "tap", "wyz", ] @@ -8019,6 +8020,7 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" name = "serai-abi" version = "0.1.0" dependencies = [ + "bitvec", "borsh", "frame-support", "parity-scale-codec", @@ -8042,6 +8044,7 @@ version = "0.1.0" dependencies = [ "async-lock", "bitcoin", + "bitvec", "blake2", "ciphersuite", "dockertest", @@ -8099,6 +8102,7 @@ name = "serai-coordinator" version = "0.1.0" dependencies = [ "async-trait", + "bitvec", "blake2", "borsh", "ciphersuite", @@ -8595,6 +8599,7 @@ dependencies = [ name = "serai-validator-sets-pallet" version = "0.1.0" dependencies = [ + "bitvec", "frame-support", "frame-system", "hashbrown 0.14.5", diff --git 
a/coordinator/Cargo.toml b/coordinator/Cargo.toml index ae4e2be7..85865650 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -20,6 +20,7 @@ workspace = true async-trait = { version = "0.1", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["std"] } +bitvec = { version = "1", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } blake2 = { version = "0.10", default-features = false, features = ["std"] } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 58de348d..87db0135 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -16,7 +16,6 @@ use ciphersuite::{ Ciphersuite, Ristretto, }; use schnorr::SchnorrSignature; -use frost::Participant; use serai_db::{DbTxn, Db}; @@ -114,16 +113,17 @@ async fn add_tributary( // If we're rebooting, we'll re-fire this message // This is safe due to the message-queue deduplicating based off the intent system let set = spec.set(); - let our_i = spec - .i(&[], Ristretto::generator() * key.deref()) - .expect("adding a tributary for a set we aren't in set for"); + processors .send( set.network, processor_messages::key_gen::CoordinatorMessage::GenerateKey { - id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 }, - params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(), - shares: u16::from(our_i.end) - u16::from(our_i.start), + session: set.session, + threshold: spec.t(), + evrf_public_keys: spec.evrf_public_keys(), + // TODO + // params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(), + // shares: u16::from(our_i.end) - u16::from(our_i.start), }, ) .await; @@ -166,12 +166,9 @@ async fn handle_processor_message( // We'll only receive these if we fired GenerateKey, which we'll only do if if we're // in-set, making the Tributary relevant ProcessorMessage::KeyGen(inner_msg) => match inner_msg { - 
key_gen::ProcessorMessage::Commitments { id, .. } | - key_gen::ProcessorMessage::InvalidCommitments { id, .. } | - key_gen::ProcessorMessage::Shares { id, .. } | - key_gen::ProcessorMessage::InvalidShare { id, .. } | - key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } | - key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session), + key_gen::ProcessorMessage::Participation { session, .. } | + key_gen::ProcessorMessage::GeneratedKeyPair { session, .. } | + key_gen::ProcessorMessage::Blame { session, .. } => Some(*session), }, ProcessorMessage::Sign(inner_msg) => match inner_msg { // We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing @@ -421,125 +418,33 @@ async fn handle_processor_message( let txs = match msg.msg.clone() { ProcessorMessage::KeyGen(inner_msg) => match inner_msg { - key_gen::ProcessorMessage::Commitments { id, commitments } => { - vec![Transaction::DkgCommitments { - attempt: id.attempt, - commitments, - signed: Transaction::empty_signed(), - }] + key_gen::ProcessorMessage::Participation { session, participation } => { + assert_eq!(session, spec.set().session); + vec![Transaction::DkgParticipation { participation, signed: Transaction::empty_signed() }] } - key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => { - // This doesn't have guaranteed timing - // - // While the party *should* be fatally slashed and not included in future attempts, - // they'll actually be fatally slashed (assuming liveness before the Tributary retires) - // and not included in future attempts *which begin after the latency window completes* - let participant = spec - .reverse_lookup_i( - &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) - .expect("participating in DKG attempt yet we didn't save who was removed"), - faulty, - ) - .unwrap(); - vec![Transaction::RemoveParticipantDueToDkg { - participant, - signed: Transaction::empty_signed(), - }] - } - key_gen::ProcessorMessage::Shares { id, mut 
shares } => { - // Create a MuSig-based machine to inform Substrate of this key generation - let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt); - - let removed = crate::tributary::removed_as_of_dkg_attempt(&txn, genesis, id.attempt) - .expect("participating in a DKG attempt yet we didn't track who was removed yet?"); - let our_i = spec - .i(&removed, pub_key) - .expect("processor message to DKG for an attempt we aren't a validator in"); - - // `tx_shares` needs to be done here as while it can be serialized from the HashMap - // without further context, it can't be deserialized without context - let mut tx_shares = Vec::with_capacity(shares.len()); - for shares in &mut shares { - tx_shares.push(vec![]); - for i in 1 ..= spec.n(&removed) { - let i = Participant::new(i).unwrap(); - if our_i.contains(&i) { - if shares.contains_key(&i) { - panic!("processor sent us our own shares"); - } - continue; - } - tx_shares.last_mut().unwrap().push( - shares.remove(&i).expect("processor didn't send share for another validator"), - ); - } - } - - vec![Transaction::DkgShares { - attempt: id.attempt, - shares: tx_shares, - confirmation_nonces: nonces, - signed: Transaction::empty_signed(), - }] - } - key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => { - vec![Transaction::InvalidDkgShare { - attempt: id.attempt, - accuser, - faulty, - blame, - signed: Transaction::empty_signed(), - }] - } - key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => { - // TODO2: Check the KeyGenId fields - - // Tell the Tributary the key pair, get back the share for the MuSig signature - let share = crate::tributary::generated_key_pair::( + key_gen::ProcessorMessage::GeneratedKeyPair { session, substrate_key, network_key } => { + assert_eq!(session, spec.set().session); + crate::tributary::generated_key_pair::( &mut txn, - key, - spec, + genesis, &KeyPair(Public(substrate_key), network_key.try_into().unwrap()), - 
id.attempt, ); - // TODO: Move this into generated_key_pair? - match share { - Ok(share) => { - vec![Transaction::DkgConfirmed { - attempt: id.attempt, - confirmation_share: share, - signed: Transaction::empty_signed(), - }] - } - Err(p) => { - let participant = spec - .reverse_lookup_i( - &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) - .expect("participating in DKG attempt yet we didn't save who was removed"), - p, - ) - .unwrap(); - vec![Transaction::RemoveParticipantDueToDkg { - participant, - signed: Transaction::empty_signed(), - }] - } - } - } - key_gen::ProcessorMessage::Blame { id, participant } => { - let participant = spec - .reverse_lookup_i( - &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) - .expect("participating in DKG attempt yet we didn't save who was removed"), - participant, - ) - .unwrap(); - vec![Transaction::RemoveParticipantDueToDkg { - participant, + // Create a MuSig-based machine to inform Substrate of this key generation + let confirmation_nonces = + crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, 0); + + vec![Transaction::DkgConfirmationNonces { + attempt: 0, + confirmation_nonces, signed: Transaction::empty_signed(), }] } + key_gen::ProcessorMessage::Blame { session, participant } => { + assert_eq!(session, spec.set().session); + let participant = spec.reverse_lookup_i(participant).unwrap(); + vec![Transaction::RemoveParticipant { participant, signed: Transaction::empty_signed() }] + } }, ProcessorMessage::Sign(msg) => match msg { sign::ProcessorMessage::InvalidParticipant { .. 
} => { diff --git a/coordinator/src/substrate/mod.rs b/coordinator/src/substrate/mod.rs index fb1e3aed..d1946b7e 100644 --- a/coordinator/src/substrate/mod.rs +++ b/coordinator/src/substrate/mod.rs @@ -10,7 +10,7 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use serai_client::{ SeraiError, Block, Serai, TemporalSerai, - primitives::{BlockHash, NetworkId}, + primitives::{BlockHash, EmbeddedEllipticCurve, NetworkId}, validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent}, in_instructions::InInstructionsEvent, coins::CoinsEvent, @@ -60,13 +60,46 @@ async fn handle_new_set( { log::info!("present in set {:?}", set); - let set_data = { + let validators; + let mut evrf_public_keys = vec![]; + { let serai = serai.as_of(block.hash()); let serai = serai.validator_sets(); let set_participants = serai.participants(set.network).await?.expect("NewSet for set which doesn't exist"); - set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::>() + validators = set_participants + .iter() + .map(|(k, w)| { + ( + ::read_G::<&[u8]>(&mut k.0.as_ref()) + .expect("invalid key registered as participant"), + u16::try_from(*w).unwrap(), + ) + }) + .collect::>(); + for (validator, _) in set_participants { + // This is only run for external networks which always do a DKG for Serai + let substrate = serai + .embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519) + .await? 
+ .expect("Serai called NewSet on a validator without an Embedwards25519 key"); + // `embedded_elliptic_curves` is documented to have the second entry be the + // network-specific curve (if it exists and is distinct from Embedwards25519) + let network = + if let Some(embedded_elliptic_curve) = set.network.embedded_elliptic_curves().get(1) { + serai.embedded_elliptic_curve_key(validator, *embedded_elliptic_curve).await?.expect( + "Serai called NewSet on a validator without the embedded key required for the network", + ) + } else { + substrate.clone() + }; + evrf_public_keys.push(( + <[u8; 32]>::try_from(substrate) + .expect("validator-sets pallet accepted a key of an invalid length"), + network, + )); + } }; let time = if let Ok(time) = block.time() { @@ -90,7 +123,7 @@ async fn handle_new_set( const SUBSTRATE_TO_TRIBUTARY_TIME_DELAY: u64 = 120; let time = time + SUBSTRATE_TO_TRIBUTARY_TIME_DELAY; - let spec = TributarySpec::new(block.hash(), time, set, set_data); + let spec = TributarySpec::new(block.hash(), time, set, validators, evrf_public_keys); log::info!("creating new tributary for {:?}", spec.set()); diff --git a/coordinator/src/tests/tributary/dkg.rs b/coordinator/src/tests/tributary/dkg.rs index 04a528f9..0835dcf9 100644 --- a/coordinator/src/tests/tributary/dkg.rs +++ b/coordinator/src/tests/tributary/dkg.rs @@ -310,7 +310,7 @@ async fn dkg_test() { assert!(msgs.is_empty()); } - // Send DkgConfirmed + // Send DkgConfirmationShare let mut substrate_key = [0; 32]; OsRng.fill_bytes(&mut substrate_key); let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()]; @@ -325,7 +325,7 @@ async fn dkg_test() { crate::tributary::generated_key_pair::(&mut txn, key, &spec, &key_pair, 0).unwrap(); txn.commit(); - let mut tx = Transaction::DkgConfirmed { + let mut tx = Transaction::DkgConfirmationShare { attempt, confirmation_share: share, signed: Transaction::empty_signed(), @@ -359,7 +359,7 @@ async fn dkg_test() { 
assert!(removed.is_empty()); assert_eq!(self.key_pair, key_pair); assert!(signature.verify( - &*serai_client::validator_sets::primitives::set_keys_message(&set, &[], &key_pair), + &*serai_client::validator_sets::primitives::set_keys_message(&set, &key_pair), &serai_client::Public( frost::dkg::musig::musig_key::( &serai_client::validator_sets::primitives::musig_context(set), diff --git a/coordinator/src/tests/tributary/mod.rs b/coordinator/src/tests/tributary/mod.rs index c3f98311..0091eb58 100644 --- a/coordinator/src/tests/tributary/mod.rs +++ b/coordinator/src/tests/tributary/mod.rs @@ -143,7 +143,7 @@ fn serialize_sign_data() { #[test] fn serialize_transaction() { - test_read_write(&Transaction::RemoveParticipantDueToDkg { + test_read_write(&Transaction::RemoveParticipant { participant: ::G::random(&mut OsRng), signed: random_signed_with_nonce(&mut OsRng, 0), }); @@ -213,7 +213,7 @@ fn serialize_transaction() { }); } - test_read_write(&Transaction::DkgConfirmed { + test_read_write(&Transaction::DkgConfirmationShare { attempt: random_u32(&mut OsRng), confirmation_share: { let mut share = [0; 32]; diff --git a/coordinator/src/tests/tributary/sync.rs b/coordinator/src/tests/tributary/sync.rs index 18f60864..a0b68839 100644 --- a/coordinator/src/tests/tributary/sync.rs +++ b/coordinator/src/tests/tributary/sync.rs @@ -29,7 +29,7 @@ async fn sync_test() { let mut keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); // Ensure this can have a node fail - assert!(spec.n(&[]) > spec.t()); + assert!(spec.n() > spec.t()); let mut tributaries = new_tributaries(&keys, &spec) .await @@ -142,7 +142,7 @@ async fn sync_test() { // Because only `t` validators are used in a commit, take n - t nodes offline // leaving only `t` nodes. Which should force it to participate in the consensus // of next blocks. 
- let spares = usize::from(spec.n(&[]) - spec.t()); + let spares = usize::from(spec.n() - spec.t()); for thread in p2p_threads.iter().take(spares) { thread.abort(); } diff --git a/coordinator/src/tributary/db.rs b/coordinator/src/tributary/db.rs index fda1c47b..095f18af 100644 --- a/coordinator/src/tributary/db.rs +++ b/coordinator/src/tributary/db.rs @@ -18,7 +18,6 @@ use crate::tributary::{Label, Transaction}; #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)] pub enum Topic { - Dkg, DkgConfirmation, SubstrateSign(SubstrateSignableId), Sign([u8; 32]), @@ -46,15 +45,13 @@ pub enum Accumulation { create_db!( Tributary { SeraiBlockNumber: (hash: [u8; 32]) -> u64, - SeraiDkgCompleted: (spec: ValidatorSet) -> [u8; 32], + SeraiDkgCompleted: (set: ValidatorSet) -> [u8; 32], TributaryBlockNumber: (block: [u8; 32]) -> u32, LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32], // TODO: Revisit the point of this FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>, - RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>, - OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>, // TODO: Combine these two FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (), SlashPoints: (genesis: [u8; 32], account: [u8; 32]) -> u32, @@ -67,11 +64,9 @@ create_db!( DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16, DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec, - DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec, + DkgParticipation: (genesis: [u8; 32], from: u16) -> Vec, ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap>, - DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair, - KeyToDkgAttempt: (key: [u8; 32]) -> u32, - DkgLocallyCompleted: (genesis: [u8; 32]) -> (), + DkgKeyPair: (genesis: [u8; 32]) -> KeyPair, PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>, @@ -123,12 +118,12 @@ impl AttemptDb { pub fn attempt(getter: &impl Get, 
genesis: [u8; 32], topic: Topic) -> Option { let attempt = Self::get(getter, genesis, &topic); - // Don't require explicit recognition of the Dkg topic as it starts when the chain does + // Don't require explicit recognition of the DkgConfirmation topic as it starts when the chain + // does // Don't require explicit recognition of the SlashReport topic as it isn't a DoS risk and it // should always happen (eventually) if attempt.is_none() && - ((topic == Topic::Dkg) || - (topic == Topic::DkgConfirmation) || + ((topic == Topic::DkgConfirmation) || (topic == Topic::SubstrateSign(SubstrateSignableId::SlashReport))) { return Some(0); @@ -155,16 +150,12 @@ impl ReattemptDb { // 5 minutes for attempts 0 ..= 2, 10 minutes for attempts 3 ..= 5, 15 minutes for attempts > 5 // Assumes no event will take longer than 15 minutes, yet grows the time in case there are // network bandwidth issues - let mut reattempt_delay = BASE_REATTEMPT_DELAY * + let reattempt_delay = BASE_REATTEMPT_DELAY * ((AttemptDb::attempt(txn, genesis, topic) .expect("scheduling re-attempt for unknown topic") / 3) + 1) .min(3); - // Allow more time for DKGs since they have an extra round and much more data - if matches!(topic, Topic::Dkg) { - reattempt_delay *= 4; - } let upon_block = current_block_number + reattempt_delay; let mut reattempts = Self::get(txn, genesis, upon_block).unwrap_or(vec![]); diff --git a/coordinator/src/tributary/handle.rs b/coordinator/src/tributary/handle.rs index fbce7dd9..2a9c4063 100644 --- a/coordinator/src/tributary/handle.rs +++ b/coordinator/src/tributary/handle.rs @@ -13,7 +13,7 @@ use serai_client::{Signature, validator_sets::primitives::KeyPair}; use tributary::{Signed, TransactionKind, TransactionTrait}; use processor_messages::{ - key_gen::{self, KeyGenId}, + key_gen::self, coordinator::{self, SubstrateSignableId, SubstrateSignId}, sign::{self, SignId}, }; @@ -38,33 +38,20 @@ pub fn dkg_confirmation_nonces( txn: &mut impl DbTxn, attempt: u32, ) -> [u8; 64] { - 
DkgConfirmer::new(key, spec, txn, attempt) - .expect("getting DKG confirmation nonces for unknown attempt") - .preprocess() + DkgConfirmer::new(key, spec, txn, attempt).preprocess() } pub fn generated_key_pair( txn: &mut D::Transaction<'_>, - key: &Zeroizing<::F>, - spec: &TributarySpec, + genesis: [u8; 32], key_pair: &KeyPair, - attempt: u32, -) -> Result<[u8; 32], Participant> { - DkgKeyPair::set(txn, spec.genesis(), attempt, key_pair); - KeyToDkgAttempt::set(txn, key_pair.0 .0, &attempt); - let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap(); - DkgConfirmer::new(key, spec, txn, attempt) - .expect("claiming to have generated a key pair for an unrecognized attempt") - .share(preprocesses, key_pair) +) { + DkgKeyPair::set(txn, genesis, key_pair); } -fn unflatten( - spec: &TributarySpec, - removed: &[::G], - data: &mut HashMap>, -) { +fn unflatten(spec: &TributarySpec, data: &mut HashMap>) { for (validator, _) in spec.validators() { - let Some(range) = spec.i(removed, validator) else { continue }; + let Some(range) = spec.i(validator) else { continue }; let Some(all_segments) = data.remove(&range.start) else { continue; }; @@ -88,7 +75,6 @@ impl< { fn accumulate( &mut self, - removed: &[::G], data_spec: &DataSpecification, signer: ::G, data: &Vec, @@ -99,10 +85,7 @@ impl< panic!("accumulating data for a participant multiple times"); } let signer_shares = { - let Some(signer_i) = self.spec.i(removed, signer) else { - log::warn!("accumulating data from {} who was removed", hex::encode(signer.to_bytes())); - return Accumulation::NotReady; - }; + let signer_i = self.spec.i(signer).expect("transaction signer wasn't a member of the set"); u16::from(signer_i.end) - u16::from(signer_i.start) }; @@ -115,11 +98,7 @@ impl< // If 2/3rds of the network participated in this preprocess, queue it for an automatic // re-attempt - // DkgConfirmation doesn't have a re-attempt as it's just an extension for Dkg - if (data_spec.label == Label::Preprocess) && 
- received_range.contains(&self.spec.t()) && - (data_spec.topic != Topic::DkgConfirmation) - { + if (data_spec.label == Label::Preprocess) && received_range.contains(&self.spec.t()) { // Double check the attempt on this entry, as we don't want to schedule a re-attempt if this // is an old entry // This is an assert, not part of the if check, as old data shouldn't be here in the first @@ -129,10 +108,7 @@ impl< } // If we have all the needed commitments/preprocesses/shares, tell the processor - let needs_everyone = - (data_spec.topic == Topic::Dkg) || (data_spec.topic == Topic::DkgConfirmation); - let needed = if needs_everyone { self.spec.n(removed) } else { self.spec.t() }; - if received_range.contains(&needed) { + if received_range.contains(&self.spec.t()) { log::debug!( "accumulation for entry {:?} attempt #{} is ready", &data_spec.topic, @@ -141,7 +117,7 @@ impl< let mut data = HashMap::new(); for validator in self.spec.validators().iter().map(|validator| validator.0) { - let Some(i) = self.spec.i(removed, validator) else { continue }; + let Some(i) = self.spec.i(validator) else { continue }; data.insert( i.start, if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) { @@ -152,10 +128,10 @@ impl< ); } - assert_eq!(data.len(), usize::from(needed)); + assert_eq!(data.len(), usize::from(self.spec.t())); // Remove our own piece of data, if we were involved - if let Some(i) = self.spec.i(removed, Ristretto::generator() * self.our_key.deref()) { + if let Some(i) = self.spec.i(Ristretto::generator() * self.our_key.deref()) { if data.remove(&i.start).is_some() { return Accumulation::Ready(DataSet::Participating(data)); } @@ -167,7 +143,6 @@ impl< fn handle_data( &mut self, - removed: &[::G], data_spec: &DataSpecification, bytes: &Vec, signed: &Signed, @@ -213,21 +188,15 @@ impl< // TODO: If this is shares, we need to check they are part of the selected signing set // Accumulate this data - self.accumulate(removed, data_spec, 
signed.signer, bytes) + self.accumulate(data_spec, signed.signer, bytes) } fn check_sign_data_len( &mut self, - removed: &[::G], signer: ::G, len: usize, ) -> Result<(), ()> { - let Some(signer_i) = self.spec.i(removed, signer) else { - // TODO: Ensure processor doesn't so participate/check how it handles removals for being - // offline - self.fatal_slash(signer.to_bytes(), "signer participated despite being removed"); - Err(())? - }; + let signer_i = self.spec.i(signer).expect("signer wasn't a member of the set"); if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) { self.fatal_slash( signer.to_bytes(), @@ -254,12 +223,9 @@ impl< } match tx { - Transaction::RemoveParticipantDueToDkg { participant, signed } => { - if self.spec.i(&[], participant).is_none() { - self.fatal_slash( - participant.to_bytes(), - "RemoveParticipantDueToDkg vote for non-validator", - ); + Transaction::RemoveParticipant { participant, signed } => { + if self.spec.i(participant).is_none() { + self.fatal_slash(participant.to_bytes(), "RemoveParticipant vote for non-validator"); return; } @@ -274,268 +240,106 @@ impl< let prior_votes = VotesToRemove::get(self.txn, genesis, participant).unwrap_or(0); let signer_votes = - self.spec.i(&[], signed.signer).expect("signer wasn't a validator for this network?"); + self.spec.i(signed.signer).expect("signer wasn't a validator for this network?"); let new_votes = prior_votes + u16::from(signer_votes.end) - u16::from(signer_votes.start); VotesToRemove::set(self.txn, genesis, participant, &new_votes); if ((prior_votes + 1) ..= new_votes).contains(&self.spec.t()) { - self.fatal_slash(participant, "RemoveParticipantDueToDkg vote") + self.fatal_slash(participant, "RemoveParticipant vote") } } - Transaction::DkgCommitments { attempt, commitments, signed } => { - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self.fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt"); - 
return; - }; - let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()) else { - return; - }; - let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt }; - match self.handle_data(&removed, &data_spec, &commitments.encode(), &signed) { - Accumulation::Ready(DataSet::Participating(mut commitments)) => { - log::info!("got all DkgCommitments for {}", hex::encode(genesis)); - unflatten(self.spec, &removed, &mut commitments); - self - .processors - .send( - self.spec.set().network, - key_gen::CoordinatorMessage::Commitments { - id: KeyGenId { session: self.spec.set().session, attempt }, - commitments, - }, - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => { - assert!( - removed.contains(&(Ristretto::generator() * self.our_key.deref())), - "NotParticipating in a DkgCommitments we weren't removed for" - ); - } - Accumulation::NotReady => {} - } - } - - Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => { - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt"); - return; - }; - let not_participating = removed.contains(&(Ristretto::generator() * self.our_key.deref())); - - let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()) else { - return; - }; - - let Some(sender_i) = self.spec.i(&removed, signed.signer) else { - self.fatal_slash( - signed.signer.to_bytes(), - "DkgShares for a DKG they aren't participating in", - ); - return; - }; - let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start); - for shares in &shares { - if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) { - self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares"); - return; - } - } - - // Save each share as needed for blame - for (from_offset, shares) in shares.iter().enumerate() { - let from = - 
Participant::new(u16::from(sender_i.start) + u16::try_from(from_offset).unwrap()) - .unwrap(); - - for (to_offset, share) in shares.iter().enumerate() { - // 0-indexed (the enumeration) to 1-indexed (Participant) - let mut to = u16::try_from(to_offset).unwrap() + 1; - // Adjust for the omission of the sender's own shares - if to >= u16::from(sender_i.start) { - to += u16::from(sender_i.end) - u16::from(sender_i.start); - } - let to = Participant::new(to).unwrap(); - - DkgShare::set(self.txn, genesis, from.into(), to.into(), share); - } - } - - // Filter down to only our share's bytes for handle - let our_shares = if let Some(our_i) = - self.spec.i(&removed, Ristretto::generator() * self.our_key.deref()) - { - if sender_i == our_i { - vec![] - } else { - // 1-indexed to 0-indexed - let mut our_i_pos = u16::from(our_i.start) - 1; - // Handle the omission of the sender's own data - if u16::from(our_i.start) > u16::from(sender_i.start) { - our_i_pos -= sender_is_len; - } - let our_i_pos = usize::from(our_i_pos); - shares - .iter_mut() - .map(|shares| { - shares - .drain( - our_i_pos .. 
- (our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))), - ) - .collect::>() - }) - .collect() - } - } else { - assert!( - not_participating, - "we didn't have an i while handling DkgShares we weren't removed for" - ); - // Since we're not participating, simply save vec![] for our shares - vec![] - }; - // Drop shares as it's presumably been mutated into invalidity - drop(shares); - - let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt }; - let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode(); - match self.handle_data(&removed, &data_spec, &encoded_data, &signed) { - Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => { - log::info!("got all DkgShares for {}", hex::encode(genesis)); - - let mut confirmation_nonces = HashMap::new(); - let mut shares = HashMap::new(); - for (participant, confirmation_nonces_and_shares) in confirmation_nonces_and_shares { - let (these_confirmation_nonces, these_shares) = - <(Vec, Vec)>::decode(&mut confirmation_nonces_and_shares.as_slice()) - .unwrap(); - confirmation_nonces.insert(participant, these_confirmation_nonces); - shares.insert(participant, these_shares); - } - ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces); - - // shares is a HashMap>>>, with the values representing: - // - Each of the sender's shares - // - Each of the our shares - // - Each share - // We need a Vec>>, with the outer being each of ours - let mut expanded_shares = vec![]; - for (sender_start_i, shares) in shares { - let shares: Vec>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap(); - for (sender_i_offset, our_shares) in shares.into_iter().enumerate() { - for (our_share_i, our_share) in our_shares.into_iter().enumerate() { - if expanded_shares.len() <= our_share_i { - expanded_shares.push(HashMap::new()); - } - expanded_shares[our_share_i].insert( - Participant::new( - u16::from(sender_start_i) + 
u16::try_from(sender_i_offset).unwrap(), - ) - .unwrap(), - our_share, - ); - } - } - } - - self - .processors - .send( - self.spec.set().network, - key_gen::CoordinatorMessage::Shares { - id: KeyGenId { session: self.spec.set().session, attempt }, - shares: expanded_shares, - }, - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => { - assert!(not_participating, "NotParticipating in a DkgShares we weren't removed for"); - } - Accumulation::NotReady => {} - } - } - - Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self - .fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt"); - return; - }; - let Some(range) = self.spec.i(&removed, signed.signer) else { - self.fatal_slash( - signed.signer.to_bytes(), - "InvalidDkgShare for a DKG they aren't participating in", - ); - return; - }; - if !range.contains(&accuser) { - self.fatal_slash( - signed.signer.to_bytes(), - "accused with a Participant index which wasn't theirs", - ); - return; - } - if range.contains(&faulty) { - self.fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare"); - return; - } - - let Some(share) = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()) else { - self.fatal_slash( - signed.signer.to_bytes(), - "InvalidDkgShare had a non-existent faulty participant", - ); - return; - }; + Transaction::DkgParticipation { participation, signed } => { + // Send the participation to the processor self .processors .send( self.spec.set().network, - key_gen::CoordinatorMessage::VerifyBlame { - id: KeyGenId { session: self.spec.set().session, attempt }, - accuser, - accused: faulty, - share, - blame, + key_gen::CoordinatorMessage::Participation { + session: self.spec.set().session, + participant: self + .spec + .i(signed.signer) + .expect("signer wasn't a validator for this network?") + .start, + participation, }, ) 
.await; } - Transaction::DkgConfirmed { attempt, confirmation_share, signed } => { - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self.fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt"); - return; - }; + Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed } => { + let data_spec = + DataSpecification { topic: Topic::DkgConfirmation, label: Label::Preprocess, attempt }; + match self.handle_data(&data_spec, &confirmation_nonces.to_vec(), &signed) { + Accumulation::Ready(DataSet::Participating(confirmation_nonces)) => { + log::info!( + "got all DkgConfirmationNonces for {}, attempt {attempt}", + hex::encode(genesis) + ); + ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces); + + // Send the expected DkgConfirmationShare + // TODO: Slight race condition here due to set, publish tx, then commit txn + let key_pair = DkgKeyPair::get(self.txn, genesis) + .expect("participating in confirming key we don't have"); + let mut tx = match DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt) + .share(confirmation_nonces, &key_pair) + { + Ok(confirmation_share) => Transaction::DkgConfirmationShare { + attempt, + confirmation_share, + signed: Transaction::empty_signed(), + }, + Err(participant) => Transaction::RemoveParticipant { + participant: self.spec.reverse_lookup_i(participant).unwrap(), + signed: Transaction::empty_signed(), + }, + }; + tx.sign(&mut OsRng, genesis, self.our_key); + self.publish_tributary_tx.publish_tributary_tx(tx).await; + } + Accumulation::Ready(DataSet::NotParticipating) | Accumulation::NotReady => {} + } + } + + Transaction::DkgConfirmationShare { attempt, confirmation_share, signed } => { let data_spec = DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt }; - match self.handle_data(&removed, &data_spec, &confirmation_share.to_vec(), &signed) { + match self.handle_data(&data_spec, 
&confirmation_share.to_vec(), &signed) { Accumulation::Ready(DataSet::Participating(shares)) => { - log::info!("got all DkgConfirmed for {}", hex::encode(genesis)); - - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - panic!( - "DkgConfirmed for everyone yet didn't have the removed parties for this attempt", - ); - }; + log::info!( + "got all DkgConfirmationShare for {}, attempt {attempt}", + hex::encode(genesis) + ); let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap(); + // TODO: This can technically happen under very very very specific timing as the txn - // put happens before DkgConfirmed, yet the txn commit isn't guaranteed to - let key_pair = DkgKeyPair::get(self.txn, genesis, attempt).expect( - "in DkgConfirmed handling, which happens after everyone \ - (including us) fires DkgConfirmed, yet no confirming key pair", + // put happens before DkgConfirmationShare, yet the txn isn't guaranteed to be + // committed + let key_pair = DkgKeyPair::get(self.txn, genesis).expect( + "in DkgConfirmationShare handling, which happens after everyone \ + (including us) fires DkgConfirmationShare, yet no confirming key pair", ); - let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt) - .expect("confirming DKG for unrecognized attempt"); + + // Determine the bitstring representing who participated before we move `shares` + // This reserves too much capacity if the participating validators have multiple key + // shares, yet that's fine + let validators = self.spec.validators(); + let mut signature_participants = bitvec::vec::BitVec::with_capacity(validators.len()); + for (participant, _) in self.spec.validators() { + signature_participants + .push(shares.contains_key(&self.spec.i(participant).unwrap().start)); + } + + // Produce the final signature + let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt); let sig = match confirmer.complete(preprocesses, &key_pair, 
shares) { Ok(sig) => sig, Err(p) => { - let mut tx = Transaction::RemoveParticipantDueToDkg { - participant: self.spec.reverse_lookup_i(&removed, p).unwrap(), + let mut tx = Transaction::RemoveParticipant { + participant: self.spec.reverse_lookup_i(p).unwrap(), signed: Transaction::empty_signed(), }; tx.sign(&mut OsRng, genesis, self.our_key); @@ -544,15 +348,13 @@ impl< } }; - DkgLocallyCompleted::set(self.txn, genesis, &()); - self .publish_serai_tx .publish_set_keys( self.db, self.spec.set(), - removed.into_iter().map(|key| key.to_bytes().into()).collect(), key_pair, + signature_participants, Signature(sig), ) .await; @@ -618,19 +420,8 @@ impl< } Transaction::SubstrateSign(data) => { - // Provided transactions ensure synchrony on any signing protocol, and we won't start - // signing with threshold keys before we've confirmed them on-chain - let Some(removed) = - crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) - else { - self.fatal_slash( - data.signed.signer.to_bytes(), - "signing despite not having set keys on substrate", - ); - return; - }; let signer = data.signed.signer; - let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()) else { + let Ok(()) = self.check_sign_data_len(signer, data.data.len()) else { return; }; let expected_len = match data.label { @@ -653,11 +444,11 @@ impl< attempt: data.attempt, }; let Accumulation::Ready(DataSet::Participating(mut results)) = - self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed) + self.handle_data(&data_spec, &data.data.encode(), &data.signed) else { return; }; - unflatten(self.spec, &removed, &mut results); + unflatten(self.spec, &mut results); let id = SubstrateSignId { session: self.spec.set().session, @@ -678,16 +469,7 @@ impl< } Transaction::Sign(data) => { - let Some(removed) = - crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) - else { - self.fatal_slash( - data.signed.signer.to_bytes(), - "signing despite not 
having set keys on substrate", - ); - return; - }; - let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()) else { + let Ok(()) = self.check_sign_data_len(data.signed.signer, data.data.len()) else { return; }; @@ -697,9 +479,9 @@ impl< attempt: data.attempt, }; if let Accumulation::Ready(DataSet::Participating(mut results)) = - self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed) + self.handle_data(&data_spec, &data.data.encode(), &data.signed) { - unflatten(self.spec, &removed, &mut results); + unflatten(self.spec, &mut results); let id = SignId { session: self.spec.set().session, id: data.plan, attempt: data.attempt }; self @@ -740,8 +522,7 @@ impl< } Transaction::SlashReport(points, signed) => { - // Uses &[] as we only need the length which is independent to who else was removed - let signer_range = self.spec.i(&[], signed.signer).unwrap(); + let signer_range = self.spec.i(signed.signer).unwrap(); let signer_len = u16::from(signer_range.end) - u16::from(signer_range.start); if points.len() != (self.spec.validators().len() - 1) { self.fatal_slash( diff --git a/coordinator/src/tributary/mod.rs b/coordinator/src/tributary/mod.rs index cc9bdb1e..6e2f2661 100644 --- a/coordinator/src/tributary/mod.rs +++ b/coordinator/src/tributary/mod.rs @@ -1,7 +1,3 @@ -use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; - -use serai_client::validator_sets::primitives::ValidatorSet; - use tributary::{ ReadWrite, transaction::{TransactionError, TransactionKind, Transaction as TransactionTrait}, @@ -24,39 +20,6 @@ pub use handle::*; pub mod scanner; -pub fn removed_as_of_dkg_attempt( - getter: &impl Get, - genesis: [u8; 32], - attempt: u32, -) -> Option::G>> { - if attempt == 0 { - Some(vec![]) - } else { - RemovedAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| { - keys.iter().map(|key| ::G::from_bytes(key).unwrap()).collect() - }) - } -} - -pub fn removed_as_of_set_keys( - getter: &impl Get, - set: 
ValidatorSet, - genesis: [u8; 32], -) -> Option::G>> { - // SeraiDkgCompleted has the key placed on-chain. - // This key can be uniquely mapped to an attempt so long as one participant was honest, which we - // assume as a presumably honest participant. - // Resolve from generated key to attempt to fatally slashed as of attempt. - - // This expect will trigger if this is prematurely called and Substrate has tracked the keys yet - // we haven't locally synced and handled the Tributary - // All callers of this, at the time of writing, ensure the Tributary has sufficiently synced - // making the panic with context more desirable than the None - let attempt = KeyToDkgAttempt::get(getter, SeraiDkgCompleted::get(getter, set)?) - .expect("key completed on-chain didn't have an attempt related"); - removed_as_of_dkg_attempt(getter, genesis, attempt) -} - pub async fn publish_signed_transaction( txn: &mut D::Transaction<'_>, tributary: &Tributary, diff --git a/coordinator/src/tributary/scanner.rs b/coordinator/src/tributary/scanner.rs index 9b56e0a0..c0b906ed 100644 --- a/coordinator/src/tributary/scanner.rs +++ b/coordinator/src/tributary/scanner.rs @@ -1,15 +1,17 @@ -use core::{marker::PhantomData, ops::Deref, future::Future, time::Duration}; -use std::{sync::Arc, collections::HashSet}; +use core::{marker::PhantomData, future::Future, time::Duration}; +use std::sync::Arc; use zeroize::Zeroizing; +use rand_core::OsRng; + use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use tokio::sync::broadcast; use scale::{Encode, Decode}; use serai_client::{ - primitives::{SeraiAddress, Signature}, + primitives::Signature, validator_sets::primitives::{KeyPair, ValidatorSet}, Serai, }; @@ -67,8 +69,8 @@ pub trait PublishSeraiTransaction { &self, db: &(impl Sync + Get), set: ValidatorSet, - removed: Vec, key_pair: KeyPair, + signature_participants: bitvec::vec::BitVec, signature: Signature, ); } @@ -129,17 +131,12 @@ mod impl_pst_for_serai { &self, db: &(impl Sync + Get), 
set: ValidatorSet, - removed: Vec, key_pair: KeyPair, + signature_participants: bitvec::vec::BitVec, signature: Signature, ) { - // TODO: BoundedVec as an arg to avoid this expect - let tx = SeraiValidatorSets::set_keys( - set.network, - removed.try_into().expect("removing more than allowed"), - key_pair, - signature, - ); + let tx = + SeraiValidatorSets::set_keys(set.network, key_pair, signature_participants, signature); async fn check(serai: SeraiValidatorSets<'_>, set: ValidatorSet, (): ()) -> bool { if matches!(serai.keys(set).await, Ok(Some(_))) { log::info!("another coordinator set key pair for {:?}", set); @@ -249,18 +246,15 @@ impl< let genesis = self.spec.genesis(); - let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis); - // Calculate the shares still present, spinning if not enough are - // still_present_shares is used by a below branch, yet it's a natural byproduct of checking if - // we should spin, hence storing it in a variable here - let still_present_shares = { + { // Start with the original n value - let mut present_shares = self.spec.n(&[]); + let mut present_shares = self.spec.n(); // Remove everyone fatally slashed + let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis); for removed in ¤t_fatal_slashes { let original_i_for_removed = - self.spec.i(&[], *removed).expect("removed party was never present"); + self.spec.i(*removed).expect("removed party was never present"); let removed_shares = u16::from(original_i_for_removed.end) - u16::from(original_i_for_removed.start); present_shares -= removed_shares; @@ -276,79 +270,17 @@ impl< tokio::time::sleep(core::time::Duration::from_secs(60)).await; } } - - present_shares - }; + } for topic in ReattemptDb::take(self.txn, genesis, self.block_number) { let attempt = AttemptDb::start_next_attempt(self.txn, genesis, topic); - log::info!("re-attempting {topic:?} with attempt {attempt}"); + log::info!("potentially re-attempting {topic:?} with attempt {attempt}"); // 
Slash people who failed to participate as expected in the prior attempt { let prior_attempt = attempt - 1; - let (removed, expected_participants) = match topic { - Topic::Dkg => { - // Every validator who wasn't removed is expected to have participated - let removed = - crate::tributary::removed_as_of_dkg_attempt(self.txn, genesis, prior_attempt) - .expect("prior attempt didn't have its removed saved to disk"); - let removed_set = removed.iter().copied().collect::>(); - ( - removed, - self - .spec - .validators() - .into_iter() - .filter_map(|(validator, _)| { - Some(validator).filter(|validator| !removed_set.contains(validator)) - }) - .collect(), - ) - } - Topic::DkgConfirmation => { - panic!("TODO: re-attempting DkgConfirmation when we should be re-attempting the Dkg") - } - Topic::SubstrateSign(_) | Topic::Sign(_) => { - let removed = - crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) - .expect("SubstrateSign/Sign yet have yet to set keys"); - // TODO: If 67% sent preprocesses, this should be them. 
Else, this should be vec![] - let expected_participants = vec![]; - (removed, expected_participants) - } - }; - - let (expected_topic, expected_label) = match topic { - Topic::Dkg => { - let n = self.spec.n(&removed); - // If we got all the DKG shares, we should be on DKG confirmation - let share_spec = - DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt: prior_attempt }; - if DataReceived::get(self.txn, genesis, &share_spec).unwrap_or(0) == n { - // Label::Share since there is no Label::Preprocess for DkgConfirmation since the - // preprocess is part of Topic::Dkg Label::Share - (Topic::DkgConfirmation, Label::Share) - } else { - let preprocess_spec = DataSpecification { - topic: Topic::Dkg, - label: Label::Preprocess, - attempt: prior_attempt, - }; - // If we got all the DKG preprocesses, DKG shares - if DataReceived::get(self.txn, genesis, &preprocess_spec).unwrap_or(0) == n { - // Label::Share since there is no Label::Preprocess for DkgConfirmation since the - // preprocess is part of Topic::Dkg Label::Share - (Topic::Dkg, Label::Share) - } else { - (Topic::Dkg, Label::Preprocess) - } - } - } - Topic::DkgConfirmation => unreachable!(), - // If we got enough participants to move forward, then we expect shares from them all - Topic::SubstrateSign(_) | Topic::Sign(_) => (topic, Label::Share), - }; + // TODO: If 67% sent preprocesses, this should be them. 
Else, this should be vec![] + let expected_participants: Vec<::G> = vec![]; let mut did_not_participate = vec![]; for expected_participant in expected_participants { @@ -356,8 +288,9 @@ impl< self.txn, genesis, &DataSpecification { - topic: expected_topic, - label: expected_label, + topic, + // Since we got the preprocesses, we were supposed to get the shares + label: Label::Share, attempt: prior_attempt, }, &expected_participant.to_bytes(), @@ -373,15 +306,8 @@ impl< // Accordingly, clear did_not_participate // TODO - // If during the DKG, explicitly mark these people as having been offline - // TODO: If they were offline sufficiently long ago, don't strike them off - if topic == Topic::Dkg { - let mut existing = OfflineDuringDkg::get(self.txn, genesis).unwrap_or(vec![]); - for did_not_participate in did_not_participate { - existing.push(did_not_participate.to_bytes()); - } - OfflineDuringDkg::set(self.txn, genesis, &existing); - } + // TODO: Increment the slash points of people who didn't preprocess in some expected window + // of time // Slash everyone who didn't participate as expected // This may be overzealous as if a minority detects a completion, they'll abort yet the @@ -411,75 +337,22 @@ impl< then preprocesses. This only sends preprocesses). 
*/ match topic { - Topic::Dkg => { - let mut removed = current_fatal_slashes.clone(); + Topic::DkgConfirmation => { + if SeraiDkgCompleted::get(self.txn, self.spec.set()).is_none() { + log::info!("re-attempting DKG confirmation with attempt {attempt}"); - let t = self.spec.t(); - { - let mut present_shares = still_present_shares; - - // Load the parties marked as offline across the various attempts - let mut offline = OfflineDuringDkg::get(self.txn, genesis) - .unwrap_or(vec![]) - .iter() - .map(|key| ::G::from_bytes(key).unwrap()) - .collect::>(); - // Pop from the list to prioritize the removal of those recently offline - while let Some(offline) = offline.pop() { - // Make sure they weren't removed already (such as due to being fatally slashed) - // This also may trigger if they were offline across multiple attempts - if removed.contains(&offline) { - continue; - } - - // If we can remove them and still meet the threshold, do so - let original_i_for_offline = - self.spec.i(&[], offline).expect("offline was never present?"); - let offline_shares = - u16::from(original_i_for_offline.end) - u16::from(original_i_for_offline.start); - if (present_shares - offline_shares) >= t { - present_shares -= offline_shares; - removed.push(offline); - } - - // If we've removed as many people as we can, break - if present_shares == t { - break; - } - } - } - - RemovedAsOfDkgAttempt::set( - self.txn, - genesis, - attempt, - &removed.iter().map(::G::to_bytes).collect(), - ); - - if DkgLocallyCompleted::get(self.txn, genesis).is_none() { - let Some(our_i) = self.spec.i(&removed, Ristretto::generator() * self.our_key.deref()) - else { - continue; + // Since it wasn't completed, publish our nonces for the next attempt + let confirmation_nonces = + crate::tributary::dkg_confirmation_nonces(self.our_key, self.spec, self.txn, attempt); + let mut tx = Transaction::DkgConfirmationNonces { + attempt, + confirmation_nonces, + signed: Transaction::empty_signed(), }; - - // Since it wasn't 
completed, instruct the processor to start the next attempt - let id = - processor_messages::key_gen::KeyGenId { session: self.spec.set().session, attempt }; - - let params = - frost::ThresholdParams::new(t, self.spec.n(&removed), our_i.start).unwrap(); - let shares = u16::from(our_i.end) - u16::from(our_i.start); - - self - .processors - .send( - self.spec.set().network, - processor_messages::key_gen::CoordinatorMessage::GenerateKey { id, params, shares }, - ) - .await; + tx.sign(&mut OsRng, genesis, self.our_key); + self.publish_tributary_tx.publish_tributary_tx(tx).await; } } - Topic::DkgConfirmation => unreachable!(), Topic::SubstrateSign(inner_id) => { let id = processor_messages::coordinator::SubstrateSignId { session: self.spec.set().session, @@ -496,6 +369,8 @@ impl< crate::cosign_evaluator::LatestCosign::get(self.txn, self.spec.set().network) .map_or(0, |cosign| cosign.block_number); if latest_cosign < block_number { + log::info!("re-attempting cosigning {block_number:?} with attempt {attempt}"); + // Instruct the processor to start the next attempt self .processors @@ -512,6 +387,8 @@ impl< SubstrateSignableId::Batch(batch) => { // If the Batch hasn't appeared on-chain... if BatchInstructionsHashDb::get(self.txn, self.spec.set().network, batch).is_none() { + log::info!("re-attempting signing batch {batch:?} with attempt {attempt}"); + // Instruct the processor to start the next attempt // The processor won't continue if it's already signed a Batch // Prior checking if the Batch is on-chain just may reduce the non-participating @@ -529,6 +406,11 @@ impl< // If this Tributary hasn't been retired... 
// (published SlashReport/took too long to do so) if crate::RetiredTributaryDb::get(self.txn, self.spec.set()).is_none() { + log::info!( + "re-attempting signing slash report for {:?} with attempt {attempt}", + self.spec.set() + ); + let report = SlashReport::get(self.txn, self.spec.set()) .expect("re-attempting signing a SlashReport we don't have?"); self @@ -575,8 +457,7 @@ impl< }; // Assign them 0 points for themselves report.insert(i, 0); - // Uses &[] as we only need the length which is independent to who else was removed - let signer_i = self.spec.i(&[], validator).unwrap(); + let signer_i = self.spec.i(validator).unwrap(); let signer_len = u16::from(signer_i.end) - u16::from(signer_i.start); // Push `n` copies, one for each of their shares for _ in 0 .. signer_len { diff --git a/coordinator/src/tributary/signing_protocol.rs b/coordinator/src/tributary/signing_protocol.rs index a90ed479..d00be867 100644 --- a/coordinator/src/tributary/signing_protocol.rs +++ b/coordinator/src/tributary/signing_protocol.rs @@ -63,10 +63,7 @@ use rand_core::OsRng; use blake2::{Digest, Blake2s256}; -use ciphersuite::{ - group::{ff::PrimeField, GroupEncoding}, - Ciphersuite, Ristretto, -}; +use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto}; use frost::{ FrostError, dkg::{Participant, musig::musig}, @@ -77,10 +74,7 @@ use frost_schnorrkel::Schnorrkel; use scale::Encode; -use serai_client::{ - Public, - validator_sets::primitives::{KeyPair, musig_context, set_keys_message}, -}; +use serai_client::validator_sets::primitives::{KeyPair, musig_context, set_keys_message}; use serai_db::*; @@ -89,6 +83,7 @@ use crate::tributary::TributarySpec; create_db!( SigningProtocolDb { CachedPreprocesses: (context: &impl Encode) -> [u8; 32] + DataSignedWith: (context: &impl Encode) -> (Vec, HashMap>), } ); @@ -117,16 +112,22 @@ impl SigningProtocol<'_, T, C> { }; let encryption_key_slice: &mut [u8] = encryption_key.as_mut(); - let algorithm = Schnorrkel::new(b"substrate"); + // 
Create the MuSig keys let keys: ThresholdKeys = musig(&musig_context(self.spec.set()), self.key, participants) .expect("signing for a set we aren't in/validator present multiple times") .into(); + // Define the algorithm + let algorithm = Schnorrkel::new(b"substrate"); + + // Check if we've prior preprocessed if CachedPreprocesses::get(self.txn, &self.context).is_none() { + // If we haven't, we create a machine solely to obtain the preprocess with let (machine, _) = AlgorithmMachine::new(algorithm.clone(), keys.clone()).preprocess(&mut OsRng); + // Cache and save the preprocess to disk let mut cache = machine.cache(); assert_eq!(cache.0.len(), 32); #[allow(clippy::needless_range_loop)] @@ -137,13 +138,15 @@ impl SigningProtocol<'_, T, C> { CachedPreprocesses::set(self.txn, &self.context, &cache.0); } + // We're now guaranteed to have the preprocess, hence why this `unwrap` is safe let cached = CachedPreprocesses::get(self.txn, &self.context).unwrap(); - let mut cached: Zeroizing<[u8; 32]> = Zeroizing::new(cached); + let mut cached = Zeroizing::new(cached); #[allow(clippy::needless_range_loop)] for b in 0 .. 
32 { cached[b] ^= encryption_key_slice[b]; } encryption_key_slice.zeroize(); + // Create the machine from the cached preprocess let (machine, preprocess) = AlgorithmSignMachine::from_cache(algorithm, keys, CachedPreprocess(cached)); @@ -156,8 +159,29 @@ impl SigningProtocol<'_, T, C> { mut serialized_preprocesses: HashMap>, msg: &[u8], ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { - let machine = self.preprocess_internal(participants).0; + // We can't clear the preprocess as we still need it to accumulate all of the shares + // We do save the message we signed so any future calls with distinct messages panic + // This assumes the txn deciding this data is committed before the share is broadcast + if let Some((existing_msg, existing_preprocesses)) = + DataSignedWith::get(self.txn, &self.context) + { + assert_eq!(msg, &existing_msg, "obtaining a signature share for a distinct message"); + assert_eq!( + &serialized_preprocesses, &existing_preprocesses, + "obtaining a signature share with a distinct set of preprocesses" + ); + } else { + DataSignedWith::set( + self.txn, + &self.context, + &(msg.to_vec(), serialized_preprocesses.clone()), + ); + } + // Get the preprocessed machine + let (machine, _) = self.preprocess_internal(participants); + + // Deserialize all the preprocesses let mut participants = serialized_preprocesses.keys().copied().collect::>(); participants.sort(); let mut preprocesses = HashMap::new(); @@ -170,13 +194,14 @@ impl SigningProtocol<'_, T, C> { ); } + // Sign the share let (machine, share) = machine.sign(preprocesses, msg).map_err(|e| match e { FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), FrostError::InvalidParticipant(_, _) | FrostError::InvalidSigningSet(_) | FrostError::InvalidParticipantQuantity(_, _) | FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => panic!("unexpected error during sign: {e:?}"), 
FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, })?; @@ -207,24 +232,23 @@ impl SigningProtocol<'_, T, C> { } // Get the keys of the participants, noted by their threshold is, and return a new map indexed by -// the MuSig is. +// their MuSig is. fn threshold_i_map_to_keys_and_musig_i_map( spec: &TributarySpec, - removed: &[::G], our_key: &Zeroizing<::F>, mut map: HashMap>, ) -> (Vec<::G>, HashMap>) { // Insert our own index so calculations aren't offset let our_threshold_i = spec - .i(removed, ::generator() * our_key.deref()) - .expect("MuSig t-of-n signing a for a protocol we were removed from") + .i(::generator() * our_key.deref()) + .expect("not in a set we're signing for") .start; assert!(map.insert(our_threshold_i, vec![]).is_none()); let spec_validators = spec.validators(); let key_from_threshold_i = |threshold_i| { for (key, _) in &spec_validators { - if threshold_i == spec.i(removed, *key).expect("MuSig t-of-n participant was removed").start { + if threshold_i == spec.i(*key).expect("validator wasn't in a set they're in").start { return *key; } } @@ -257,7 +281,6 @@ type DkgConfirmerSigningProtocol<'a, T> = SigningProtocol<'a, T, (&'static [u8; pub(crate) struct DkgConfirmer<'a, T: DbTxn> { key: &'a Zeroizing<::F>, spec: &'a TributarySpec, - removed: Vec<::G>, txn: &'a mut T, attempt: u32, } @@ -268,12 +291,10 @@ impl DkgConfirmer<'_, T> { spec: &'a TributarySpec, txn: &'a mut T, attempt: u32, - ) -> Option> { - // This relies on how confirmations are inlined into the DKG protocol and they accordingly - // share attempts - let removed = crate::tributary::removed_as_of_dkg_attempt(txn, spec.genesis(), attempt)?; - Some(DkgConfirmer { key, spec, removed, txn, attempt }) + ) -> DkgConfirmer<'a, T> { + DkgConfirmer { key, spec, txn, attempt } } + fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> { let context = (b"DkgConfirmer", self.attempt); SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context } @@ 
-294,13 +315,8 @@ impl DkgConfirmer<'_, T> { key_pair: &KeyPair, ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { let participants = self.spec.validators().iter().map(|val| val.0).collect::>(); - let preprocesses = - threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, preprocesses).1; - let msg = set_keys_message( - &self.spec.set(), - &self.removed.iter().map(|key| Public(key.to_bytes())).collect::>(), - key_pair, - ); + let preprocesses = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses).1; + let msg = set_keys_message(&self.spec.set(), key_pair); self.signing_protocol().share_internal(&participants, preprocesses, &msg) } // Get the share for this confirmation, if the preprocesses are valid. @@ -318,8 +334,7 @@ impl DkgConfirmer<'_, T> { key_pair: &KeyPair, shares: HashMap>, ) -> Result<[u8; 64], Participant> { - let shares = - threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, shares).1; + let shares = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares).1; let machine = self .share_internal(preprocesses, key_pair) diff --git a/coordinator/src/tributary/spec.rs b/coordinator/src/tributary/spec.rs index 92905490..efc792e6 100644 --- a/coordinator/src/tributary/spec.rs +++ b/coordinator/src/tributary/spec.rs @@ -9,7 +9,7 @@ use frost::Participant; use scale::Encode; use borsh::{BorshSerialize, BorshDeserialize}; -use serai_client::{primitives::PublicKey, validator_sets::primitives::ValidatorSet}; +use serai_client::validator_sets::primitives::ValidatorSet; fn borsh_serialize_validators( validators: &Vec<(::G, u16)>, @@ -49,6 +49,7 @@ pub struct TributarySpec { deserialize_with = "borsh_deserialize_validators" )] validators: Vec<(::G, u16)>, + evrf_public_keys: Vec<([u8; 32], Vec)>, } impl TributarySpec { @@ -56,16 +57,10 @@ impl TributarySpec { serai_block: [u8; 32], start_time: u64, set: ValidatorSet, - set_participants: Vec<(PublicKey, u16)>, + validators: 
Vec<(::G, u16)>, + evrf_public_keys: Vec<([u8; 32], Vec)>, ) -> TributarySpec { - let mut validators = vec![]; - for (participant, shares) in set_participants { - let participant = ::read_G::<&[u8]>(&mut participant.0.as_ref()) - .expect("invalid key registered as participant"); - validators.push((participant, shares)); - } - - Self { serai_block, start_time, set, validators } + Self { serai_block, start_time, set, validators, evrf_public_keys } } pub fn set(&self) -> ValidatorSet { @@ -88,24 +83,15 @@ impl TributarySpec { self.start_time } - pub fn n(&self, removed_validators: &[::G]) -> u16 { - self - .validators - .iter() - .map(|(validator, weight)| if removed_validators.contains(validator) { 0 } else { *weight }) - .sum() + pub fn n(&self) -> u16 { + self.validators.iter().map(|(_, weight)| *weight).sum() } pub fn t(&self) -> u16 { - // t doesn't change with regards to the amount of removed validators - ((2 * self.n(&[])) / 3) + 1 + ((2 * self.n()) / 3) + 1 } - pub fn i( - &self, - removed_validators: &[::G], - key: ::G, - ) -> Option> { + pub fn i(&self, key: ::G) -> Option> { let mut all_is = HashMap::new(); let mut i = 1; for (validator, weight) in &self.validators { @@ -116,34 +102,12 @@ impl TributarySpec { i += weight; } - let original_i = all_is.get(&key)?.clone(); - let mut result_i = original_i.clone(); - for removed_validator in removed_validators { - let removed_i = all_is - .get(removed_validator) - .expect("removed validator wasn't present in set to begin with"); - // If the queried key was removed, return None - if &original_i == removed_i { - return None; - } - - // If the removed was before the queried, shift the queried down accordingly - if removed_i.start < original_i.start { - let removed_shares = u16::from(removed_i.end) - u16::from(removed_i.start); - result_i.start = Participant::new(u16::from(original_i.start) - removed_shares).unwrap(); - result_i.end = Participant::new(u16::from(original_i.end) - removed_shares).unwrap(); - } - } - 
Some(result_i) + Some(all_is.get(&key)?.clone()) } - pub fn reverse_lookup_i( - &self, - removed_validators: &[::G], - i: Participant, - ) -> Option<::G> { + pub fn reverse_lookup_i(&self, i: Participant) -> Option<::G> { for (validator, _) in &self.validators { - if self.i(removed_validators, *validator).map_or(false, |range| range.contains(&i)) { + if self.i(*validator).map_or(false, |range| range.contains(&i)) { return Some(*validator); } } @@ -153,4 +117,8 @@ impl TributarySpec { pub fn validators(&self) -> Vec<(::G, u64)> { self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect() } + + pub fn evrf_public_keys(&self) -> Vec<([u8; 32], Vec)> { + self.evrf_public_keys.clone() + } } diff --git a/coordinator/src/tributary/transaction.rs b/coordinator/src/tributary/transaction.rs index 8d8bdd4c..36bf79ac 100644 --- a/coordinator/src/tributary/transaction.rs +++ b/coordinator/src/tributary/transaction.rs @@ -12,7 +12,6 @@ use ciphersuite::{ Ciphersuite, Ristretto, }; use schnorr::SchnorrSignature; -use frost::Participant; use scale::{Encode, Decode}; use processor_messages::coordinator::SubstrateSignableId; @@ -130,32 +129,26 @@ impl SignData { #[derive(Clone, PartialEq, Eq)] pub enum Transaction { - RemoveParticipantDueToDkg { + RemoveParticipant { participant: ::G, signed: Signed, }, - DkgCommitments { - attempt: u32, - commitments: Vec>, + DkgParticipation { + participation: Vec, signed: Signed, }, - DkgShares { + DkgConfirmationNonces { + // The confirmation attempt attempt: u32, - // Sending Participant, Receiving Participant, Share - shares: Vec>>, + // The nonces for DKG confirmation attempt #attempt confirmation_nonces: [u8; 64], signed: Signed, }, - InvalidDkgShare { - attempt: u32, - accuser: Participant, - faulty: Participant, - blame: Option>, - signed: Signed, - }, - DkgConfirmed { + DkgConfirmationShare { + // The confirmation attempt attempt: u32, + // The share for DKG confirmation attempt #attempt 
confirmation_share: [u8; 32], signed: Signed, }, @@ -197,29 +190,22 @@ pub enum Transaction { impl Debug for Transaction { fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { match self { - Transaction::RemoveParticipantDueToDkg { participant, signed } => fmt - .debug_struct("Transaction::RemoveParticipantDueToDkg") + Transaction::RemoveParticipant { participant, signed } => fmt + .debug_struct("Transaction::RemoveParticipant") .field("participant", &hex::encode(participant.to_bytes())) .field("signer", &hex::encode(signed.signer.to_bytes())) .finish_non_exhaustive(), - Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt - .debug_struct("Transaction::DkgCommitments") + Transaction::DkgParticipation { signed, .. } => fmt + .debug_struct("Transaction::DkgParticipation") + .field("signer", &hex::encode(signed.signer.to_bytes())) + .finish_non_exhaustive(), + Transaction::DkgConfirmationNonces { attempt, signed, .. } => fmt + .debug_struct("Transaction::DkgConfirmationNonces") .field("attempt", attempt) .field("signer", &hex::encode(signed.signer.to_bytes())) .finish_non_exhaustive(), - Transaction::DkgShares { attempt, signed, .. } => fmt - .debug_struct("Transaction::DkgShares") - .field("attempt", attempt) - .field("signer", &hex::encode(signed.signer.to_bytes())) - .finish_non_exhaustive(), - Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt - .debug_struct("Transaction::InvalidDkgShare") - .field("attempt", attempt) - .field("accuser", accuser) - .field("faulty", faulty) - .finish_non_exhaustive(), - Transaction::DkgConfirmed { attempt, confirmation_share: _, signed } => fmt - .debug_struct("Transaction::DkgConfirmed") + Transaction::DkgConfirmationShare { attempt, signed, .. 
} => fmt + .debug_struct("Transaction::DkgConfirmationShare") .field("attempt", attempt) .field("signer", &hex::encode(signed.signer.to_bytes())) .finish_non_exhaustive(), @@ -261,43 +247,32 @@ impl ReadWrite for Transaction { reader.read_exact(&mut kind)?; match kind[0] { - 0 => Ok(Transaction::RemoveParticipantDueToDkg { + 0 => Ok(Transaction::RemoveParticipant { participant: Ristretto::read_G(reader)?, signed: Signed::read_without_nonce(reader, 0)?, }), 1 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); + let participation = { + let mut participation_len = [0; 4]; + reader.read_exact(&mut participation_len)?; + let participation_len = u32::from_le_bytes(participation_len); - let commitments = { - let mut commitments_len = [0; 1]; - reader.read_exact(&mut commitments_len)?; - let commitments_len = usize::from(commitments_len[0]); - if commitments_len == 0 { - Err(io::Error::other("zero commitments in DkgCommitments"))?; - } - - let mut each_commitments_len = [0; 2]; - reader.read_exact(&mut each_commitments_len)?; - let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len)); - if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT { + if participation_len > u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() { Err(io::Error::other( - "commitments present in transaction exceeded transaction size limit", + "participation present in transaction exceeded transaction size limit", ))?; } - let mut commitments = vec![vec![]; commitments_len]; - for commitments in &mut commitments { - *commitments = vec![0; each_commitments_len]; - reader.read_exact(commitments)?; - } - commitments + let participation_len = usize::try_from(participation_len).unwrap(); + + let mut participation = vec![0; participation_len]; + reader.read_exact(&mut participation)?; + participation }; let signed = Signed::read_without_nonce(reader, 0)?; - Ok(Transaction::DkgCommitments { attempt, commitments, 
signed }) + Ok(Transaction::DkgParticipation { participation, signed }) } 2 => { @@ -305,36 +280,12 @@ impl ReadWrite for Transaction { reader.read_exact(&mut attempt)?; let attempt = u32::from_le_bytes(attempt); - let shares = { - let mut share_quantity = [0; 1]; - reader.read_exact(&mut share_quantity)?; - - let mut key_share_quantity = [0; 1]; - reader.read_exact(&mut key_share_quantity)?; - - let mut share_len = [0; 2]; - reader.read_exact(&mut share_len)?; - let share_len = usize::from(u16::from_le_bytes(share_len)); - - let mut all_shares = vec![]; - for _ in 0 .. share_quantity[0] { - let mut shares = vec![]; - for _ in 0 .. key_share_quantity[0] { - let mut share = vec![0; share_len]; - reader.read_exact(&mut share)?; - shares.push(share); - } - all_shares.push(shares); - } - all_shares - }; - let mut confirmation_nonces = [0; 64]; reader.read_exact(&mut confirmation_nonces)?; - let signed = Signed::read_without_nonce(reader, 1)?; + let signed = Signed::read_without_nonce(reader, 0)?; - Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed }) + Ok(Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed }) } 3 => { @@ -342,53 +293,21 @@ impl ReadWrite for Transaction { reader.read_exact(&mut attempt)?; let attempt = u32::from_le_bytes(attempt); - let mut accuser = [0; 2]; - reader.read_exact(&mut accuser)?; - let accuser = Participant::new(u16::from_le_bytes(accuser)) - .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?; - - let mut faulty = [0; 2]; - reader.read_exact(&mut faulty)?; - let faulty = Participant::new(u16::from_le_bytes(faulty)) - .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?; - - let mut blame_len = [0; 2]; - reader.read_exact(&mut blame_len)?; - let mut blame = vec![0; u16::from_le_bytes(blame_len).into()]; - reader.read_exact(&mut blame)?; - - // This shares a nonce with DkgConfirmed as only one is expected - let signed = 
Signed::read_without_nonce(reader, 2)?; - - Ok(Transaction::InvalidDkgShare { - attempt, - accuser, - faulty, - blame: Some(blame).filter(|blame| !blame.is_empty()), - signed, - }) - } - - 4 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - let mut confirmation_share = [0; 32]; reader.read_exact(&mut confirmation_share)?; - let signed = Signed::read_without_nonce(reader, 2)?; + let signed = Signed::read_without_nonce(reader, 0)?; - Ok(Transaction::DkgConfirmed { attempt, confirmation_share, signed }) + Ok(Transaction::DkgConfirmationShare { attempt, confirmation_share, signed }) } - 5 => { + 4 => { let mut block = [0; 32]; reader.read_exact(&mut block)?; Ok(Transaction::CosignSubstrateBlock(block)) } - 6 => { + 5 => { let mut block = [0; 32]; reader.read_exact(&mut block)?; let mut batch = [0; 4]; @@ -396,16 +315,16 @@ impl ReadWrite for Transaction { Ok(Transaction::Batch { block, batch: u32::from_le_bytes(batch) }) } - 7 => { + 6 => { let mut block = [0; 8]; reader.read_exact(&mut block)?; Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block))) } - 8 => SignData::read(reader).map(Transaction::SubstrateSign), - 9 => SignData::read(reader).map(Transaction::Sign), + 7 => SignData::read(reader).map(Transaction::SubstrateSign), + 8 => SignData::read(reader).map(Transaction::Sign), - 10 => { + 9 => { let mut plan = [0; 32]; reader.read_exact(&mut plan)?; @@ -420,7 +339,7 @@ impl ReadWrite for Transaction { Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature }) } - 11 => { + 10 => { let mut len = [0]; reader.read_exact(&mut len)?; let len = len[0]; @@ -445,109 +364,59 @@ impl ReadWrite for Transaction { fn write(&self, writer: &mut W) -> io::Result<()> { match self { - Transaction::RemoveParticipantDueToDkg { participant, signed } => { + Transaction::RemoveParticipant { participant, signed } => { writer.write_all(&[0])?; writer.write_all(&participant.to_bytes())?; 
signed.write_without_nonce(writer) } - Transaction::DkgCommitments { attempt, commitments, signed } => { + Transaction::DkgParticipation { participation, signed } => { writer.write_all(&[1])?; - writer.write_all(&attempt.to_le_bytes())?; - if commitments.is_empty() { - Err(io::Error::other("zero commitments in DkgCommitments"))? - } - writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?; - for commitments_i in commitments { - if commitments_i.len() != commitments[0].len() { - Err(io::Error::other("commitments of differing sizes in DkgCommitments"))? - } - } - writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?; - for commitments in commitments { - writer.write_all(commitments)?; - } + writer.write_all(&u32::try_from(participation.len()).unwrap().to_le_bytes())?; + writer.write_all(participation)?; signed.write_without_nonce(writer) } - Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => { + Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed } => { writer.write_all(&[2])?; writer.write_all(&attempt.to_le_bytes())?; - - // `shares` is a Vec which is supposed to map to a HashMap>. Since we - // bound participants to 150, this conversion is safe if a valid in-memory transaction. 
- writer.write_all(&[u8::try_from(shares.len()).unwrap()])?; - // This assumes at least one share is being sent to another party - writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?; - let share_len = shares[0][0].len(); - // For BLS12-381 G2, this would be: - // - A 32-byte share - // - A 96-byte ephemeral key - // - A 128-byte signature - // Hence why this has to be u16 - writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?; - - for these_shares in shares { - assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable"); - for share in these_shares { - assert_eq!(share.len(), share_len, "sent shares were of variable length"); - writer.write_all(share)?; - } - } - writer.write_all(confirmation_nonces)?; signed.write_without_nonce(writer) } - Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { + Transaction::DkgConfirmationShare { attempt, confirmation_share, signed } => { writer.write_all(&[3])?; writer.write_all(&attempt.to_le_bytes())?; - writer.write_all(&u16::from(*accuser).to_le_bytes())?; - writer.write_all(&u16::from(*faulty).to_le_bytes())?; - - // Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length - assert!(blame.as_ref().map_or(1, Vec::len) != 0); - let blame_len = - u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB"); - writer.write_all(&blame_len.to_le_bytes())?; - writer.write_all(blame.as_ref().unwrap_or(&vec![]))?; - - signed.write_without_nonce(writer) - } - - Transaction::DkgConfirmed { attempt, confirmation_share, signed } => { - writer.write_all(&[4])?; - writer.write_all(&attempt.to_le_bytes())?; writer.write_all(confirmation_share)?; signed.write_without_nonce(writer) } Transaction::CosignSubstrateBlock(block) => { - writer.write_all(&[5])?; + writer.write_all(&[4])?; writer.write_all(block) } Transaction::Batch { block, batch } => { - writer.write_all(&[6])?; + writer.write_all(&[5])?; 
writer.write_all(block)?; writer.write_all(&batch.to_le_bytes()) } Transaction::SubstrateBlock(block) => { - writer.write_all(&[7])?; + writer.write_all(&[6])?; writer.write_all(&block.to_le_bytes()) } Transaction::SubstrateSign(data) => { - writer.write_all(&[8])?; + writer.write_all(&[7])?; data.write(writer) } Transaction::Sign(data) => { - writer.write_all(&[9])?; + writer.write_all(&[8])?; data.write(writer) } Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => { - writer.write_all(&[10])?; + writer.write_all(&[9])?; writer.write_all(plan)?; writer .write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?; @@ -556,7 +425,7 @@ impl ReadWrite for Transaction { signature.write(writer) } Transaction::SlashReport(points, signed) => { - writer.write_all(&[11])?; + writer.write_all(&[10])?; writer.write_all(&[u8::try_from(points.len()).unwrap()])?; for points in points { writer.write_all(&points.to_le_bytes())?; @@ -570,15 +439,18 @@ impl ReadWrite for Transaction { impl TransactionTrait for Transaction { fn kind(&self) -> TransactionKind<'_> { match self { - Transaction::RemoveParticipantDueToDkg { participant, signed } => { + Transaction::RemoveParticipant { participant, signed } => { TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed) } - Transaction::DkgCommitments { attempt, commitments: _, signed } | - Transaction::DkgShares { attempt, signed, .. } | - Transaction::InvalidDkgShare { attempt, signed, .. } | - Transaction::DkgConfirmed { attempt, signed, .. } => { - TransactionKind::Signed((b"dkg", attempt).encode(), signed) + Transaction::DkgParticipation { signed, .. } => { + TransactionKind::Signed(b"dkg".to_vec(), signed) + } + Transaction::DkgConfirmationNonces { attempt, signed, .. } => { + TransactionKind::Signed((b"dkg_confirmation_nonces", attempt).encode(), signed) + } + Transaction::DkgConfirmationShare { attempt, signed, .. 
} => { + TransactionKind::Signed((b"dkg_confirmation_share", attempt).encode(), signed) } Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"), @@ -645,11 +517,11 @@ impl Transaction { fn signed(tx: &mut Transaction) -> (u32, &mut Signed) { #[allow(clippy::match_same_arms)] // Doesn't make semantic sense here let nonce = match tx { - Transaction::RemoveParticipantDueToDkg { .. } => 0, + Transaction::RemoveParticipant { .. } => 0, - Transaction::DkgCommitments { .. } => 0, - Transaction::DkgShares { .. } => 1, - Transaction::InvalidDkgShare { .. } | Transaction::DkgConfirmed { .. } => 2, + Transaction::DkgParticipation { .. } => 0, + // Uses a nonce of 0 as it has an internal attempt counter we distinguish by + Transaction::DkgConfirmationNonces { .. } | Transaction::DkgConfirmationShare { .. } => 0, Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), @@ -668,11 +540,10 @@ impl Transaction { nonce, #[allow(clippy::match_same_arms)] match tx { - Transaction::RemoveParticipantDueToDkg { ref mut signed, .. } | - Transaction::DkgCommitments { ref mut signed, .. } | - Transaction::DkgShares { ref mut signed, .. } | - Transaction::InvalidDkgShare { ref mut signed, .. } | - Transaction::DkgConfirmed { ref mut signed, .. } => signed, + Transaction::RemoveParticipant { ref mut signed, .. } | + Transaction::DkgParticipation { ref mut signed, .. } | + Transaction::DkgConfirmationNonces { ref mut signed, .. } => signed, + Transaction::DkgConfirmationShare { ref mut signed, .. 
} => signed, Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), diff --git a/substrate/client/src/serai/validator_sets.rs b/substrate/client/src/serai/validator_sets.rs index 87ccde46..27d7b6dc 100644 --- a/substrate/client/src/serai/validator_sets.rs +++ b/substrate/client/src/serai/validator_sets.rs @@ -108,6 +108,21 @@ impl<'a> SeraiValidatorSets<'a> { self.0.storage(PALLET, "CurrentSession", network).await } + pub async fn embedded_elliptic_curve_key( + &self, + validator: Public, + embedded_elliptic_curve: EmbeddedEllipticCurve, + ) -> Result>, SeraiError> { + self + .0 + .storage( + PALLET, + "EmbeddedEllipticCurveKeys", + (sp_core::hashing::blake2_128(&validator.encode()), validator, embedded_elliptic_curve), + ) + .await + } + pub async fn participants( &self, network: NetworkId, diff --git a/substrate/primitives/src/networks.rs b/substrate/primitives/src/networks.rs index 0f30d8de..930dd66c 100644 --- a/substrate/primitives/src/networks.rs +++ b/substrate/primitives/src/networks.rs @@ -37,6 +37,9 @@ pub enum NetworkId { } impl NetworkId { /// The embedded elliptic curve actively used for this network. + /// + /// This is guaranteed to return `[]`, `[Embedwards25519]`, or + /// `[Embedwards25519, *network specific curve*]`. pub fn embedded_elliptic_curves(&self) -> &'static [EmbeddedEllipticCurve] { match self { // We don't use any embedded elliptic curves for Serai as we don't perform a DKG for Serai diff --git a/substrate/validator-sets/pallet/src/lib.rs b/substrate/validator-sets/pallet/src/lib.rs index 4ceda9fa..f97b3d13 100644 --- a/substrate/validator-sets/pallet/src/lib.rs +++ b/substrate/validator-sets/pallet/src/lib.rs @@ -416,6 +416,11 @@ pub mod pallet { pub enum Error { /// Validator Set doesn't exist. NonExistentValidatorSet, + /// An invalid embedded elliptic curve key was specified. + /// + /// This error not being raised does not mean the key was valid. 
Solely that it wasn't detected + /// by this pallet as invalid. + InvalidEmbeddedEllipticCurveKey, /// Trying to perform an operation requiring an embedded elliptic curve key, without an /// embedded elliptic curve key. MissingEmbeddedEllipticCurveKey, @@ -988,6 +993,18 @@ pub mod pallet { key: BoundedVec>, ) -> DispatchResult { let validator = ensure_signed(origin)?; + + // We don't have the curve formulas, nor the BigInt arithmetic, necessary here to validate + // these keys. Instead, we solely check the key lengths. Validators are responsible to not + // provide invalid keys. + let expected_len = match embedded_elliptic_curve { + EmbeddedEllipticCurve::Embedwards25519 => 32, + EmbeddedEllipticCurve::Secq256k1 => 33, + }; + if key.len() != expected_len { + Err(Error::InvalidEmbeddedEllipticCurveKey)?; + } + // This does allow overwriting an existing key which... is unlikely to be done? // Yet it isn't an issue as we'll fix to the key as of any set's declaration (uncaring to if // it's distinct at the latest block)