From 2f564c230e648200c81b63c214001c1621991f23 Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Thu, 1 Aug 2024 03:49:28 -0400
Subject: [PATCH] Finish routing the new key gen in the processor

Doesn't touch the tests, coordinator, nor Substrate yet.

`cargo +nightly fmt && cargo +nightly-2024-07-01 clippy --all-features -p serai-processor` does pass.
---
 crypto/dkg/src/evrf/mod.rs      |  19 +--
 crypto/dkg/src/evrf/proof.rs    |  14 +--
 crypto/evrf/divisors/src/lib.rs |   5 +-
 processor/src/key_gen.rs        | 203 ++++++++++++++++++++------------
 processor/src/main.rs           |  65 +++++-----
 processor/src/networks/mod.rs   |   4 +-
 6 files changed, 174 insertions(+), 136 deletions(-)

diff --git a/crypto/dkg/src/evrf/mod.rs b/crypto/dkg/src/evrf/mod.rs
index 8b3723d4..cad78984 100644
--- a/crypto/dkg/src/evrf/mod.rs
+++ b/crypto/dkg/src/evrf/mod.rs
@@ -238,11 +238,7 @@ pub struct EvrfDkg {
     HashMap::G; 2], C::F)>>,
 }
 
-impl EvrfDkg
-where
-  <::EmbeddedCurve as Ciphersuite>::G:
-    DivisorCurve::F>,
-{
+impl EvrfDkg {
   // Form the initial transcript for the proofs.
   fn initial_transcript(
     invocation: [u8; 32],
@@ -497,10 +493,15 @@
     for i in valid.keys() {
       let evrf_public_key = evrf_public_keys[usize::from(u16::from(*i)) - 1];
 
-      // We remove all keys considered participating from the Vec in order to ensure they aren't
-      // counted multiple times. That could happen if a participant shares a key with another
-      // participant. While that's presumably some degree of invalid, we're robust against it
-      // regardless.
+      // Remove this key from the Vec to prevent double-counting
+      /*
+        Double-counting would be a risk if multiple participants shared an eVRF public key and
+        participated. This code does still allow such participants (in order to let participants
+        be weighted), and any one of them participating will count as all participating. This is
+        fine as any one such participant will be able to decrypt the shares for themselves and
+        all other participants, so this is still a key generated by an amount of participants who
+        could simply reconstruct the key.
+      */
       let start_len = evrf_public_keys.len();
       evrf_public_keys.retain(|key| *key != evrf_public_key);
       let end_len = evrf_public_keys.len();
diff --git a/crypto/dkg/src/evrf/proof.rs b/crypto/dkg/src/evrf/proof.rs
index 1cb45f36..ce9c57d1 100644
--- a/crypto/dkg/src/evrf/proof.rs
+++ b/crypto/dkg/src/evrf/proof.rs
@@ -29,7 +29,7 @@ use generalized_bulletproofs_ec_gadgets::*;
 
 /// A pair of curves to perform the eVRF with.
 pub trait EvrfCurve: Ciphersuite {
-  type EmbeddedCurve: Ciphersuite;
+  type EmbeddedCurve: Ciphersuite::F>>;
   type EmbeddedCurveParameters: DiscreteLogParameters;
 }
 
@@ -67,11 +67,7 @@ fn sample_point(rng: &mut (impl RngCore + CryptoRng)) -> C::G {
 #[derive(Clone, Debug)]
 pub struct EvrfGenerators(pub(crate) Generators);
 
-impl EvrfGenerators
-where
-  <::EmbeddedCurve as Ciphersuite>::G:
-    DivisorCurve::F>,
-{
+impl EvrfGenerators {
   /// Create a new set of generators.
   pub fn new(max_threshold: u16, max_participants: u16) -> EvrfGenerators {
     let g = C::generator();
@@ -117,11 +113,7 @@ impl fmt::Debug for EvrfVerifyResult {
 
 /// A struct to prove/verify eVRFs with.
 pub(crate) struct Evrf(PhantomData);
-impl Evrf
-where
-  <::EmbeddedCurve as Ciphersuite>::G:
-    DivisorCurve::F>,
-{
+impl Evrf {
   // Sample uniform points (via rejection-sampling) on the embedded elliptic curve
   fn transcript_to_points(
     seed: [u8; 32],
diff --git a/crypto/evrf/divisors/src/lib.rs b/crypto/evrf/divisors/src/lib.rs
index ade96cdf..08091553 100644
--- a/crypto/evrf/divisors/src/lib.rs
+++ b/crypto/evrf/divisors/src/lib.rs
@@ -15,10 +15,7 @@ pub use poly::*;
 mod tests;
 
 /// A curve usable with this library.
-pub trait DivisorCurve: Group
-where
-  Self::Scalar: PrimeField,
-{
+pub trait DivisorCurve: Group {
   /// An element of the field this curve is defined over.
   type FieldElement: PrimeField;
 
diff --git a/processor/src/key_gen.rs b/processor/src/key_gen.rs
index 263ce228..e34177e2 100644
--- a/processor/src/key_gen.rs
+++ b/processor/src/key_gen.rs
@@ -11,7 +11,7 @@ use ciphersuite::{
   group::{Group, GroupEncoding},
   Ciphersuite, Ristretto,
 };
-use frost::dkg::{Participant, ThresholdCore, ThresholdKeys, evrf::*};
+use dkg::{Participant, ThresholdCore, ThresholdKeys, evrf::*};
 
 use log::info;
 
@@ -20,6 +20,48 @@ use messages::key_gen::*;
 
 use crate::{Get, DbTxn, Db, create_db, networks::Network};
 
+mod generators {
+  use core::any::{TypeId, Any};
+  use std::{
+    sync::{LazyLock, Mutex},
+    collections::HashMap,
+  };
+
+  use frost::dkg::evrf::*;
+
+  use serai_client::validator_sets::primitives::MAX_KEY_SHARES_PER_SET;
+
+  /// A cache of the generators used by the eVRF DKG.
+  ///
+  /// This performs a lookup of the Ciphersuite to its generators. Since the Ciphersuite is a
+  /// generic, this takes advantage of `Any`. This static is isolated in a module to ensure
+  /// correctness can be evaluated solely by reviewing these few lines of code.
+  ///
+  /// This is arguably over-engineered as of right now, as we only need generators for Ristretto
+  /// and N::Curve. By having this HashMap, we enable de-duplication of the Ristretto == N::Curve
+  /// case, and we automatically support the n-curve case (rather than hard-coding to the 2-curve
+  /// case).
+  static GENERATORS: LazyLock>> =
+    LazyLock::new(|| Mutex::new(HashMap::new()));
+
+  pub(crate) fn generators() -> &'static EvrfGenerators {
+    GENERATORS
+      .lock()
+      .unwrap()
+      .entry(TypeId::of::())
+      .or_insert_with(|| {
+        // If we haven't prior needed generators for this Ciphersuite, generate new ones
+        Box::leak(Box::new(EvrfGenerators::::new(
+          ((MAX_KEY_SHARES_PER_SET * 2 / 3) + 1).try_into().unwrap(),
+          MAX_KEY_SHARES_PER_SET.try_into().unwrap(),
+        )))
+      })
+      .downcast_ref()
+      .unwrap()
+  }
+}
+use generators::generators;
+
 #[derive(Debug)]
 pub struct KeyConfirmed {
   pub substrate_keys: Vec>,
@@ -66,7 +108,7 @@ impl GeneratedKeysDb {
   fn save_keys(
     txn: &mut impl DbTxn,
     session: &Session,
-    substrate_keys: &[ThresholdCore],
+    substrate_keys: &[ThresholdKeys],
     network_keys: &[ThresholdKeys],
   ) {
     let mut keys = Zeroizing::new(vec![]);
@@ -74,7 +116,7 @@ impl GeneratedKeysDb {
       keys.extend(substrate_keys.serialize().as_slice());
       keys.extend(network_keys.serialize().as_slice());
     }
-    txn.put(Self::key(&session), keys);
+    txn.put(Self::key(session), keys);
   }
 }
 
@@ -176,7 +218,7 @@ fn coerce_keys(
       faulty.push(i);
 
       // Generate a random key
-      let mut rng = ChaCha20Rng::from_seed(Blake2s256::digest(&key).into());
+      let mut rng = ChaCha20Rng::from_seed(Blake2s256::digest(key).into());
       loop {
         let mut repr = <::G as GroupEncoding>::Repr::default();
         rng.fill_bytes(repr.as_mut());
@@ -201,11 +243,7 @@ pub struct KeyGen {
   network_evrf_private_key: Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>,
 }
 
-impl KeyGen
-where
-  <::EmbeddedCurve as Ciphersuite>::G:
-    ec_divisors::DivisorCurve::F>,
-{
+impl KeyGen {
   #[allow(clippy::new_ret_no_self)]
   pub fn new(
     db: D,
@@ -264,13 +302,6 @@ where
         let network_evrf_public_keys =
           evrf_public_keys.into_iter().map(|(_, key)| key).collect::>();
 
-        // Save the params
-        ParamsDb::set(
-          txn,
-          &session,
-          &(threshold, substrate_evrf_public_keys, network_evrf_public_keys),
-        );
-
         let mut participation = Vec::with_capacity(2048);
         let mut faulty = HashSet::new();
         {
@@ -278,9 +309,9 @@ where
           for faulty_i in faulty_is {
            faulty.insert(faulty_i);
          }
-          let participation = EvrfDkg::::participate(
+          EvrfDkg::::participate(
            &mut OsRng,
-            todo!("TODO"),
+            generators(),
            context(session, SUBSTRATE_KEY_CONTEXT),
            threshold,
            &coerced_keys,
@@ -297,7 +328,7 @@ where
          }
          EvrfDkg::::participate(
            &mut OsRng,
-            todo!("TODO"),
+            generators(),
            context(session, NETWORK_KEY_CONTEXT),
            threshold,
            &coerced_keys,
@@ -308,6 +339,13 @@ where
          .unwrap();
        }
 
+        // Save the params
+        ParamsDb::set(
+          txn,
+          &session,
+          &(threshold, substrate_evrf_public_keys, network_evrf_public_keys),
+        );
+
        // Send back our Participation and all faulty parties
        let mut faulty = faulty.into_iter().collect::>();
        faulty.sort();
@@ -324,21 +362,51 @@ where
      CoordinatorMessage::Participation { session, participant, participation } => {
        info!("Received participation from {:?}", participant);
 
-        // TODO: Read Pariticpations, declare faulty if necessary, then re-serialize
-        let substrate_participation: Vec = todo!("TODO");
-        let network_participation: Vec = todo!("TODO");
-
        let (threshold, substrate_evrf_public_keys, network_evrf_public_keys) =
          ParamsDb::get(txn, &session).unwrap();
+
+        let n = substrate_evrf_public_keys
+          .len()
+          .try_into()
+          .expect("performing a key gen with more than u16::MAX participants");
+
+        // Read these `Participation`s
+        // If they fail basic sanity checks, fail fast
+        let (substrate_participation, network_participation) = {
+          let mid_point = {
+            let mut participation = participation.as_slice();
+            let start_len = participation.len();
+
+            let blame = vec![ProcessorMessage::Blame { session, participant }];
+            if Participation::::read(&mut participation, n).is_err() {
+              return blame;
+            }
+            let len_at_mid_point = participation.len();
+            if Participation::::read(&mut participation, n).is_err() {
+              return blame;
+            };
+
+            // If they added random noise after their participations, they're faulty
+            // This prevents DoS by causing a slash upon such spam
+            if !participation.is_empty() {
+              return blame;
+            }
+
+            start_len - len_at_mid_point
+          };
+
+          // Instead of re-serializing the `Participation`s we read, we just use the relevant
+          // sections of the existing byte buffer
+          (participation[.. mid_point].to_vec(), participation[mid_point ..].to_vec())
+        };
+
+        // Since these are valid `Participation`s, save them
        let (mut substrate_participations, mut network_participations) =
          ParticipationDb::get(txn, &session)
            .unwrap_or((HashMap::with_capacity(1), HashMap::with_capacity(1)));
        assert!(
-          substrate_participations.insert(participant, substrate_participation).is_none(),
-          "received participation for someone multiple times"
-        );
-        assert!(
-          network_participations.insert(participant, network_participation).is_none(),
+          substrate_participations.insert(participant, substrate_participation).is_none() &&
+            network_participations.insert(participant, network_participation).is_none(),
          "received participation for someone multiple times"
        );
        ParticipationDb::set(
@@ -355,7 +423,15 @@ where
        for i in substrate_participations.keys() {
          let evrf_public_key = evrf_public_keys[usize::from(u16::from(*i)) - 1];
 
-          // Removes from Vec to prevent double-counting
+          // Remove this key from the Vec to prevent double-counting
+          /*
+            Double-counting would be a risk if multiple participants shared an eVRF public key
+            and participated. This code does still allow such participants (in order to let
+            participants be weighted), and any one of them participating will count as all
+            participating. This is fine as any one such participant will be able to decrypt
+            the shares for themselves and all other participants, so this is still a key
+            generated by an amount of participants who could simply reconstruct the key.
+          */
          let start_len = evrf_public_keys.len();
          evrf_public_keys.retain(|key| *key != evrf_public_key);
          let end_len = evrf_public_keys.len();
@@ -371,7 +447,7 @@ where
        let mut res = Vec::with_capacity(1);
        let substrate_dkg = match EvrfDkg::::verify(
          &mut OsRng,
-          &todo!("TODO"),
+          generators(),
          context(session, SUBSTRATE_KEY_CONTEXT),
          threshold,
          // Ignores the list of participants who couldn't have their keys coerced due to prior
@@ -382,14 +458,8 @@ where
          .map(|(key, participation)| {
            (
              *key,
-              Participation::read(
-                &mut participation.as_slice(),
-                substrate_evrf_public_keys
-                  .len()
-                  .try_into()
-                  .expect("performing a key gen with more than u16::MAX participants"),
-              )
-              .expect("prior read participation was invalid"),
+              Participation::read(&mut participation.as_slice(), n)
+                .expect("prior read participation was invalid"),
            )
          })
          .collect(),
@@ -418,7 +488,7 @@ where
        };
        let network_dkg = match EvrfDkg::::verify(
          &mut OsRng,
-          &todo!("TODO"),
+          generators(),
          context(session, NETWORK_KEY_CONTEXT),
          threshold,
          // Ignores the list of participants who couldn't have their keys coerced due to prior
@@ -429,14 +499,8 @@ where
          .map(|(key, participation)| {
            (
              *key,
-              Participation::read(
-                &mut participation.as_slice(),
-                network_evrf_public_keys
-                  .len()
-                  .try_into()
-                  .expect("performing a key gen with more than u16::MAX participants"),
-              )
-              .expect("prior read participation was invalid"),
+              Participation::read(&mut participation.as_slice(), n)
+                .expect("prior read participation was invalid"),
            )
          })
          .collect(),
@@ -463,36 +527,23 @@ where
          }
        };
 
-        /*
-        let mut these_network_keys = ThresholdKeys::new(these_network_keys);
-        N::tweak_keys(&mut these_network_keys);
+        let substrate_keys = substrate_dkg.keys(&self.substrate_evrf_private_key);
+        let mut network_keys = network_dkg.keys(&self.network_evrf_private_key);
+        // TODO: Some of these keys may be decrypted by us, yet not actually meant for us, if
+        // another validator set our eVRF public key as their eVRF public key. We either need to
+        // ensure the coordinator tracks amount of shares we're supposed to have by the eVRF public
+        // keys OR explicitly reduce to the keys we're supposed to have based on our `i` index.
+        for network_keys in &mut network_keys {
+          N::tweak_keys(network_keys);
+        }
+        GeneratedKeysDb::save_keys::(txn, &session, &substrate_keys, &network_keys);
 
-        substrate_keys.push(these_substrate_keys);
-        network_keys.push(these_network_keys);
-
-        let mut generated_substrate_key = None;
-        let mut generated_network_key = None;
-        for keys in substrate_keys.iter().zip(&network_keys) {
-          if generated_substrate_key.is_none() {
-            generated_substrate_key = Some(keys.0.group_key());
-            generated_network_key = Some(keys.1.group_key());
-          } else {
-            assert_eq!(generated_substrate_key, Some(keys.0.group_key()));
-            assert_eq!(generated_network_key, Some(keys.1.group_key()));
-          }
-        }
-
-        GeneratedKeysDb::save_keys::(txn, &id, &substrate_keys, &network_keys);
-
-        ProcessorMessage::GeneratedKeyPair {
-          id,
-          substrate_key: generated_substrate_key.unwrap().to_bytes(),
-          // TODO: This can be made more efficient since tweaked keys may be a subset of keys
-          network_key: generated_network_key.unwrap().to_bytes().as_ref().to_vec(),
-        }
-        */
-
-        todo!("TODO")
+        vec![ProcessorMessage::GeneratedKeyPair {
+          session,
+          substrate_key: substrate_keys[0].group_key().to_bytes(),
+          // TODO: This can be made more efficient since tweaked keys may be a subset of keys
+          network_key: network_keys[0].group_key().to_bytes().as_ref().to_vec(),
+        }]
      }
    }
  }
diff --git a/processor/src/main.rs b/processor/src/main.rs
index e0d97aa6..acfdfaf4 100644
--- a/processor/src/main.rs
+++ b/processor/src/main.rs
@@ -2,8 +2,11 @@ use std::{time::Duration, collections::HashMap};
 
 use zeroize::{Zeroize, Zeroizing};
 
-use transcript::{Transcript, RecommendedTranscript};
-use ciphersuite::{group::GroupEncoding, Ciphersuite};
+use ciphersuite::{
+  group::{ff::PrimeField, GroupEncoding},
+  Ciphersuite, Ristretto,
+};
+use dkg::evrf::EvrfCurve;
 
 use log::{info, warn};
 use tokio::time::sleep;
@@ -224,7 +227,9 @@ async fn handle_coordinator_msg(
 
  match msg.msg.clone() {
    CoordinatorMessage::KeyGen(msg) => {
-      coordinator.send(tributary_mutable.key_gen.handle(txn, msg)).await;
+      for msg in tributary_mutable.key_gen.handle(txn, msg) {
+        coordinator.send(msg).await;
+      }
    }
 
    CoordinatorMessage::Sign(msg) => {
@@ -485,41 +490,31 @@ async fn boot(
  network: &N,
  coordinator: &mut Co,
 ) -> (D, TributaryMutable, SubstrateMutable) {
-  let mut entropy_transcript = {
-    let entropy = Zeroizing::new(env::var("ENTROPY").expect("entropy wasn't specified"));
-    if entropy.len() != 64 {
-      panic!("entropy isn't the right length");
+  fn read_key_from_env(label: &'static str) -> Zeroizing {
+    let key_hex =
+      Zeroizing::new(env::var(label).unwrap_or_else(|| panic!("{label} wasn't provided")));
+    let bytes = Zeroizing::new(
+      hex::decode(key_hex).unwrap_or_else(|_| panic!("{label} wasn't a valid hex string")),
+    );
+
+    let mut repr = ::Repr::default();
+    if repr.as_ref().len() != bytes.len() {
+      panic!("{label} wasn't the correct length");
    }
-    let mut bytes =
-      Zeroizing::new(hex::decode(entropy).map_err(|_| ()).expect("entropy wasn't hex-formatted"));
-    if bytes.len() != 32 {
-      bytes.zeroize();
-      panic!("entropy wasn't 32 bytes");
-    }
-    let mut entropy = Zeroizing::new([0; 32]);
-    let entropy_mut: &mut [u8] = entropy.as_mut();
-    entropy_mut.copy_from_slice(bytes.as_ref());
-
-    let mut transcript = RecommendedTranscript::new(b"Serai Processor Entropy");
-    transcript.append_message(b"entropy", entropy);
-    transcript
-  };
-
-  // TODO: Save a hash of the entropy to the DB and make sure the entropy didn't change
-
-  let mut entropy = |label| {
-    let mut challenge = entropy_transcript.challenge(label);
-    let mut res = Zeroizing::new([0; 32]);
-    let res_mut: &mut [u8] = res.as_mut();
-    res_mut.copy_from_slice(&challenge[.. 32]);
-    challenge.zeroize();
+    repr.as_mut().copy_from_slice(bytes.as_slice());
+    let res = Zeroizing::new(
+      Option::from(::from_repr(repr))
+        .unwrap_or_else(|| panic!("{label} wasn't a valid scalar")),
+    );
+    repr.as_mut().zeroize();
    res
-  };
+  }
 
-  // We don't need to re-issue GenerateKey orders because the coordinator is expected to
-  // schedule/notify us of new attempts
-  // TODO: Is this above comment still true? Not at all due to the planned lack of DKG timeouts?
-  let key_gen = KeyGen::::new(raw_db.clone(), entropy(b"key-gen_entropy"));
+  let key_gen = KeyGen::::new(
+    raw_db.clone(),
+    read_key_from_env::<::EmbeddedCurve>("SUBSTRATE_EVRF_KEY"),
+    read_key_from_env::<::EmbeddedCurve>("NETWORK_EVRF_KEY"),
+  );
 
  let (multisig_manager, current_keys, actively_signing) =
    MultisigManager::new(raw_db, network).await;
diff --git a/processor/src/networks/mod.rs b/processor/src/networks/mod.rs
index b6118571..81838ae1 100644
--- a/processor/src/networks/mod.rs
+++ b/processor/src/networks/mod.rs
@@ -241,9 +241,11 @@ pub struct PreparedSend {
 }
 
 #[async_trait]
+#[rustfmt::skip]
 pub trait Network: 'static + Send + Sync + Clone + PartialEq + Debug {
   /// The elliptic curve used for this network.
-  type Curve: Curve + EvrfCurve;
+  type Curve: Curve
+    + EvrfCurve::F>>>;
   /// The type representing the transaction for this network.
   type Transaction: Transaction;
   // TODO: Review use of
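
Note on the technique: the `mod generators` block added to processor/src/key_gen.rs keys a static HashMap by TypeId and stores type-erased `&'static` values, so each Ciphersuite's eVRF generators are only built once and are shared thereafter. Below is a minimal, standalone sketch of that caching pattern using only the standard library; the `cached` helper, its `Default` bound, and the `Vec<u8>` usage are illustrative stand-ins, not items from this patch.

    use core::any::{Any, TypeId};
    use std::{
      collections::HashMap,
      sync::{LazyLock, Mutex},
    };

    /// Fetch the per-type value for `T`, building and leaking it on first use.
    fn cached<T: Default + Send + Sync + 'static>() -> &'static T {
      // One map for every `T`, keyed by the concrete type's TypeId
      static CACHE: LazyLock<Mutex<HashMap<TypeId, &'static (dyn Send + Sync + Any)>>> =
        LazyLock::new(|| Mutex::new(HashMap::new()));
      CACHE
        .lock()
        .unwrap()
        .entry(TypeId::of::<T>())
        // Build once and leak the Box to obtain a `&'static`, as the patch does for EvrfGenerators
        .or_insert_with(|| Box::leak(Box::new(T::default())))
        // Recover the concrete type; the TypeId key guarantees this downcast succeeds
        .downcast_ref()
        .unwrap()
    }

    fn main() {
      // Repeated calls for the same type return the same leaked instance
      let a: &'static Vec<u8> = cached::<Vec<u8>>();
      let b: &'static Vec<u8> = cached::<Vec<u8>>();
      assert!(core::ptr::eq(a, b));
    }

Leaking the boxed value is what lets the function hand out a `&'static` borrow without holding the mutex, and it is bounded: at most one allocation per distinct type is ever leaked.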