Rewrite processor key-gen around the eVRF DKG

Still a WIP.
This commit is contained in:
Luke Parker
2024-07-31 22:36:48 -04:00
parent fb7e966b94
commit 12f74e1813
6 changed files with 382 additions and 497 deletions

2
Cargo.lock generated
View File

@@ -8429,12 +8429,14 @@ version = "0.1.0"
dependencies = [
"async-trait",
"bitcoin-serai",
"blake2",
"borsh",
"ciphersuite",
"const-hex",
"dalek-ff-group",
"dkg",
"dockertest",
"ec-divisors",
"env_logger",
"ethereum-serai",
"flexible-transcript",

View File

@@ -491,14 +491,26 @@ where
// We check at least t key shares of people have participated in contributing entropy
// Since the key shares of the participants exceed t, meaning if they're malicious they can
// reconstruct the key regardless, this is safe to the threshold
let mut participating_weight = 0;
for i in valid.keys() {
let evrf_public_key = evrf_public_keys[usize::from(u16::from(*i)) - 1];
participating_weight +=
evrf_public_keys.iter().filter(|key| **key == evrf_public_key).count();
}
if participating_weight < usize::from(t) {
return Ok(VerifyResult::NotEnoughParticipants);
{
let mut participating_weight = 0;
let mut evrf_public_keys = evrf_public_keys.to_vec();
for i in valid.keys() {
let evrf_public_key = evrf_public_keys[usize::from(u16::from(*i)) - 1];
// We remove all keys considered participating from the Vec in order to ensure they aren't
// counted multiple times. That could happen if a participant shares a key with another
// participant. While sharing a key with another participant is presumably invalid behavior,
// we're robust against it regardless.
let start_len = evrf_public_keys.len();
evrf_public_keys.retain(|key| *key != evrf_public_key);
let end_len = evrf_public_keys.len();
let count = start_len - end_len;
participating_weight += count;
}
if participating_weight < usize::from(t) {
return Ok(VerifyResult::NotEnoughParticipants);
}
}
// If we now have >= t participations, calculate the group key and verification shares

View File

@@ -36,7 +36,9 @@ serde_json = { version = "1", default-features = false, features = ["std"] }
# Cryptography
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }
blake2 = { version = "0.10", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std"] }
ec-divisors = { package = "ec-divisors", path = "../crypto/evrf/divisors", default-features = false }
dkg = { package = "dkg", path = "../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] }
frost = { package = "modular-frost", path = "../crypto/frost", default-features = false, features = ["ristretto"] }
frost-schnorrkel = { path = "../crypto/schnorrkel", default-features = false }

View File

@@ -3,7 +3,7 @@ use std::collections::HashMap;
use scale::{Encode, Decode};
use borsh::{BorshSerialize, BorshDeserialize};
use dkg::{Participant, ThresholdParams};
use dkg::Participant;
use serai_primitives::BlockHash;
use in_instructions_primitives::{Batch, SignedBatch};
@@ -19,41 +19,13 @@ pub struct SubstrateContext {
pub mod key_gen {
use super::*;
#[derive(
Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize,
)]
pub struct KeyGenId {
pub session: Session,
pub attempt: u32,
}
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub enum CoordinatorMessage {
// Instructs the Processor to begin the key generation process.
// TODO: Should this be moved under Substrate?
GenerateKey {
id: KeyGenId,
params: ThresholdParams,
shares: u16,
},
// Received commitments for the specified key generation protocol.
Commitments {
id: KeyGenId,
commitments: HashMap<Participant, Vec<u8>>,
},
// Received shares for the specified key generation protocol.
Shares {
id: KeyGenId,
shares: Vec<HashMap<Participant, Vec<u8>>>,
},
/// Instruction to verify a blame accusation.
VerifyBlame {
id: KeyGenId,
accuser: Participant,
accused: Participant,
share: Vec<u8>,
blame: Option<Vec<u8>>,
},
GenerateKey { session: Session, threshold: u16, evrf_public_keys: Vec<([u8; 32], Vec<u8>)> },
// Received participations for the specified key generation protocol.
Participation { session: Session, participant: Participant, participation: Vec<u8> },
}
impl CoordinatorMessage {
@@ -64,40 +36,12 @@ pub mod key_gen {
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub enum ProcessorMessage {
// Created commitments for the specified key generation protocol.
Commitments {
id: KeyGenId,
commitments: Vec<Vec<u8>>,
},
// Participant published invalid commitments.
InvalidCommitments {
id: KeyGenId,
faulty: Participant,
},
// Created shares for the specified key generation protocol.
Shares {
id: KeyGenId,
shares: Vec<HashMap<Participant, Vec<u8>>>,
},
// Participant published an invalid share.
#[rustfmt::skip]
InvalidShare {
id: KeyGenId,
accuser: Participant,
faulty: Participant,
blame: Option<Vec<u8>>,
},
// Participated in the specified key generation protocol.
Participation { session: Session, participation: Vec<u8> },
// Resulting keys from the specified key generation protocol.
GeneratedKeyPair {
id: KeyGenId,
substrate_key: [u8; 32],
network_key: Vec<u8>,
},
GeneratedKeyPair { session: Session, substrate_key: [u8; 32], network_key: Vec<u8> },
// Blame this participant.
Blame {
id: KeyGenId,
participant: Participant,
},
Blame { session: Session, participant: Participant },
}
}
@@ -328,16 +272,19 @@ impl CoordinatorMessage {
pub fn intent(&self) -> Vec<u8> {
match self {
CoordinatorMessage::KeyGen(msg) => {
// Unique since key gen ID embeds the session and attempt
let (sub, id) = match msg {
key_gen::CoordinatorMessage::GenerateKey { id, .. } => (0, id),
key_gen::CoordinatorMessage::Commitments { id, .. } => (1, id),
key_gen::CoordinatorMessage::Shares { id, .. } => (2, id),
key_gen::CoordinatorMessage::VerifyBlame { id, .. } => (3, id),
// Unique since we only have one attempt per session
key_gen::CoordinatorMessage::GenerateKey { session, .. } => {
(0, borsh::to_vec(session).unwrap())
}
// Unique since one participation per participant per session
key_gen::CoordinatorMessage::Participation { session, participant, .. } => {
(1, borsh::to_vec(&(session, participant)).unwrap())
}
};
let mut res = vec![COORDINATOR_UID, TYPE_KEY_GEN_UID, sub];
res.extend(&id.encode());
res.extend(&id);
res
}
CoordinatorMessage::Sign(msg) => {
@@ -400,17 +347,21 @@ impl ProcessorMessage {
match self {
ProcessorMessage::KeyGen(msg) => {
let (sub, id) = match msg {
// Unique since KeyGenId
key_gen::ProcessorMessage::Commitments { id, .. } => (0, id),
key_gen::ProcessorMessage::InvalidCommitments { id, .. } => (1, id),
key_gen::ProcessorMessage::Shares { id, .. } => (2, id),
key_gen::ProcessorMessage::InvalidShare { id, .. } => (3, id),
key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => (4, id),
key_gen::ProcessorMessage::Blame { id, .. } => (5, id),
// Unique since we only have one participation per session (due to no re-attempts)
key_gen::ProcessorMessage::Participation { session, .. } => {
(0, borsh::to_vec(session).unwrap())
}
key_gen::ProcessorMessage::GeneratedKeyPair { session, .. } => {
(1, borsh::to_vec(session).unwrap())
}
// Unique since we only blame a participant once (as this is fatal)
key_gen::ProcessorMessage::Blame { session, participant } => {
(2, borsh::to_vec(&(session, participant)).unwrap())
}
};
let mut res = vec![PROCESSOR_UID, TYPE_KEY_GEN_UID, sub];
res.extend(&id.encode());
res.extend(&id);
res
}
ProcessorMessage::Sign(msg) => {

View File

@@ -1,18 +1,17 @@
use std::collections::HashMap;
use std::collections::{HashSet, HashMap};
use zeroize::Zeroizing;
use rand_core::SeedableRng;
use rand_core::{RngCore, SeedableRng, OsRng};
use rand_chacha::ChaCha20Rng;
use blake2::{Digest, Blake2s256};
use transcript::{Transcript, RecommendedTranscript};
use ciphersuite::group::GroupEncoding;
use frost::{
curve::{Ciphersuite, Ristretto},
dkg::{
DkgError, Participant, ThresholdParams, ThresholdCore, ThresholdKeys, encryption::*, pedpop::*,
},
use ciphersuite::{
group::{Group, GroupEncoding},
Ciphersuite, Ristretto,
};
use frost::dkg::{Participant, ThresholdCore, ThresholdKeys, evrf::*};
use log::info;
@@ -29,14 +28,15 @@ pub struct KeyConfirmed<C: Ciphersuite> {
create_db!(
KeyGenDb {
ParamsDb: (session: &Session, attempt: u32) -> (ThresholdParams, u16),
// Not scoped to the set since that'd have latter attempts overwrite former
// A former attempt may become the finalized attempt, even if it doesn't in a timely manner
// Overwriting its commitments would be accordingly poor
CommitmentsDb: (key: &KeyGenId) -> HashMap<Participant, Vec<u8>>,
GeneratedKeysDb: (session: &Session, substrate_key: &[u8; 32], network_key: &[u8]) -> Vec<u8>,
// These do assume a key is only used once across sets, which holds true so long as a single
// participant is honest in their execution of the protocol
ParamsDb: (session: &Session) -> (u16, Vec<[u8; 32]>, Vec<Vec<u8>>),
ParticipationDb: (session: &Session) -> (
HashMap<Participant, Vec<u8>>,
HashMap<Participant, Vec<u8>>,
),
GeneratedKeysDb: (session: &Session) -> Vec<u8>,
// These do assume a key is only used once across sets, which holds true if the threshold is
// honest
// TODO: Remove this assumption
KeysDb: (network_key: &[u8]) -> Vec<u8>,
SessionDb: (network_key: &[u8]) -> Session,
NetworkKeyDb: (session: Session) -> Vec<u8>,
@@ -65,7 +65,7 @@ impl GeneratedKeysDb {
fn save_keys<N: Network>(
txn: &mut impl DbTxn,
id: &KeyGenId,
session: &Session,
substrate_keys: &[ThresholdCore<Ristretto>],
network_keys: &[ThresholdKeys<N::Curve>],
) {
@@ -74,14 +74,7 @@ impl GeneratedKeysDb {
keys.extend(substrate_keys.serialize().as_slice());
keys.extend(network_keys.serialize().as_slice());
}
txn.put(
Self::key(
&id.session,
&substrate_keys[0].group_key().to_bytes(),
network_keys[0].group_key().to_bytes().as_ref(),
),
keys,
);
txn.put(Self::key(&session), keys);
}
}
@@ -91,11 +84,8 @@ impl KeysDb {
session: Session,
key_pair: &KeyPair,
) -> (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>) {
let (keys_vec, keys) = GeneratedKeysDb::read_keys::<N>(
txn,
&GeneratedKeysDb::key(&session, &key_pair.0 .0, key_pair.1.as_ref()),
)
.unwrap();
let (keys_vec, keys) =
GeneratedKeysDb::read_keys::<N>(txn, &GeneratedKeysDb::key(&session)).unwrap();
assert_eq!(key_pair.0 .0, keys.0[0].group_key().to_bytes());
assert_eq!(
{
@@ -130,32 +120,106 @@ impl KeysDb {
}
}
type SecretShareMachines<N> =
Vec<(SecretShareMachine<Ristretto>, SecretShareMachine<<N as Network>::Curve>)>;
type KeyMachines<N> = Vec<(KeyMachine<Ristretto>, KeyMachine<<N as Network>::Curve>)>;
/*
On the Serai blockchain, users specify their public keys on the embedded curves. Substrate does
not have the libraries for the embedded curves and is unable to evaluate if the keys are valid
or not.
We could add the libraries for the embedded curves to the blockchain, yet this would be a
non-trivial scope for what's effectively an embedded context. It'd also permanently bind our
consensus to these arbitrary curves. We would have the benefit of being able to also require PoKs
for the keys, ensuring no one uses someone else's key (creating oddities there). Since someone
who uses someone else's key can't actually participate, all it does in effect is give more key
shares to the holder of the private key, and make us unable to rely on eVRF keys as a secure way
to index validators (hence the usage of `Participant` throughout the messages here).
We could remove invalid keys from the DKG, yet this would create a view of the DKG only the
processor (which does have the embedded curves) has. We'd need to reconcile it with the view of
the DKG which does include all keys (even the invalid keys).
The easiest solution is to keep the views consistent by replacing invalid keys with valid keys
(which no one has the private key for). This keeps the view consistent. This does prevent those
who posted invalid keys from participating, and receiving their keys, which is the understood and
declared effect of them posting invalid keys. Since at least `t` people must honestly participate
for the DKG to complete, and since their honest participation means they had valid keys, we do
ensure at least `t` people participated and the DKG result can be reconstructed.
We do lose fault tolerance, yet only by losing those faulty. Accordingly, this is accepted.
*/
/// Coerce raw eVRF public-key bytes onto the embedded curve.
///
/// Returns one point per input (in order) plus the list of `Participant`s whose key bytes did
/// not decode to a valid, non-identity point. Invalid keys are deterministically replaced
/// (seeded from a hash of the invalid bytes) so every honest party derives the same substitute
/// point and all views of the participant set remain consistent. The replacement point is
/// presumably one no one knows the discrete logarithm for — see the module comment above.
fn coerce_keys<C: EvrfCurve>(
  key_bytes: &[impl AsRef<[u8]>],
) -> (Vec<<C::EmbeddedCurve as Ciphersuite>::G>, Vec<Participant>) {
  // Decode a single key, rejecting mis-sized encodings, non-canonical points, and the identity
  fn evrf_key<C: EvrfCurve>(key: &[u8]) -> Option<<C::EmbeddedCurve as Ciphersuite>::G> {
    let mut repr = <<C::EmbeddedCurve as Ciphersuite>::G as GroupEncoding>::Repr::default();
    if repr.as_ref().len() != key.len() {
      None?;
    }
    repr.as_mut().copy_from_slice(key);
    let point = Option::<<C::EmbeddedCurve as Ciphersuite>::G>::from(<_>::from_bytes(&repr))?;
    // The identity is a valid encoding yet unusable as a public key
    if bool::from(point.is_identity()) {
      None?;
    }
    Some(point)
  }

  let mut keys = Vec::with_capacity(key_bytes.len());
  let mut faulty = vec![];
  for (i, key) in key_bytes.iter().enumerate() {
    // Participants are 1-indexed, hence the `1 +`
    let i = Participant::new(
      1 + u16::try_from(i).expect("performing a key gen with more than u16::MAX participants"),
    )
    .unwrap();
    keys.push(match evrf_key::<C>(key.as_ref()) {
      Some(key) => key,
      None => {
        // Mark this participant faulty
        faulty.push(i);
        // Generate a random key
        // Seeding the RNG from the invalid bytes makes the substitution deterministic, so all
        // honest parties independently compute the same replacement point
        let mut rng = ChaCha20Rng::from_seed(Blake2s256::digest(&key).into());
        loop {
          let mut repr = <<C::EmbeddedCurve as Ciphersuite>::G as GroupEncoding>::Repr::default();
          rng.fill_bytes(repr.as_mut());
          // NOTE(review): unlike `evrf_key`, this doesn't reject the identity point — the odds
          // of sampling it are negligible, but confirm that's acceptable here
          if let Some(key) =
            Option::<<C::EmbeddedCurve as Ciphersuite>::G>::from(<_>::from_bytes(&repr))
          {
            break key;
          }
        }
      }
    });
  }
  (keys, faulty)
}
#[derive(Debug)]
pub struct KeyGen<N: Network, D: Db> {
db: D,
entropy: Zeroizing<[u8; 32]>,
active_commit: HashMap<Session, (SecretShareMachines<N>, Vec<Vec<u8>>)>,
#[allow(clippy::type_complexity)]
active_share: HashMap<Session, (KeyMachines<N>, Vec<HashMap<Participant, Vec<u8>>>)>,
substrate_evrf_private_key:
Zeroizing<<<Ristretto as EvrfCurve>::EmbeddedCurve as Ciphersuite>::F>,
network_evrf_private_key: Zeroizing<<<N::Curve as EvrfCurve>::EmbeddedCurve as Ciphersuite>::F>,
}
impl<N: Network, D: Db> KeyGen<N, D> {
impl<N: Network, D: Db> KeyGen<N, D>
where
<<N::Curve as EvrfCurve>::EmbeddedCurve as Ciphersuite>::G:
ec_divisors::DivisorCurve<FieldElement = <N::Curve as Ciphersuite>::F>,
{
#[allow(clippy::new_ret_no_self)]
pub fn new(db: D, entropy: Zeroizing<[u8; 32]>) -> KeyGen<N, D> {
KeyGen { db, entropy, active_commit: HashMap::new(), active_share: HashMap::new() }
pub fn new(
db: D,
substrate_evrf_private_key: Zeroizing<
<<Ristretto as EvrfCurve>::EmbeddedCurve as Ciphersuite>::F,
>,
network_evrf_private_key: Zeroizing<<<N::Curve as EvrfCurve>::EmbeddedCurve as Ciphersuite>::F>,
) -> KeyGen<N, D> {
KeyGen { db, substrate_evrf_private_key, network_evrf_private_key }
}
pub fn in_set(&self, session: &Session) -> bool {
// We determine if we're in set using if we have the parameters for a session's key generation
// The usage of 0 for the attempt is valid so long as we aren't malicious and accordingly
// aren't fatally slashed
// TODO: Revisit once we do DKG removals for being offline
ParamsDb::get(&self.db, session, 0).is_some()
ParamsDb::get(&self.db, session).is_some()
}
#[allow(clippy::type_complexity)]
@@ -179,403 +243,256 @@ impl<N: Network, D: Db> KeyGen<N, D> {
&mut self,
txn: &mut D::Transaction<'_>,
msg: CoordinatorMessage,
) -> ProcessorMessage {
const SUBSTRATE_KEY_CONTEXT: &str = "substrate";
const NETWORK_KEY_CONTEXT: &str = "network";
let context = |id: &KeyGenId, key| {
) -> Vec<ProcessorMessage> {
const SUBSTRATE_KEY_CONTEXT: &[u8] = b"substrate";
const NETWORK_KEY_CONTEXT: &[u8] = b"network";
let context = |session: Session, key| {
// TODO2: Also embed the chain ID/genesis block
let mut transcript = RecommendedTranscript::new(b"Serai Key Gen");
transcript.append_message(b"session", id.session.0.to_le_bytes());
let mut transcript = RecommendedTranscript::new(b"Serai eVRF Key Gen");
transcript.append_message(b"network", N::ID);
transcript.append_message(b"attempt", id.attempt.to_le_bytes());
transcript.append_message(b"session", session.0.to_le_bytes());
transcript.append_message(b"key", key);
<[u8; 32]>::try_from(&(&transcript.challenge(b"context"))[.. 32]).unwrap()
};
let rng = |label, id: KeyGenId| {
let mut transcript = RecommendedTranscript::new(label);
transcript.append_message(b"entropy", &self.entropy);
transcript.append_message(b"context", context(&id, "rng"));
ChaCha20Rng::from_seed(transcript.rng_seed(b"rng"))
};
let coefficients_rng = |id| rng(b"Key Gen Coefficients", id);
let secret_shares_rng = |id| rng(b"Key Gen Secret Shares", id);
let share_rng = |id| rng(b"Key Gen Share", id);
let key_gen_machines = |id, params: ThresholdParams, shares| {
let mut rng = coefficients_rng(id);
let mut machines = vec![];
let mut commitments = vec![];
for s in 0 .. shares {
let params = ThresholdParams::new(
params.t(),
params.n(),
Participant::new(u16::from(params.i()) + s).unwrap(),
)
.unwrap();
let substrate = KeyGenMachine::new(params, context(&id, SUBSTRATE_KEY_CONTEXT))
.generate_coefficients(&mut rng);
let network = KeyGenMachine::new(params, context(&id, NETWORK_KEY_CONTEXT))
.generate_coefficients(&mut rng);
machines.push((substrate.0, network.0));
let mut serialized = vec![];
substrate.1.write(&mut serialized).unwrap();
network.1.write(&mut serialized).unwrap();
commitments.push(serialized);
}
(machines, commitments)
};
let secret_share_machines = |id,
params: ThresholdParams,
machines: SecretShareMachines<N>,
commitments: HashMap<Participant, Vec<u8>>|
-> Result<_, ProcessorMessage> {
let mut rng = secret_shares_rng(id);
#[allow(clippy::type_complexity)]
fn handle_machine<C: Ciphersuite>(
rng: &mut ChaCha20Rng,
id: KeyGenId,
machine: SecretShareMachine<C>,
commitments: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
) -> Result<
(KeyMachine<C>, HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>),
ProcessorMessage,
> {
match machine.generate_secret_shares(rng, commitments) {
Ok(res) => Ok(res),
Err(e) => match e {
DkgError::ZeroParameter(_, _) |
DkgError::InvalidThreshold(_, _) |
DkgError::InvalidParticipant(_, _) |
DkgError::InvalidSigningSet |
DkgError::InvalidShare { .. } => unreachable!("{e:?}"),
DkgError::InvalidParticipantQuantity(_, _) |
DkgError::DuplicatedParticipant(_) |
DkgError::MissingParticipant(_) => {
panic!("coordinator sent invalid DKG commitments: {e:?}")
}
DkgError::InvalidCommitments(i) => {
Err(ProcessorMessage::InvalidCommitments { id, faulty: i })?
}
},
}
}
let mut substrate_commitments = HashMap::new();
let mut network_commitments = HashMap::new();
for i in 1 ..= params.n() {
let i = Participant::new(i).unwrap();
let mut commitments = commitments[&i].as_slice();
substrate_commitments.insert(
i,
EncryptionKeyMessage::<Ristretto, Commitments<Ristretto>>::read(&mut commitments, params)
.map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?,
);
network_commitments.insert(
i,
EncryptionKeyMessage::<N::Curve, Commitments<N::Curve>>::read(&mut commitments, params)
.map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?,
);
if !commitments.is_empty() {
// Malicious Participant included extra bytes in their commitments
// (a potential DoS attack)
Err(ProcessorMessage::InvalidCommitments { id, faulty: i })?;
}
}
let mut key_machines = vec![];
let mut shares = vec![];
for (m, (substrate_machine, network_machine)) in machines.into_iter().enumerate() {
let actual_i = Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap();
let mut substrate_commitments = substrate_commitments.clone();
substrate_commitments.remove(&actual_i);
let (substrate_machine, mut substrate_shares) =
handle_machine::<Ristretto>(&mut rng, id, substrate_machine, substrate_commitments)?;
let mut network_commitments = network_commitments.clone();
network_commitments.remove(&actual_i);
let (network_machine, network_shares) =
handle_machine(&mut rng, id, network_machine, network_commitments.clone())?;
key_machines.push((substrate_machine, network_machine));
let mut these_shares: HashMap<_, _> =
substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect();
for (i, share) in &mut these_shares {
share.extend(network_shares[i].serialize());
}
shares.push(these_shares);
}
Ok((key_machines, shares))
};
match msg {
CoordinatorMessage::GenerateKey { id, params, shares } => {
info!("Generating new key. ID: {id:?} Params: {params:?} Shares: {shares}");
CoordinatorMessage::GenerateKey { session, threshold, evrf_public_keys } => {
info!("Generating new key. Session: {session:?}");
// Remove old attempts
if self.active_commit.remove(&id.session).is_none() &&
self.active_share.remove(&id.session).is_none()
let substrate_evrf_public_keys =
evrf_public_keys.iter().map(|(key, _)| *key).collect::<Vec<_>>();
let network_evrf_public_keys =
evrf_public_keys.into_iter().map(|(_, key)| key).collect::<Vec<_>>();
// Save the params
ParamsDb::set(
txn,
&session,
&(threshold, substrate_evrf_public_keys, network_evrf_public_keys),
);
let mut participation = Vec::with_capacity(2048);
let mut faulty = HashSet::new();
{
// If we haven't handled this session before, save the params
ParamsDb::set(txn, &id.session, id.attempt, &(params, shares));
}
let (machines, commitments) = key_gen_machines(id, params, shares);
self.active_commit.insert(id.session, (machines, commitments.clone()));
ProcessorMessage::Commitments { id, commitments }
}
CoordinatorMessage::Commitments { id, mut commitments } => {
info!("Received commitments for {:?}", id);
if self.active_share.contains_key(&id.session) {
// We should've been told of a new attempt before receiving commitments again
// The coordinator is either missing messages or repeating itself
// Either way, it's faulty
panic!("commitments when already handled commitments");
}
let (params, share_quantity) = ParamsDb::get(txn, &id.session, id.attempt).unwrap();
// Unwrap the machines, rebuilding them if we didn't have them in our cache
// We won't if the processor rebooted
// This *may* be inconsistent if we receive a KeyGen for attempt x, then commitments for
// attempt y
// The coordinator is trusted to be proper in this regard
let (prior, our_commitments) = self
.active_commit
.remove(&id.session)
.unwrap_or_else(|| key_gen_machines(id, params, share_quantity));
for (i, our_commitments) in our_commitments.into_iter().enumerate() {
assert!(commitments
.insert(
Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(),
our_commitments,
)
.is_none());
}
CommitmentsDb::set(txn, &id, &commitments);
match secret_share_machines(id, params, prior, commitments) {
Ok((machines, shares)) => {
self.active_share.insert(id.session, (machines, shares.clone()));
ProcessorMessage::Shares { id, shares }
let (coerced_keys, faulty_is) = coerce_keys::<Ristretto>(&substrate_evrf_public_keys);
for faulty_i in faulty_is {
faulty.insert(faulty_i);
}
Err(e) => e,
}
}
CoordinatorMessage::Shares { id, shares } => {
info!("Received shares for {:?}", id);
let (params, share_quantity) = ParamsDb::get(txn, &id.session, id.attempt).unwrap();
// Same commentary on inconsistency as above exists
let (machines, our_shares) = self.active_share.remove(&id.session).unwrap_or_else(|| {
let prior = key_gen_machines(id, params, share_quantity).0;
let (machines, shares) =
secret_share_machines(id, params, prior, CommitmentsDb::get(txn, &id).unwrap())
.expect("got Shares for a key gen which faulted");
(machines, shares)
});
let mut rng = share_rng(id);
fn handle_machine<C: Ciphersuite>(
rng: &mut ChaCha20Rng,
id: KeyGenId,
// These are the params of our first share, not this machine's shares
params: ThresholdParams,
m: usize,
machine: KeyMachine<C>,
shares_ref: &mut HashMap<Participant, &[u8]>,
) -> Result<ThresholdCore<C>, ProcessorMessage> {
let params = ThresholdParams::new(
params.t(),
params.n(),
Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap(),
let participation = EvrfDkg::<Ristretto>::participate(
&mut OsRng,
todo!("TODO"),
context(session, SUBSTRATE_KEY_CONTEXT),
threshold,
&coerced_keys,
&self.substrate_evrf_private_key,
)
.unwrap()
.write(&mut participation)
.unwrap();
// Parse the shares
let mut shares = HashMap::new();
for i in 1 ..= params.n() {
let i = Participant::new(i).unwrap();
let Some(share) = shares_ref.get_mut(&i) else { continue };
shares.insert(
i,
EncryptedMessage::<C, SecretShare<C::F>>::read(share, params).map_err(|_| {
ProcessorMessage::InvalidShare { id, accuser: params.i(), faulty: i, blame: None }
})?,
);
}
{
let (coerced_keys, faulty_is) = coerce_keys::<N::Curve>(&network_evrf_public_keys);
for faulty_i in faulty_is {
faulty.insert(faulty_i);
}
Ok(
(match machine.calculate_share(rng, shares) {
Ok(res) => res,
Err(e) => match e {
DkgError::ZeroParameter(_, _) |
DkgError::InvalidThreshold(_, _) |
DkgError::InvalidParticipant(_, _) |
DkgError::InvalidSigningSet |
DkgError::InvalidCommitments(_) => unreachable!("{e:?}"),
DkgError::InvalidParticipantQuantity(_, _) |
DkgError::DuplicatedParticipant(_) |
DkgError::MissingParticipant(_) => {
panic!("coordinator sent invalid DKG shares: {e:?}")
}
DkgError::InvalidShare { participant, blame } => {
Err(ProcessorMessage::InvalidShare {
id,
accuser: params.i(),
faulty: participant,
blame: Some(blame.map(|blame| blame.serialize())).flatten(),
})?
}
},
})
.complete(),
EvrfDkg::<N::Curve>::participate(
&mut OsRng,
todo!("TODO"),
context(session, NETWORK_KEY_CONTEXT),
threshold,
&coerced_keys,
&self.network_evrf_private_key,
)
.unwrap()
.write(&mut participation)
.unwrap();
}
let mut substrate_keys = vec![];
let mut network_keys = vec![];
for (m, machines) in machines.into_iter().enumerate() {
let mut shares_ref: HashMap<Participant, &[u8]> =
shares[m].iter().map(|(i, shares)| (*i, shares.as_ref())).collect();
for (i, our_shares) in our_shares.iter().enumerate() {
if m != i {
assert!(shares_ref
.insert(
Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(),
our_shares
[&Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap()]
.as_ref(),
// Send back our Participation and all faulty parties
let mut faulty = faulty.into_iter().collect::<Vec<_>>();
faulty.sort();
let mut res = Vec::with_capacity(1 + faulty.len());
res.push(ProcessorMessage::Participation { session, participation });
for faulty in faulty {
res.push(ProcessorMessage::Blame { session, participant: faulty });
}
res
}
CoordinatorMessage::Participation { session, participant, participation } => {
info!("Received participation from {:?}", participant);
// TODO: Read Participations, declare faulty if necessary, then re-serialize
let substrate_participation: Vec<u8> = todo!("TODO");
let network_participation: Vec<u8> = todo!("TODO");
let (threshold, substrate_evrf_public_keys, network_evrf_public_keys) =
ParamsDb::get(txn, &session).unwrap();
let (mut substrate_participations, mut network_participations) =
ParticipationDb::get(txn, &session)
.unwrap_or((HashMap::with_capacity(1), HashMap::with_capacity(1)));
assert!(
substrate_participations.insert(participant, substrate_participation).is_none(),
"received participation for someone multiple times"
);
assert!(
network_participations.insert(participant, network_participation).is_none(),
"received participation for someone multiple times"
);
ParticipationDb::set(
txn,
&session,
&(substrate_participations.clone(), network_participations.clone()),
);
// This block is taken from the eVRF DKG itself to evaluate the amount participating
{
let mut participating_weight = 0;
// This uses the Substrate maps as the maps are kept in synchrony
let mut evrf_public_keys = substrate_evrf_public_keys.clone();
for i in substrate_participations.keys() {
let evrf_public_key = evrf_public_keys[usize::from(u16::from(*i)) - 1];
// Removes from Vec to prevent double-counting
let start_len = evrf_public_keys.len();
evrf_public_keys.retain(|key| *key != evrf_public_key);
let end_len = evrf_public_keys.len();
let count = start_len - end_len;
participating_weight += count;
}
if participating_weight < usize::from(threshold) {
return vec![];
}
}
let mut res = Vec::with_capacity(1);
let substrate_dkg = match EvrfDkg::<Ristretto>::verify(
&mut OsRng,
&todo!("TODO"),
context(session, SUBSTRATE_KEY_CONTEXT),
threshold,
// Ignores the list of participants who couldn't have their keys coerced due to prior
// handling those
&coerce_keys::<Ristretto>(&substrate_evrf_public_keys).0,
&substrate_participations
.iter()
.map(|(key, participation)| {
(
*key,
Participation::read(
&mut participation.as_slice(),
substrate_evrf_public_keys
.len()
.try_into()
.expect("performing a key gen with more than u16::MAX participants"),
)
.is_none());
.expect("prior read participation was invalid"),
)
})
.collect(),
)
.unwrap()
{
VerifyResult::Valid(dkg) => dkg,
VerifyResult::Invalid(faulty) => {
for participant in faulty {
// Remove from both maps for simplicity's sake
// There's no point in having one DKG complete yet not the other
assert!(substrate_participations.remove(&participant).is_some());
assert!(network_participations.remove(&participant).is_some());
res.push(ProcessorMessage::Blame { session, participant });
}
ParticipationDb::set(
txn,
&session,
&(substrate_participations.clone(), network_participations.clone()),
);
return res;
}
let these_substrate_keys =
match handle_machine(&mut rng, id, params, m, machines.0, &mut shares_ref) {
Ok(keys) => keys,
Err(msg) => return msg,
};
let these_network_keys =
match handle_machine(&mut rng, id, params, m, machines.1, &mut shares_ref) {
Ok(keys) => keys,
Err(msg) => return msg,
};
for i in 1 ..= params.n() {
let i = Participant::new(i).unwrap();
let Some(shares) = shares_ref.get(&i) else { continue };
if !shares.is_empty() {
return ProcessorMessage::InvalidShare {
id,
accuser: these_substrate_keys.params().i(),
faulty: i,
blame: None,
};
VerifyResult::NotEnoughParticipants => {
panic!("not enough participants despite checking we were at the threshold")
}
};
let network_dkg = match EvrfDkg::<N::Curve>::verify(
&mut OsRng,
&todo!("TODO"),
context(session, NETWORK_KEY_CONTEXT),
threshold,
// Ignores the list of participants who couldn't have their keys coerced due to prior
// handling those
&coerce_keys::<N::Curve>(&network_evrf_public_keys).0,
&network_participations
.iter()
.map(|(key, participation)| {
(
*key,
Participation::read(
&mut participation.as_slice(),
network_evrf_public_keys
.len()
.try_into()
.expect("performing a key gen with more than u16::MAX participants"),
)
.expect("prior read participation was invalid"),
)
})
.collect(),
)
.unwrap()
{
VerifyResult::Valid(dkg) => dkg,
VerifyResult::Invalid(faulty) => {
for participant in faulty {
assert!(substrate_participations.remove(&participant).is_some());
assert!(network_participations.remove(&participant).is_some());
res.push(ProcessorMessage::Blame { session, participant });
}
ParticipationDb::set(
txn,
&session,
&(substrate_participations.clone(), network_participations.clone()),
);
return res;
}
VerifyResult::NotEnoughParticipants => {
// We may have lost the required amount of participants when doing the Substrate DKG
return res;
}
};
/*
let mut these_network_keys = ThresholdKeys::new(these_network_keys);
N::tweak_keys(&mut these_network_keys);
substrate_keys.push(these_substrate_keys);
network_keys.push(these_network_keys);
}
let mut generated_substrate_key = None;
let mut generated_network_key = None;
for keys in substrate_keys.iter().zip(&network_keys) {
if generated_substrate_key.is_none() {
generated_substrate_key = Some(keys.0.group_key());
generated_network_key = Some(keys.1.group_key());
} else {
assert_eq!(generated_substrate_key, Some(keys.0.group_key()));
assert_eq!(generated_network_key, Some(keys.1.group_key()));
let mut generated_substrate_key = None;
let mut generated_network_key = None;
for keys in substrate_keys.iter().zip(&network_keys) {
if generated_substrate_key.is_none() {
generated_substrate_key = Some(keys.0.group_key());
generated_network_key = Some(keys.1.group_key());
} else {
assert_eq!(generated_substrate_key, Some(keys.0.group_key()));
assert_eq!(generated_network_key, Some(keys.1.group_key()));
}
}
}
GeneratedKeysDb::save_keys::<N>(txn, &id, &substrate_keys, &network_keys);
GeneratedKeysDb::save_keys::<N>(txn, &id, &substrate_keys, &network_keys);
ProcessorMessage::GeneratedKeyPair {
id,
substrate_key: generated_substrate_key.unwrap().to_bytes(),
// TODO: This can be made more efficient since tweaked keys may be a subset of keys
network_key: generated_network_key.unwrap().to_bytes().as_ref().to_vec(),
}
}
ProcessorMessage::GeneratedKeyPair {
id,
substrate_key: generated_substrate_key.unwrap().to_bytes(),
// TODO: This can be made more efficient since tweaked keys may be a subset of keys
network_key: generated_network_key.unwrap().to_bytes().as_ref().to_vec(),
}
*/
CoordinatorMessage::VerifyBlame { id, accuser, accused, share, blame } => {
let params = ParamsDb::get(txn, &id.session, id.attempt).unwrap().0;
let mut share_ref = share.as_slice();
let Ok(substrate_share) = EncryptedMessage::<
Ristretto,
SecretShare<<Ristretto as Ciphersuite>::F>,
>::read(&mut share_ref, params) else {
return ProcessorMessage::Blame { id, participant: accused };
};
let Ok(network_share) = EncryptedMessage::<
N::Curve,
SecretShare<<N::Curve as Ciphersuite>::F>,
>::read(&mut share_ref, params) else {
return ProcessorMessage::Blame { id, participant: accused };
};
if !share_ref.is_empty() {
return ProcessorMessage::Blame { id, participant: accused };
}
let mut substrate_commitment_msgs = HashMap::new();
let mut network_commitment_msgs = HashMap::new();
let commitments = CommitmentsDb::get(txn, &id).unwrap();
for (i, commitments) in commitments {
let mut commitments = commitments.as_slice();
substrate_commitment_msgs
.insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap());
network_commitment_msgs
.insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap());
}
// There is a mild DoS here where someone with a valid blame bloats it to the maximum size
// Given the ambiguity, and limited potential to DoS (this being called means *someone* is
// getting fatally slashed) voids the need to ensure blame is minimal
let substrate_blame =
blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok());
let network_blame =
blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok());
let substrate_blame = AdditionalBlameMachine::new(
context(&id, SUBSTRATE_KEY_CONTEXT),
params.n(),
substrate_commitment_msgs,
)
.unwrap()
.blame(accuser, accused, substrate_share, substrate_blame);
let network_blame = AdditionalBlameMachine::new(
context(&id, NETWORK_KEY_CONTEXT),
params.n(),
network_commitment_msgs,
)
.unwrap()
.blame(accuser, accused, network_share, network_blame);
// If the accused was blamed for either, mark them as at fault
if (substrate_blame == accused) || (network_blame == accused) {
return ProcessorMessage::Blame { id, participant: accused };
}
ProcessorMessage::Blame { id, participant: accuser }
todo!("TODO")
}
}
}

View File

@@ -5,6 +5,7 @@ use async_trait::async_trait;
use thiserror::Error;
use frost::{
dkg::evrf::EvrfCurve,
curve::{Ciphersuite, Curve},
ThresholdKeys,
sign::PreprocessMachine,
@@ -242,7 +243,7 @@ pub struct PreparedSend<N: Network> {
#[async_trait]
pub trait Network: 'static + Send + Sync + Clone + PartialEq + Debug {
/// The elliptic curve used for this network.
type Curve: Curve;
type Curve: Curve + EvrfCurve;
/// The type representing the transaction for this network.
type Transaction: Transaction<Self>; // TODO: Review use of