5 Commits

Author SHA1 Message Date
Luke Parker
2ae2883106 Update spec to the new DKG 2024-08-05 06:58:44 -04:00
Luke Parker
e74c8f38d5 Get coordinator tests to pass 2024-08-05 06:50:26 -04:00
Luke Parker
9e8e134ef7 Replace Interpolation::None with Interpolation::Constant
Allows the MuSig DKG to keep the secret share as the original private key,
enabling deriving FROST nonces consistently regardless of the MuSig context.
2024-08-05 06:32:37 -04:00
Luke Parker
f08faeadff Have the DKG explicitly declare how to interpolate its shares
Removes the hack for MuSig where we multiply keys by the inverse of their
lagrange interpolation factor.
2024-08-05 06:06:56 -04:00
Luke Parker
1b7613329c Add sensible Debug to key_gen::[Processor, Coordinator]Message 2024-08-05 04:04:02 -04:00
21 changed files with 326 additions and 449 deletions

View File

@@ -7,12 +7,8 @@ use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng, OsRng};
use futures_util::{task::Poll, poll};
use ciphersuite::{
group::{ff::Field, GroupEncoding},
Ciphersuite, Ristretto,
};
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use sp_application_crypto::sr25519;
use borsh::BorshDeserialize;
use serai_client::{
primitives::NetworkId,
@@ -52,12 +48,22 @@ pub fn new_spec<R: RngCore + CryptoRng>(
let set = ValidatorSet { session: Session(0), network: NetworkId::Bitcoin };
let set_participants = keys
let validators = keys
.iter()
.map(|key| (sr25519::Public((<Ristretto as Ciphersuite>::generator() * **key).to_bytes()), 1))
.map(|key| ((<Ristretto as Ciphersuite>::generator() * **key), 1))
.collect::<Vec<_>>();
let res = TributarySpec::new(serai_block, start_time, set, set_participants);
// Generate random eVRF keys as none of these tests rely on them to have any structure
let mut evrf_keys = vec![];
for _ in 0 .. keys.len() {
let mut substrate = [0; 32];
OsRng.fill_bytes(&mut substrate);
let mut network = vec![0; 64];
OsRng.fill_bytes(&mut network);
evrf_keys.push((substrate, network));
}
let res = TributarySpec::new(serai_block, start_time, set, validators, evrf_keys);
assert_eq!(
TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(),
res,

View File

@@ -1,5 +1,4 @@
use core::time::Duration;
use std::collections::HashMap;
use zeroize::Zeroizing;
use rand_core::{RngCore, OsRng};
@@ -9,7 +8,7 @@ use frost::Participant;
use sp_runtime::traits::Verify;
use serai_client::{
primitives::{SeraiAddress, Signature},
primitives::Signature,
validator_sets::primitives::{ValidatorSet, KeyPair},
};
@@ -17,10 +16,7 @@ use tokio::time::sleep;
use serai_db::{Get, DbTxn, Db, MemDb};
use processor_messages::{
key_gen::{self, KeyGenId},
CoordinatorMessage,
};
use processor_messages::{key_gen, CoordinatorMessage};
use tributary::{TransactionTrait, Tributary};
@@ -54,44 +50,41 @@ async fn dkg_test() {
tokio::spawn(run_tributaries(tributaries.clone()));
let mut txs = vec![];
// Create DKG commitments for each key
// Create DKG participation for each key
for key in &keys {
let attempt = 0;
let mut commitments = vec![0; 256];
OsRng.fill_bytes(&mut commitments);
let mut participation = vec![0; 4096];
OsRng.fill_bytes(&mut participation);
let mut tx = Transaction::DkgCommitments {
attempt,
commitments: vec![commitments],
signed: Transaction::empty_signed(),
};
let mut tx =
Transaction::DkgParticipation { participation, signed: Transaction::empty_signed() };
tx.sign(&mut OsRng, spec.genesis(), key);
txs.push(tx);
}
let block_before_tx = tributaries[0].1.tip().await;
// Publish all commitments but one
for (i, tx) in txs.iter().enumerate().skip(1) {
// Publish t-1 participations
let t = ((keys.len() * 2) / 3) + 1;
for (i, tx) in txs.iter().take(t - 1).enumerate() {
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
}
// Wait until these are included
for tx in txs.iter().skip(1) {
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
}
let expected_commitments: HashMap<_, _> = txs
let expected_participations = txs
.iter()
.enumerate()
.map(|(i, tx)| {
if let Transaction::DkgCommitments { commitments, .. } = tx {
(Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone())
if let Transaction::DkgParticipation { participation, .. } = tx {
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Participation {
session: spec.set().session,
participant: Participant::new((i + 1).try_into().unwrap()).unwrap(),
participation: participation.clone(),
})
} else {
panic!("txs had non-commitments");
panic!("txs wasn't a DkgParticipation");
}
})
.collect();
.collect::<Vec<_>>();
async fn new_processors(
db: &mut MemDb,
@@ -120,28 +113,30 @@ async fn dkg_test() {
processors
}
// Instantiate a scanner and verify it has nothing to report
// Instantiate a scanner and verify it has the first two participations to report (and isn't
// waiting for `t`)
let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await;
assert!(processors.0.read().await.is_empty());
assert_eq!(processors.0.read().await.get(&spec.set().network).unwrap().len(), t - 1);
// Publish the last commitment
// Publish the rest of the participations
let block_before_tx = tributaries[0].1.tip().await;
assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
for tx in txs.iter().skip(t - 1) {
assert_eq!(tributaries[0].1.add_transaction(tx.clone()).await, Ok(true));
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
}
// Verify the scanner emits a KeyGen::Commitments message
// Verify the scanner emits all KeyGen::Participations messages
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
&mut dbs[0],
&keys[0],
&|_, _, _, _| async {
panic!("provided TX caused recognized_id to be called after Commitments")
panic!("provided TX caused recognized_id to be called after DkgParticipation")
},
&processors,
&(),
&|_| async {
panic!(
"test tried to publish a new Tributary TX from handle_application_tx after Commitments"
"test tried to publish a new Tributary TX from handle_application_tx after DkgParticipation"
)
},
&spec,
@@ -150,17 +145,11 @@ async fn dkg_test() {
.await;
{
let mut msgs = processors.0.write().await;
assert_eq!(msgs.len(), 1);
let msgs = msgs.get_mut(&spec.set().network).unwrap();
let mut expected_commitments = expected_commitments.clone();
expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap());
assert_eq!(
msgs.pop_front().unwrap(),
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
id: KeyGenId { session: spec.set().session, attempt: 0 },
commitments: expected_commitments
})
);
assert_eq!(msgs.len(), keys.len());
for expected in &expected_participations {
assert_eq!(&msgs.pop_front().unwrap(), expected);
}
assert!(msgs.is_empty());
}
@@ -168,149 +157,14 @@ async fn dkg_test() {
for (i, key) in keys.iter().enumerate().skip(1) {
let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await;
let mut msgs = processors.0.write().await;
assert_eq!(msgs.len(), 1);
let msgs = msgs.get_mut(&spec.set().network).unwrap();
let mut expected_commitments = expected_commitments.clone();
expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
assert_eq!(
msgs.pop_front().unwrap(),
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
id: KeyGenId { session: spec.set().session, attempt: 0 },
commitments: expected_commitments
})
);
assert_eq!(msgs.len(), keys.len());
for expected in &expected_participations {
assert_eq!(&msgs.pop_front().unwrap(), expected);
}
assert!(msgs.is_empty());
}
// Now do shares
let mut txs = vec![];
for (k, key) in keys.iter().enumerate() {
let attempt = 0;
let mut shares = vec![vec![]];
for i in 0 .. keys.len() {
if i != k {
let mut share = vec![0; 256];
OsRng.fill_bytes(&mut share);
shares.last_mut().unwrap().push(share);
}
}
let mut txn = dbs[k].txn();
let mut tx = Transaction::DkgShares {
attempt,
shares,
confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
signed: Transaction::empty_signed(),
};
txn.commit();
tx.sign(&mut OsRng, spec.genesis(), key);
txs.push(tx);
}
let block_before_tx = tributaries[0].1.tip().await;
for (i, tx) in txs.iter().enumerate().skip(1) {
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
}
for tx in txs.iter().skip(1) {
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
}
// With just 4 sets of shares, nothing should happen yet
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
&mut dbs[0],
&keys[0],
&|_, _, _, _| async {
panic!("provided TX caused recognized_id to be called after some shares")
},
&processors,
&(),
&|_| async {
panic!(
"test tried to publish a new Tributary TX from handle_application_tx after some shares"
)
},
&spec,
&tributaries[0].1.reader(),
)
.await;
assert_eq!(processors.0.read().await.len(), 1);
assert!(processors.0.read().await[&spec.set().network].is_empty());
// Publish the final set of shares
let block_before_tx = tributaries[0].1.tip().await;
assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
// Each scanner should emit a distinct shares message
let shares_for = |i: usize| {
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
id: KeyGenId { session: spec.set().session, attempt: 0 },
shares: vec![txs
.iter()
.enumerate()
.filter_map(|(l, tx)| {
if let Transaction::DkgShares { shares, .. } = tx {
if i == l {
None
} else {
let relative_i = i - (if i > l { 1 } else { 0 });
Some((
Participant::new((l + 1).try_into().unwrap()).unwrap(),
shares[0][relative_i].clone(),
))
}
} else {
panic!("txs had non-shares");
}
})
.collect::<HashMap<_, _>>()],
})
};
// Any scanner which has handled the prior blocks should only emit the new event
for (i, key) in keys.iter().enumerate() {
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
&mut dbs[i],
key,
&|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") },
&processors,
&(),
&|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
&spec,
&tributaries[i].1.reader(),
)
.await;
{
let mut msgs = processors.0.write().await;
assert_eq!(msgs.len(), 1);
let msgs = msgs.get_mut(&spec.set().network).unwrap();
assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
assert!(msgs.is_empty());
}
}
// Yet new scanners should emit all events
for (i, key) in keys.iter().enumerate() {
let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await;
let mut msgs = processors.0.write().await;
assert_eq!(msgs.len(), 1);
let msgs = msgs.get_mut(&spec.set().network).unwrap();
let mut expected_commitments = expected_commitments.clone();
expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
assert_eq!(
msgs.pop_front().unwrap(),
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
id: KeyGenId { session: spec.set().session, attempt: 0 },
commitments: expected_commitments
})
);
assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
assert!(msgs.is_empty());
}
// Send DkgConfirmationShare
let mut substrate_key = [0; 32];
OsRng.fill_bytes(&mut substrate_key);
let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];
@@ -319,17 +173,19 @@ async fn dkg_test() {
let mut txs = vec![];
for (i, key) in keys.iter().enumerate() {
let attempt = 0;
let mut txn = dbs[i].txn();
let share =
crate::tributary::generated_key_pair::<MemDb>(&mut txn, key, &spec, &key_pair, 0).unwrap();
txn.commit();
let mut tx = Transaction::DkgConfirmationShare {
// Claim we've generated the key pair
crate::tributary::generated_key_pair::<MemDb>(&mut txn, spec.genesis(), &key_pair);
// Publish the nonces
let attempt = 0;
let mut tx = Transaction::DkgConfirmationNonces {
attempt,
confirmation_share: share,
confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
signed: Transaction::empty_signed(),
};
txn.commit();
tx.sign(&mut OsRng, spec.genesis(), key);
txs.push(tx);
}
@@ -341,6 +197,35 @@ async fn dkg_test() {
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
}
// This should not cause any new processor event as the processor doesn't handle DKG confirming
for (i, key) in keys.iter().enumerate() {
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
&mut dbs[i],
key,
&|_, _, _, _| async {
panic!("provided TX caused recognized_id to be called after DkgConfirmationNonces")
},
&processors,
&(),
// The Tributary handler should publish ConfirmationShare itself after ConfirmationNonces
&|tx| async { assert_eq!(tributaries[i].1.add_transaction(tx).await, Ok(true)) },
&spec,
&tributaries[i].1.reader(),
)
.await;
{
assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty());
}
}
// Yet once these TXs are on-chain, the tributary should itself publish the confirmation shares
// This means in the block after the next block, the keys should be set onto Serai
// Sleep twice as long as two blocks, in case there's some stability issue
sleep(Duration::from_secs(
2 * 2 * u64::from(Tributary::<MemDb, Transaction, LocalP2p>::block_time()),
))
.await;
struct CheckPublishSetKeys {
spec: TributarySpec,
key_pair: KeyPair,
@@ -351,19 +236,24 @@ async fn dkg_test() {
&self,
_db: &(impl Sync + Get),
set: ValidatorSet,
removed: Vec<SeraiAddress>,
key_pair: KeyPair,
signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
signature: Signature,
) {
assert_eq!(set, self.spec.set());
assert!(removed.is_empty());
assert_eq!(self.key_pair, key_pair);
assert!(signature.verify(
&*serai_client::validator_sets::primitives::set_keys_message(&set, &key_pair),
&serai_client::Public(
frost::dkg::musig::musig_key::<Ristretto>(
&serai_client::validator_sets::primitives::musig_context(set),
&self.spec.validators().into_iter().map(|(validator, _)| validator).collect::<Vec<_>>()
&self
.spec
.validators()
.into_iter()
.zip(signature_participants)
.filter_map(|((validator, _), included)| included.then_some(validator))
.collect::<Vec<_>>()
)
.unwrap()
.to_bytes()

View File

@@ -6,7 +6,7 @@ use ciphersuite::{group::Group, Ciphersuite, Ristretto};
use scale::{Encode, Decode};
use serai_client::{
primitives::{SeraiAddress, Signature},
primitives::Signature,
validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet, KeyPair},
};
use processor_messages::coordinator::SubstrateSignableId;
@@ -32,8 +32,8 @@ impl PublishSeraiTransaction for () {
&self,
_db: &(impl Sync + serai_db::Get),
_set: ValidatorSet,
_removed: Vec<SeraiAddress>,
_key_pair: KeyPair,
_signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
_signature: Signature,
) {
panic!("publish_set_keys was called in test")
@@ -148,70 +148,20 @@ fn serialize_transaction() {
signed: random_signed_with_nonce(&mut OsRng, 0),
});
{
let mut commitments = vec![random_vec(&mut OsRng, 512)];
for _ in 0 .. (OsRng.next_u64() % 100) {
let mut temp = commitments[0].clone();
OsRng.fill_bytes(&mut temp);
commitments.push(temp);
}
test_read_write(&Transaction::DkgCommitments {
attempt: random_u32(&mut OsRng),
commitments,
test_read_write(&Transaction::DkgParticipation {
participation: random_vec(&mut OsRng, 4096),
signed: random_signed_with_nonce(&mut OsRng, 0),
});
}
{
// This supports a variable share length, and variable amount of sent shares, yet share length
// and sent shares is expected to be constant among recipients
let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap();
let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap();
// Create a valid vec of shares
let mut shares = vec![];
// Create up to 150 participants
for _ in 0 ..= (OsRng.next_u64() % 150) {
// Give each sender multiple shares
let mut sender_shares = vec![];
for _ in 0 .. amount_of_shares {
let mut share = vec![0; share_len];
OsRng.fill_bytes(&mut share);
sender_shares.push(share);
}
shares.push(sender_shares);
}
test_read_write(&Transaction::DkgShares {
test_read_write(&Transaction::DkgConfirmationNonces {
attempt: random_u32(&mut OsRng),
shares,
confirmation_nonces: {
let mut nonces = [0; 64];
OsRng.fill_bytes(&mut nonces);
nonces
},
signed: random_signed_with_nonce(&mut OsRng, 1),
signed: random_signed_with_nonce(&mut OsRng, 0),
});
}
for i in 0 .. 2 {
test_read_write(&Transaction::InvalidDkgShare {
attempt: random_u32(&mut OsRng),
accuser: frost::Participant::new(
u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
)
.unwrap(),
faulty: frost::Participant::new(
u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
)
.unwrap(),
blame: if i == 0 {
None
} else {
Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty())
},
signed: random_signed_with_nonce(&mut OsRng, 2),
});
}
test_read_write(&Transaction::DkgConfirmationShare {
attempt: random_u32(&mut OsRng),
@@ -220,7 +170,7 @@ fn serialize_transaction() {
OsRng.fill_bytes(&mut share);
share
},
signed: random_signed_with_nonce(&mut OsRng, 2),
signed: random_signed_with_nonce(&mut OsRng, 1),
});
{

View File

@@ -37,15 +37,14 @@ async fn tx_test() {
usize::try_from(OsRng.next_u64() % u64::try_from(tributaries.len()).unwrap()).unwrap();
let key = keys[sender].clone();
let attempt = 0;
let mut commitments = vec![0; 256];
OsRng.fill_bytes(&mut commitments);
// Create the TX with a null signature so we can get its sig hash
let block_before_tx = tributaries[sender].1.tip().await;
let mut tx = Transaction::DkgCommitments {
attempt,
commitments: vec![commitments.clone()],
// Create the TX with a null signature so we can get its sig hash
let mut tx = Transaction::DkgParticipation {
participation: {
let mut participation = vec![0; 4096];
OsRng.fill_bytes(&mut participation);
participation
},
signed: Transaction::empty_signed(),
};
tx.sign(&mut OsRng, spec.genesis(), &key);

View File

@@ -324,13 +324,13 @@ impl<
);
// Determine the bitstring representing who participated before we move `shares`
// This reserves too much capacity if the participating validators have multiple key
// shares, yet that's fine
let validators = self.spec.validators();
let mut signature_participants = bitvec::vec::BitVec::with_capacity(validators.len());
for (participant, _) in self.spec.validators() {
signature_participants
.push(shares.contains_key(&self.spec.i(participant).unwrap().start));
for (participant, _) in validators {
signature_participants.push(
(participant == (<Ristretto as Ciphersuite>::generator() * self.our_key.deref())) ||
shares.contains_key(&self.spec.i(participant).unwrap().start),
);
}
// Produce the final signature

View File

@@ -55,7 +55,7 @@
*/
use core::ops::Deref;
use std::collections::HashMap;
use std::collections::{HashSet, HashMap};
use zeroize::{Zeroize, Zeroizing};
@@ -243,6 +243,7 @@ fn threshold_i_map_to_keys_and_musig_i_map(
.i(<Ristretto as Ciphersuite>::generator() * our_key.deref())
.expect("not in a set we're signing for")
.start;
// Asserts we weren't unexpectedly already present
assert!(map.insert(our_threshold_i, vec![]).is_none());
let spec_validators = spec.validators();
@@ -259,19 +260,27 @@ fn threshold_i_map_to_keys_and_musig_i_map(
let mut threshold_is = map.keys().copied().collect::<Vec<_>>();
threshold_is.sort();
for threshold_i in threshold_is {
sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap()));
sorted.push((
threshold_i,
key_from_threshold_i(threshold_i),
map.remove(&threshold_i).unwrap(),
));
}
// Now that signers are sorted, with their shares, create a map with the is needed for MuSig
let mut participants = vec![];
let mut map = HashMap::new();
for (raw_i, (key, share)) in sorted.into_iter().enumerate() {
let musig_i = u16::try_from(raw_i).unwrap() + 1;
let mut our_musig_i = None;
for (raw_i, (threshold_i, key, share)) in sorted.into_iter().enumerate() {
let musig_i = Participant::new(u16::try_from(raw_i).unwrap() + 1).unwrap();
if threshold_i == our_threshold_i {
our_musig_i = Some(musig_i);
}
participants.push(key);
map.insert(Participant::new(musig_i).unwrap(), share);
map.insert(musig_i, share);
}
map.remove(&our_threshold_i).unwrap();
map.remove(&our_musig_i.unwrap()).unwrap();
(participants, map)
}
@@ -301,7 +310,9 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
}
fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
// This preprocesses with just us as we only decide the participants after obtaining
// preprocesses
let participants = vec![<Ristretto as Ciphersuite>::generator() * self.key.deref()];
self.signing_protocol().preprocess_internal(&participants)
}
// Get the preprocess for this confirmation.
@@ -314,8 +325,8 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
preprocesses: HashMap<Participant, Vec<u8>>,
key_pair: &KeyPair,
) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
let preprocesses = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses).1;
let (participants, preprocesses) =
threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses);
let msg = set_keys_message(&self.spec.set(), key_pair);
self.signing_protocol().share_internal(&participants, preprocesses, &msg)
}
@@ -334,6 +345,8 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
key_pair: &KeyPair,
shares: HashMap<Participant, Vec<u8>>,
) -> Result<[u8; 64], Participant> {
assert_eq!(preprocesses.keys().collect::<HashSet<_>>(), shares.keys().collect::<HashSet<_>>());
let shares = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares).1;
let machine = self

View File

@@ -296,7 +296,7 @@ impl ReadWrite for Transaction {
let mut confirmation_share = [0; 32];
reader.read_exact(&mut confirmation_share)?;
let signed = Signed::read_without_nonce(reader, 0)?;
let signed = Signed::read_without_nonce(reader, 1)?;
Ok(Transaction::DkgConfirmationShare { attempt, confirmation_share, signed })
}
@@ -446,11 +446,9 @@ impl TransactionTrait for Transaction {
Transaction::DkgParticipation { signed, .. } => {
TransactionKind::Signed(b"dkg".to_vec(), signed)
}
Transaction::DkgConfirmationNonces { attempt, signed, .. } => {
TransactionKind::Signed((b"dkg_confirmation_nonces", attempt).encode(), signed)
}
Transaction::DkgConfirmationNonces { attempt, signed, .. } |
Transaction::DkgConfirmationShare { attempt, signed, .. } => {
TransactionKind::Signed((b"dkg_confirmation_share", attempt).encode(), signed)
TransactionKind::Signed((b"dkg_confirmation", attempt).encode(), signed)
}
Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"),
@@ -521,7 +519,10 @@ impl Transaction {
Transaction::DkgParticipation { .. } => 0,
// Uses a nonce of 0 as it has an internal attempt counter we distinguish by
Transaction::DkgConfirmationNonces { .. } | Transaction::DkgConfirmationShare { .. } => 0,
Transaction::DkgConfirmationNonces { .. } => 0,
// Uses a nonce of 1 due to internal attempt counter and due to following
// DkgConfirmationNonces
Transaction::DkgConfirmationShare { .. } => 1,
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),

View File

@@ -88,7 +88,7 @@ use multiexp::multiexp_vartime;
use generalized_bulletproofs::arithmetic_circuit_proof::*;
use ec_divisors::DivisorCurve;
use crate::{Participant, ThresholdParams, ThresholdCore, ThresholdKeys};
use crate::{Participant, ThresholdParams, Interpolation, ThresholdCore, ThresholdKeys};
pub(crate) mod proof;
use proof::*;
@@ -571,6 +571,7 @@ impl<C: EvrfCurve> EvrfDkg<C> {
res.push(ThresholdKeys::from(ThresholdCore {
params: ThresholdParams::new(self.t, self.n, i).unwrap(),
interpolation: Interpolation::Lagrange,
secret_share,
group_key: self.group_key,
verification_shares: self.verification_shares.clone(),

View File

@@ -209,8 +209,17 @@ mod lib {
}
}
/// Calculate the lagrange coefficient for a signing set.
pub fn lagrange<F: PrimeField>(i: Participant, included: &[Participant]) -> F {
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub(crate) enum Interpolation<F: Zeroize + PrimeField> {
Constant(Vec<F>),
Lagrange,
}
impl<F: Zeroize + PrimeField> Interpolation<F> {
pub(crate) fn interpolation_factor(&self, i: Participant, included: &[Participant]) -> F {
match self {
Interpolation::Constant(c) => c[usize::from(u16::from(i) - 1)],
Interpolation::Lagrange => {
let i_f = F::from(u64::from(u16::from(i)));
let mut num = F::ONE;
@@ -229,6 +238,9 @@ mod lib {
// (which we have an if case to avoid)
num * denom.invert().unwrap()
}
}
}
}
/// Keys and verification shares generated by a DKG.
/// Called core as they're expected to be wrapped into an Arc before usage in various operations.
@@ -236,6 +248,8 @@ mod lib {
pub struct ThresholdCore<C: Ciphersuite> {
/// Threshold Parameters.
pub(crate) params: ThresholdParams,
/// The interpolation method used.
pub(crate) interpolation: Interpolation<C::F>,
/// Secret share key.
pub(crate) secret_share: Zeroizing<C::F>,
@@ -250,6 +264,7 @@ mod lib {
fmt
.debug_struct("ThresholdCore")
.field("params", &self.params)
.field("interpolation", &self.interpolation)
.field("group_key", &self.group_key)
.field("verification_shares", &self.verification_shares)
.finish_non_exhaustive()
@@ -259,6 +274,7 @@ mod lib {
impl<C: Ciphersuite> Zeroize for ThresholdCore<C> {
fn zeroize(&mut self) {
self.params.zeroize();
self.interpolation.zeroize();
self.secret_share.zeroize();
self.group_key.zeroize();
for share in self.verification_shares.values_mut() {
@@ -270,16 +286,14 @@ mod lib {
impl<C: Ciphersuite> ThresholdCore<C> {
pub(crate) fn new(
params: ThresholdParams,
interpolation: Interpolation<C::F>,
secret_share: Zeroizing<C::F>,
verification_shares: HashMap<Participant, C::G>,
) -> ThresholdCore<C> {
let t = (1 ..= params.t()).map(Participant).collect::<Vec<_>>();
ThresholdCore {
params,
secret_share,
group_key: t.iter().map(|i| verification_shares[i] * lagrange::<C::F>(*i, &t)).sum(),
verification_shares,
}
let group_key =
t.iter().map(|i| verification_shares[i] * interpolation.interpolation_factor(*i, &t)).sum();
ThresholdCore { params, interpolation, secret_share, group_key, verification_shares }
}
/// Parameters for these keys.
@@ -308,6 +322,15 @@ mod lib {
writer.write_all(&self.params.t.to_le_bytes())?;
writer.write_all(&self.params.n.to_le_bytes())?;
writer.write_all(&self.params.i.to_bytes())?;
match &self.interpolation {
Interpolation::Constant(c) => {
writer.write_all(&[0])?;
for c in c {
writer.write_all(c.to_repr().as_ref())?;
}
}
Interpolation::Lagrange => writer.write_all(&[1])?,
};
let mut share_bytes = self.secret_share.to_repr();
writer.write_all(share_bytes.as_ref())?;
share_bytes.as_mut().zeroize();
@@ -356,6 +379,20 @@ mod lib {
)
};
let mut interpolation = [0];
reader.read_exact(&mut interpolation)?;
let interpolation = match interpolation[0] {
0 => Interpolation::Constant({
let mut res = Vec::with_capacity(usize::from(n));
for _ in 0 .. n {
res.push(C::read_F(reader)?);
}
res
}),
1 => Interpolation::Lagrange,
_ => Err(io::Error::other("invalid interpolation method"))?,
};
let secret_share = Zeroizing::new(C::read_F(reader)?);
let mut verification_shares = HashMap::new();
@@ -365,6 +402,7 @@ mod lib {
Ok(ThresholdCore::new(
ThresholdParams::new(t, n, i).map_err(|_| io::Error::other("invalid parameters"))?,
interpolation,
secret_share,
verification_shares,
))
@@ -387,6 +425,7 @@ mod lib {
/// View of keys, interpolated and offset for usage.
#[derive(Clone)]
pub struct ThresholdView<C: Ciphersuite> {
interpolation: Interpolation<C::F>,
offset: C::F,
group_key: C::G,
included: Vec<Participant>,
@@ -399,6 +438,7 @@ mod lib {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("ThresholdView")
.field("interpolation", &self.interpolation)
.field("offset", &self.offset)
.field("group_key", &self.group_key)
.field("included", &self.included)
@@ -484,12 +524,13 @@ mod lib {
included.sort();
let mut secret_share = Zeroizing::new(
lagrange::<C::F>(self.params().i(), &included) * self.secret_share().deref(),
self.core.interpolation.interpolation_factor(self.params().i(), &included) *
self.secret_share().deref(),
);
let mut verification_shares = self.verification_shares();
for (i, share) in &mut verification_shares {
*share *= lagrange::<C::F>(*i, &included);
*share *= self.core.interpolation.interpolation_factor(*i, &included);
}
// The offset is included by adding it to the participant with the lowest ID
@@ -500,6 +541,7 @@ mod lib {
*verification_shares.get_mut(&included[0]).unwrap() += C::generator() * offset;
Ok(ThresholdView {
interpolation: self.core.interpolation.clone(),
offset,
group_key: self.group_key(),
secret_share,
@@ -532,6 +574,14 @@ mod lib {
&self.included
}
/// Return the interpolation factor for a signer.
pub fn interpolation_factor(&self, participant: Participant) -> Option<C::F> {
if !self.included.contains(&participant) {
None?
}
Some(self.interpolation.interpolation_factor(participant, &self.included))
}
/// Return the interpolated, offset secret share.
pub fn secret_share(&self) -> &Zeroizing<C::F> {
&self.secret_share
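
The hunks above replace the free-standing `lagrange` function with an `Interpolation` enum carried by the keys themselves, which is what lets MuSig keys keep the original private key as their secret share. As a toy illustration only (a tiny prime field standing in for the crate's generic `PrimeField`, and plain integer participant indices), the two variants behave as follows:

```rust
const P: i128 = 101; // toy prime modulus

fn pow_mod(mut b: i128, mut e: i128) -> i128 {
  let mut r = 1;
  while e > 0 {
    if (e & 1) == 1 { r = (r * b) % P; }
    b = (b * b) % P;
    e >>= 1;
  }
  r
}

// Multiplicative inverse via Fermat's little theorem
fn inv(x: i128) -> i128 { pow_mod(x.rem_euclid(P), P - 2) }

enum Interpolation {
  // One fixed factor per participant (e.g. MuSig binding factors)
  Constant(Vec<i128>),
  // Standard Lagrange interpolation over the included signing set
  Lagrange,
}

impl Interpolation {
  fn factor(&self, i: i128, included: &[i128]) -> i128 {
    match self {
      Interpolation::Constant(c) => c[usize::try_from(i).unwrap() - 1],
      Interpolation::Lagrange => {
        let (mut num, mut denom) = (1, 1);
        for l in included.iter().copied().filter(|l| *l != i) {
          num = (num * l) % P;
          denom = (denom * (l - i).rem_euclid(P)) % P;
        }
        (num * inv(denom)) % P
      }
    }
  }
}

fn main() {
  // Lagrange: participant 1 signing with {1, 3} is weighted by 3 / (3 - 1)
  assert_eq!(Interpolation::Lagrange.factor(1, &[1, 3]), (3 * inv(2)) % P);
  // Constant: the factor is simply the stored value, so the secret share can
  // remain the original private key (no inverse-Lagrange hack needed)
  assert_eq!(Interpolation::Constant(vec![7, 11, 13]).factor(2, &[1, 2, 3]), 11);
}
```
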

View File

@@ -7,8 +7,6 @@ use std_shims::collections::HashMap;
#[cfg(feature = "std")]
use zeroize::Zeroizing;
#[cfg(feature = "std")]
use ciphersuite::group::ff::Field;
use ciphersuite::{
group::{Group, GroupEncoding},
Ciphersuite,
@@ -16,7 +14,7 @@ use ciphersuite::{
use crate::DkgError;
#[cfg(feature = "std")]
use crate::{Participant, ThresholdParams, ThresholdCore, lagrange};
use crate::{Participant, ThresholdParams, Interpolation, ThresholdCore};
fn check_keys<C: Ciphersuite>(keys: &[C::G]) -> Result<u16, DkgError<()>> {
if keys.is_empty() {
@@ -104,38 +102,26 @@ pub fn musig<C: Ciphersuite>(
binding.push(binding_factor::<C>(transcript.clone(), i));
}
// Multiply our private key by our binding factor
let mut secret_share = private_key.clone();
*secret_share *= binding[pos];
// Our secret share is our private key
let secret_share = private_key.clone();
// Calculate verification shares
let mut verification_shares = HashMap::new();
// When this library offers a ThresholdView for a specific signing set, it applies the lagrange
// factor
// Since this is a n-of-n scheme, there's only one possible signing set, and one possible
// lagrange factor
// In the name of simplicity, we define the group key as the sum of all bound keys
// Accordingly, the secret share must be multiplied by the inverse of the lagrange factor, along
// with all verification shares
// This is less performant than simply defining the group key as the sum of all post-lagrange
// bound keys, yet the simplicity is preferred
let included = (1 ..= keys_len)
// This error also shouldn't be possible, for the same reasons as documented above
.map(|l| Participant::new(l).ok_or(DkgError::InvalidSigningSet))
.collect::<Result<Vec<_>, _>>()?;
let mut group_key = C::G::identity();
for (l, p) in included.iter().enumerate() {
let bound = keys[l] * binding[l];
group_key += bound;
for l in 1 ..= keys_len {
let key = keys[usize::from(l) - 1];
group_key += key * binding[usize::from(l - 1)];
let lagrange_inv = lagrange::<C::F>(*p, &included).invert().unwrap();
if params.i() == *p {
*secret_share *= lagrange_inv;
}
verification_shares.insert(*p, bound * lagrange_inv);
// These errors also shouldn't be possible, for the same reasons as documented above
verification_shares.insert(Participant::new(l).ok_or(DkgError::InvalidSigningSet)?, key);
}
debug_assert_eq!(C::generator() * secret_share.deref(), verification_shares[&params.i()]);
debug_assert_eq!(musig_key::<C>(context, keys).unwrap(), group_key);
Ok(ThresholdCore { params, secret_share, group_key, verification_shares })
Ok(ThresholdCore::new(
params,
Interpolation::Constant(binding),
secret_share,
verification_shares,
))
}

View File

@@ -22,7 +22,7 @@ use multiexp::{multiexp_vartime, BatchVerifier};
use schnorr::SchnorrSignature;
use crate::{
Participant, DkgError, ThresholdParams, ThresholdCore, validate_map,
Participant, DkgError, ThresholdParams, Interpolation, ThresholdCore, validate_map,
encryption::{
ReadWrite, EncryptionKeyMessage, EncryptedMessage, Encryption, Decryption, EncryptionKeyProof,
DecryptionError,
@@ -477,6 +477,7 @@ impl<C: Ciphersuite> KeyMachine<C> {
encryption: encryption.into_decryption(),
result: Some(ThresholdCore {
params,
interpolation: Interpolation::Lagrange,
secret_share: secret,
group_key: stripes[0],
verification_shares,

View File

@@ -113,6 +113,7 @@ impl<C1: Ciphersuite, C2: Ciphersuite<F = C1::F, G = C1::G>> GeneratorPromotion<
Ok(ThresholdKeys {
core: Arc::new(ThresholdCore::new(
params,
self.base.core.interpolation.clone(),
self.base.secret_share().clone(),
verification_shares,
)),

View File

@@ -6,7 +6,7 @@ use rand_core::{RngCore, CryptoRng};
use ciphersuite::{group::ff::Field, Ciphersuite};
use crate::{Participant, ThresholdCore, ThresholdKeys, lagrange, musig::musig as musig_fn};
use crate::{Participant, ThresholdCore, ThresholdKeys, musig::musig as musig_fn};
mod musig;
pub use musig::test_musig;
@@ -46,7 +46,8 @@ pub fn recover_key<C: Ciphersuite>(keys: &HashMap<Participant, ThresholdKeys<C>>
let included = keys.keys().copied().collect::<Vec<_>>();
let group_private = keys.iter().fold(C::F::ZERO, |accum, (i, keys)| {
accum + (lagrange::<C::F>(*i, &included) * keys.secret_share().deref())
accum +
(first.core.interpolation.interpolation_factor(*i, &included) * keys.secret_share().deref())
});
assert_eq!(C::generator() * group_private, first.group_key(), "failed to recover keys");
group_private

View File

@@ -20,7 +20,6 @@ use group::{
use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group as dfg;
use frost::{
dkg::lagrange,
curve::Ed25519,
Participant, FrostError, ThresholdKeys, ThresholdView,
algorithm::{WriteAddendum, Algorithm},
@@ -233,8 +232,10 @@ impl Algorithm<Ed25519> for ClsagMultisig {
.append_message(b"key_image_share", addendum.key_image_share.compress().to_bytes());
// Accumulate the interpolated share
let interpolated_key_image_share =
addendum.key_image_share * lagrange::<dfg::Scalar>(l, view.included());
let interpolated_key_image_share = addendum.key_image_share *
view
.interpolation_factor(l)
.ok_or(FrostError::InternalError("processing addendum of non-participant"))?;
*self.image.as_mut().unwrap() += interpolated_key_image_share;
self

View File

@@ -14,7 +14,6 @@ use transcript::{Transcript, RecommendedTranscript};
use frost::{
curve::Ed25519,
Participant, FrostError, ThresholdKeys,
dkg::lagrange,
sign::{
Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine, SignatureMachine,
AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine,
@@ -34,7 +33,7 @@ use crate::send::{SendError, SignableTransaction, key_image_sort};
pub struct TransactionMachine {
signable: SignableTransaction,
i: Participant,
keys: ThresholdKeys<Ed25519>,
// The key image generator, and the scalar offset from the spend key
key_image_generators_and_offsets: Vec<(EdwardsPoint, Scalar)>,
@@ -45,7 +44,7 @@ pub struct TransactionMachine {
pub struct TransactionSignMachine {
signable: SignableTransaction,
i: Participant,
keys: ThresholdKeys<Ed25519>,
key_image_generators_and_offsets: Vec<(EdwardsPoint, Scalar)>,
clsags: Vec<(ClsagMultisigMaskSender, AlgorithmSignMachine<Ed25519, ClsagMultisig>)>,
@@ -61,7 +60,7 @@ pub struct TransactionSignatureMachine {
impl SignableTransaction {
/// Create a FROST signing machine out of this signable transaction.
pub fn multisig(self, keys: &ThresholdKeys<Ed25519>) -> Result<TransactionMachine, SendError> {
pub fn multisig(self, keys: ThresholdKeys<Ed25519>) -> Result<TransactionMachine, SendError> {
let mut clsags = vec![];
let mut key_image_generators_and_offsets = vec![];
@@ -85,12 +84,7 @@ impl SignableTransaction {
clsags.push((clsag_mask_send, AlgorithmMachine::new(clsag, offset)));
}
Ok(TransactionMachine {
signable: self,
i: keys.params().i(),
key_image_generators_and_offsets,
clsags,
})
Ok(TransactionMachine { signable: self, keys, key_image_generators_and_offsets, clsags })
}
}
@@ -120,7 +114,7 @@ impl PreprocessMachine for TransactionMachine {
TransactionSignMachine {
signable: self.signable,
i: self.i,
keys: self.keys,
key_image_generators_and_offsets: self.key_image_generators_and_offsets,
clsags,
@@ -173,12 +167,12 @@ impl SignMachine<Transaction> for TransactionSignMachine {
// We do not need to be included here, yet this set of signers has yet to be validated
// We explicitly remove ourselves to ensure we aren't included twice, if we were redundantly
// included
commitments.remove(&self.i);
commitments.remove(&self.keys.params().i());
// Find out who's included
let mut included = commitments.keys().copied().collect::<Vec<_>>();
// This push won't duplicate due to the above removal
included.push(self.i);
included.push(self.keys.params().i());
// unstable sort may reorder elements of equal order
// Given our lack of duplicates, we should have no elements of equal order
included.sort_unstable();
@@ -192,12 +186,15 @@ impl SignMachine<Transaction> for TransactionSignMachine {
}
// Convert the serialized nonces commitments to a parallelized Vec
let view = self.keys.view(included.clone()).map_err(|_| {
FrostError::InvalidSigningSet("couldn't form an interpolated view of the key")
})?;
let mut commitments = (0 .. self.clsags.len())
.map(|c| {
included
.iter()
.map(|l| {
let preprocess = if *l == self.i {
let preprocess = if *l == self.keys.params().i() {
self.our_preprocess[c].clone()
} else {
commitments.get_mut(l).ok_or(FrostError::MissingParticipant(*l))?[c].clone()
@@ -206,7 +203,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
// While here, calculate the key image as needed to call sign
// The CLSAG algorithm will independently calculate the key image/verify these shares
key_images[c] +=
preprocess.addendum.key_image_share().0 * lagrange::<dfg::Scalar>(*l, &included).0;
preprocess.addendum.key_image_share().0 * view.interpolation_factor(*l).unwrap().0;
Ok((*l, preprocess))
})
@@ -217,7 +214,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
// The above inserted our own preprocess into these maps (which is unnecessary)
// Remove it now
for map in &mut commitments {
map.remove(&self.i);
map.remove(&self.keys.params().i());
}
// The actual TX will have sorted its inputs by key image

View File

@@ -281,7 +281,7 @@ macro_rules! test {
{
let mut machines = HashMap::new();
for i in (1 ..= THRESHOLD).map(|i| Participant::new(i).unwrap()) {
machines.insert(i, tx.clone().multisig(&keys[&i]).unwrap());
machines.insert(i, tx.clone().multisig(keys[&i].clone()).unwrap());
}
frost::tests::sign_without_caching(&mut OsRng, machines, &[])

View File

@@ -19,7 +19,7 @@ pub struct SubstrateContext {
pub mod key_gen {
use super::*;
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub enum CoordinatorMessage {
// Instructs the Processor to begin the key generation process.
// TODO: Should this be moved under Substrate?
@@ -28,13 +28,31 @@ pub mod key_gen {
Participation { session: Session, participant: Participant, participation: Vec<u8> },
}
impl core::fmt::Debug for CoordinatorMessage {
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
match self {
CoordinatorMessage::GenerateKey { session, threshold, evrf_public_keys } => fmt
.debug_struct("CoordinatorMessage::GenerateKey")
.field("session", &session)
.field("threshold", &threshold)
.field("evrf_public_keys.len()", &evrf_public_keys.len())
.finish_non_exhaustive(),
CoordinatorMessage::Participation { session, participant, .. } => fmt
.debug_struct("CoordinatorMessage::Participation")
.field("session", &session)
.field("participant", &participant)
.finish_non_exhaustive(),
}
}
}
impl CoordinatorMessage {
pub fn required_block(&self) -> Option<BlockHash> {
None
}
}
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub enum ProcessorMessage {
// Participated in the specified key generation protocol.
Participation { session: Session, participation: Vec<u8> },
@@ -43,6 +61,26 @@ pub mod key_gen {
// Blame this participant.
Blame { session: Session, participant: Participant },
}
impl core::fmt::Debug for ProcessorMessage {
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
match self {
ProcessorMessage::Participation { session, .. } => fmt
.debug_struct("ProcessorMessage::Participation")
.field("session", &session)
.finish_non_exhaustive(),
ProcessorMessage::GeneratedKeyPair { session, .. } => fmt
.debug_struct("ProcessorMessage::GeneratedKeyPair")
.field("session", &session)
.finish_non_exhaustive(),
ProcessorMessage::Blame { session, participant } => fmt
.debug_struct("ProcessorMessage::Blame")
.field("session", &session)
.field("participant", &participant)
.finish_non_exhaustive(),
}
}
}
}
pub mod sign {

View File

@@ -657,7 +657,7 @@ impl Network for Monero {
keys: ThresholdKeys<Self::Curve>,
transaction: SignableTransaction,
) -> Result<Self::TransactionMachine, NetworkError> {
match transaction.0.clone().multisig(&keys) {
match transaction.0.clone().multisig(keys) {
Ok(machine) => Ok(machine),
Err(e) => panic!("failed to create a multisig machine for TX: {e}"),
}

View File

@@ -1,23 +0,0 @@
Upon an issue with the DKG, the honest validators must remove the malicious
validators. Ideally, a threshold signature would be used, yet that would require
a threshold key (which would require authentication by a MuSig signature). A
MuSig signature which specifies the signing set (or rather, the excluded
signers) achieves the most efficiency.
While that resolves the on-chain behavior, the Tributary also has to perform
exclusion. This has the following forms:
1) Rejecting further transactions (required)
2) Rejecting further participation in Tendermint
With regards to rejecting further participation in Tendermint, it's *ideal* to
remove the validator from the list of validators. Each validator removed from
participation, yet not from the list of validators, increases the likelihood of
the network failing to form consensus.
With regards to the economic security, an honest 67% may remove a faulty
(explicitly or simply offline) 33%, letting 67% of the remaining 67% (4/9ths)
take control of the associated private keys. In such a case, the malicious
parties are defined as the 4/9ths of validators with access to the private key
and the 33% removed (who together form >67% of the originally intended
validator set and have presumably provided enough stake to cover losses).
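
Making the arithmetic explicit:

$$\tfrac{2}{3} \cdot \tfrac{2}{3} = \tfrac{4}{9}, \qquad \tfrac{4}{9} + \tfrac{1}{3} = \tfrac{7}{9} \approx 78\% > \tfrac{2}{3}$$
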

View File

@@ -1,35 +1,7 @@
# Distributed Key Generation
Serai uses a modification of Pedersen's Distributed Key Generation, which is
actually Feldman's Verifiable Secret Sharing Scheme run by every participant, as
described in the FROST paper. The modification included in FROST was to include
a Schnorr Proof of Knowledge for coefficient zero, preventing rogue key attacks.
This results in a two-round protocol.
### Encryption
In order to protect the secret shares during communication, the `dkg` library
establishes a public key for encryption at the start of a given protocol.
Every encrypted message (such as the secret shares) then includes a per-message
encryption key. These two keys are used in an Elliptic-curve Diffie-Hellman
handshake to derive a shared key. This shared key is then hashed to obtain a key
and IV for use in a ChaCha20 stream cipher instance, which is xor'd against a
message to encrypt it.
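
A minimal sketch of that flow, assuming the `x25519-dalek`, `sha2`, and `chacha20` crates purely for illustration (the `dkg` library itself is generic over its ciphersuite and differs in the exact derivation):

```rust
use rand_core::OsRng;
use sha2::{Digest, Sha512};
use x25519_dalek::{EphemeralSecret, PublicKey};
use chacha20::{ChaCha20, cipher::{KeyIvInit, StreamCipher}};

// Encrypt `msg` in place to `recipient`, returning the per-message public key
// sent alongside the ciphertext.
fn encrypt_to(recipient: &PublicKey, msg: &mut [u8]) -> PublicKey {
  // Fresh per-message encryption key
  let per_message = EphemeralSecret::random_from_rng(OsRng);
  let per_message_pub = PublicKey::from(&per_message);
  // ECDH handshake to derive the shared key
  let shared = per_message.diffie_hellman(recipient);
  // Hash the shared key to obtain a cipher key and IV
  let digest = Sha512::digest(shared.as_bytes());
  let key: [u8; 32] = digest[.. 32].try_into().unwrap();
  let iv: [u8; 12] = digest[32 .. 44].try_into().unwrap();
  // XOR the ChaCha20 keystream against the message
  ChaCha20::new(&key.into(), &iv.into()).apply_keystream(msg);
  per_message_pub
}
```
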
### Blame
Since each message has a distinct key attached, and accordingly a distinct
shared key, it's possible to reveal the shared key for a specific message
without revealing any other message's decryption keys. This is utilized when a
participant misbehaves. A participant who receives an invalid encrypted message
publishes its key, which it can do without concern for side effects. With the key

published, all participants can decrypt the message in order to decide blame.
While key reuse by a participant is considered as them revealing the messages
themselves, and therefore out of scope, there is an attack where a malicious
adversary claims another participant's encryption key. They'll fail to encrypt
their message, and the recipient will issue a blame statement. This blame
statement, intended to reveal the malicious adversary, also reveals the message
by the participant whose keys were co-opted. To resolve this, a
proof-of-possession is also included with encrypted messages, ensuring only
those actually with per-message keys can claim to use them.
Serai uses a modification of the one-round Distributed Key Generation described
in the [eVRF](https://eprint.iacr.org/2024/397) paper. We only require a
threshold to participate, sacrificing unbiasedness for robustness, and implement a
verifiable encryption scheme such that anyone can verify a ciphertext
encrypts the expected secret share.

View File

@@ -9,29 +9,22 @@ This document primarily discusses its flow with regards to the coordinator.
### Generate Key
On `key_gen::CoordinatorMessage::GenerateKey`, the processor begins a pair of
instances of the distributed key generation protocol specified in the FROST
paper.
instances of the distributed key generation protocol.
The first instance is for a key to use on the external network. The second
instance is for a Ristretto public key used to publish data to the Serai
blockchain. This pair of FROST DKG instances is considered a single instance of
Serai's overall key generation protocol.
The first instance is for a Ristretto public key used to publish data to the
Serai blockchain. The second instance is for a key to use on the external
network. This pair of DKG instances is considered a single instance of Serai's
overall DKG protocol.
The commitments for both protocols are sent to the coordinator in a single
`key_gen::ProcessorMessage::Commitments`.
The participations in both protocols are sent to the coordinator in a single
`key_gen::ProcessorMessage::Participation`.
### Key Gen Commitments
### Key Gen Participations
On `key_gen::CoordinatorMessage::Commitments`, the processor continues the
specified key generation instance. The secret shares for each fellow
participant are sent to the coordinator in a
`key_gen::ProcessorMessage::Shares`.
#### Key Gen Shares
On `key_gen::CoordinatorMessage::Shares`, the processor completes the specified
key generation instance. The generated key pair is sent to the coordinator in a
`key_gen::ProcessorMessage::GeneratedKeyPair`.
On `key_gen::CoordinatorMessage::Participation`, the processor stores the
contained participation, verifying participations as sane. Once it receives `t`
honest participations, the processor completes the DKG and sends the generated
key pair to the coordinator in a `key_gen::ProcessorMessage::GeneratedKeyPair`.
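
As a rough sketch of that flow (illustrative only: the types, `verify_participation`, and `complete_dkg` below are stand-ins, not the processor's real API):

```rust
use std::collections::HashMap;

// Illustrative stand-ins for the processor's actual types
type Participation = Vec<u8>;
type KeyPair = (Vec<u8>, Vec<u8>); // (substrate key, network key), simplified

fn verify_participation(_p: &Participation) -> bool { true } // stand-in sanity check
fn complete_dkg(_p: &HashMap<u16, Participation>) -> KeyPair { (vec![], vec![]) } // stand-in

struct KeyGenSession {
  t: u16,
  participations: HashMap<u16, Participation>,
}

impl KeyGenSession {
  // Handle a `key_gen::CoordinatorMessage::Participation` for this session
  fn on_participation(&mut self, participant: u16, participation: Participation) -> Option<KeyPair> {
    // Store the participation, but only if it verifies as sane
    if verify_participation(&participation) {
      self.participations.insert(participant, participation);
    }
    // Once `t` honest participations have accumulated, the DKG completes and the
    // key pair is reported back via `key_gen::ProcessorMessage::GeneratedKeyPair`
    if self.participations.len() >= usize::from(self.t) {
      return Some(complete_dkg(&self.participations));
    }
    None
  }
}
```
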
### Confirm Key Pair