Get coordinator tests to pass

This commit is contained in:
Luke Parker
2024-08-05 06:50:26 -04:00
parent 9e8e134ef7
commit e74c8f38d5
7 changed files with 154 additions and 295 deletions

View File

@@ -7,12 +7,8 @@ use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng, OsRng}; use rand_core::{RngCore, CryptoRng, OsRng};
use futures_util::{task::Poll, poll}; use futures_util::{task::Poll, poll};
use ciphersuite::{ use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
group::{ff::Field, GroupEncoding},
Ciphersuite, Ristretto,
};
use sp_application_crypto::sr25519;
use borsh::BorshDeserialize; use borsh::BorshDeserialize;
use serai_client::{ use serai_client::{
primitives::NetworkId, primitives::NetworkId,
@@ -52,12 +48,22 @@ pub fn new_spec<R: RngCore + CryptoRng>(
let set = ValidatorSet { session: Session(0), network: NetworkId::Bitcoin }; let set = ValidatorSet { session: Session(0), network: NetworkId::Bitcoin };
let set_participants = keys let validators = keys
.iter() .iter()
.map(|key| (sr25519::Public((<Ristretto as Ciphersuite>::generator() * **key).to_bytes()), 1)) .map(|key| ((<Ristretto as Ciphersuite>::generator() * **key), 1))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let res = TributarySpec::new(serai_block, start_time, set, set_participants); // Generate random eVRF keys as none of these tests rely on them to have any structure
let mut evrf_keys = vec![];
for _ in 0 .. keys.len() {
let mut substrate = [0; 32];
OsRng.fill_bytes(&mut substrate);
let mut network = vec![0; 64];
OsRng.fill_bytes(&mut network);
evrf_keys.push((substrate, network));
}
let res = TributarySpec::new(serai_block, start_time, set, validators, evrf_keys);
assert_eq!( assert_eq!(
TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(), TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(),
res, res,

View File

@@ -1,5 +1,4 @@
use core::time::Duration; use core::time::Duration;
use std::collections::HashMap;
use zeroize::Zeroizing; use zeroize::Zeroizing;
use rand_core::{RngCore, OsRng}; use rand_core::{RngCore, OsRng};
@@ -9,7 +8,7 @@ use frost::Participant;
use sp_runtime::traits::Verify; use sp_runtime::traits::Verify;
use serai_client::{ use serai_client::{
primitives::{SeraiAddress, Signature}, primitives::Signature,
validator_sets::primitives::{ValidatorSet, KeyPair}, validator_sets::primitives::{ValidatorSet, KeyPair},
}; };
@@ -17,10 +16,7 @@ use tokio::time::sleep;
use serai_db::{Get, DbTxn, Db, MemDb}; use serai_db::{Get, DbTxn, Db, MemDb};
use processor_messages::{ use processor_messages::{key_gen, CoordinatorMessage};
key_gen::{self, KeyGenId},
CoordinatorMessage,
};
use tributary::{TransactionTrait, Tributary}; use tributary::{TransactionTrait, Tributary};
@@ -54,44 +50,41 @@ async fn dkg_test() {
tokio::spawn(run_tributaries(tributaries.clone())); tokio::spawn(run_tributaries(tributaries.clone()));
let mut txs = vec![]; let mut txs = vec![];
// Create DKG commitments for each key // Create DKG participation for each key
for key in &keys { for key in &keys {
let attempt = 0; let mut participation = vec![0; 4096];
let mut commitments = vec![0; 256]; OsRng.fill_bytes(&mut participation);
OsRng.fill_bytes(&mut commitments);
let mut tx = Transaction::DkgCommitments { let mut tx =
attempt, Transaction::DkgParticipation { participation, signed: Transaction::empty_signed() };
commitments: vec![commitments],
signed: Transaction::empty_signed(),
};
tx.sign(&mut OsRng, spec.genesis(), key); tx.sign(&mut OsRng, spec.genesis(), key);
txs.push(tx); txs.push(tx);
} }
let block_before_tx = tributaries[0].1.tip().await; let block_before_tx = tributaries[0].1.tip().await;
// Publish all commitments but one // Publish t-1 participations
for (i, tx) in txs.iter().enumerate().skip(1) { let t = ((keys.len() * 2) / 3) + 1;
for (i, tx) in txs.iter().take(t - 1).enumerate() {
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true)); assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
}
// Wait until these are included
for tx in txs.iter().skip(1) {
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
} }
let expected_commitments: HashMap<_, _> = txs let expected_participations = txs
.iter() .iter()
.enumerate() .enumerate()
.map(|(i, tx)| { .map(|(i, tx)| {
if let Transaction::DkgCommitments { commitments, .. } = tx { if let Transaction::DkgParticipation { participation, .. } = tx {
(Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone()) CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Participation {
session: spec.set().session,
participant: Participant::new((i + 1).try_into().unwrap()).unwrap(),
participation: participation.clone(),
})
} else { } else {
panic!("txs had non-commitments"); panic!("txs wasn't a DkgParticipation");
} }
}) })
.collect(); .collect::<Vec<_>>();
async fn new_processors( async fn new_processors(
db: &mut MemDb, db: &mut MemDb,
@@ -120,28 +113,30 @@ async fn dkg_test() {
processors processors
} }
// Instantiate a scanner and verify it has nothing to report // Instantiate a scanner and verify it has the first two participations to report (and isn't
// waiting for `t`)
let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await; let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await;
assert!(processors.0.read().await.is_empty()); assert_eq!(processors.0.read().await.get(&spec.set().network).unwrap().len(), t - 1);
// Publish the last commitment // Publish the rest of the participations
let block_before_tx = tributaries[0].1.tip().await; let block_before_tx = tributaries[0].1.tip().await;
assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true)); for tx in txs.iter().skip(t - 1) {
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await; assert_eq!(tributaries[0].1.add_transaction(tx.clone()).await, Ok(true));
sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await; wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
}
// Verify the scanner emits a KeyGen::Commitments message // Verify the scanner emits all KeyGen::Participations messages
handle_new_blocks::<_, _, _, _, _, LocalP2p>( handle_new_blocks::<_, _, _, _, _, LocalP2p>(
&mut dbs[0], &mut dbs[0],
&keys[0], &keys[0],
&|_, _, _, _| async { &|_, _, _, _| async {
panic!("provided TX caused recognized_id to be called after Commitments") panic!("provided TX caused recognized_id to be called after DkgParticipation")
}, },
&processors, &processors,
&(), &(),
&|_| async { &|_| async {
panic!( panic!(
"test tried to publish a new Tributary TX from handle_application_tx after Commitments" "test tried to publish a new Tributary TX from handle_application_tx after DkgParticipation"
) )
}, },
&spec, &spec,
@@ -150,17 +145,11 @@ async fn dkg_test() {
.await; .await;
{ {
let mut msgs = processors.0.write().await; let mut msgs = processors.0.write().await;
assert_eq!(msgs.len(), 1);
let msgs = msgs.get_mut(&spec.set().network).unwrap(); let msgs = msgs.get_mut(&spec.set().network).unwrap();
let mut expected_commitments = expected_commitments.clone(); assert_eq!(msgs.len(), keys.len());
expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap()); for expected in &expected_participations {
assert_eq!( assert_eq!(&msgs.pop_front().unwrap(), expected);
msgs.pop_front().unwrap(), }
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
id: KeyGenId { session: spec.set().session, attempt: 0 },
commitments: expected_commitments
})
);
assert!(msgs.is_empty()); assert!(msgs.is_empty());
} }
@@ -168,149 +157,14 @@ async fn dkg_test() {
for (i, key) in keys.iter().enumerate().skip(1) { for (i, key) in keys.iter().enumerate().skip(1) {
let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await; let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await;
let mut msgs = processors.0.write().await; let mut msgs = processors.0.write().await;
assert_eq!(msgs.len(), 1);
let msgs = msgs.get_mut(&spec.set().network).unwrap(); let msgs = msgs.get_mut(&spec.set().network).unwrap();
let mut expected_commitments = expected_commitments.clone(); assert_eq!(msgs.len(), keys.len());
expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap()); for expected in &expected_participations {
assert_eq!( assert_eq!(&msgs.pop_front().unwrap(), expected);
msgs.pop_front().unwrap(), }
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
id: KeyGenId { session: spec.set().session, attempt: 0 },
commitments: expected_commitments
})
);
assert!(msgs.is_empty()); assert!(msgs.is_empty());
} }
// Now do shares
let mut txs = vec![];
for (k, key) in keys.iter().enumerate() {
let attempt = 0;
let mut shares = vec![vec![]];
for i in 0 .. keys.len() {
if i != k {
let mut share = vec![0; 256];
OsRng.fill_bytes(&mut share);
shares.last_mut().unwrap().push(share);
}
}
let mut txn = dbs[k].txn();
let mut tx = Transaction::DkgShares {
attempt,
shares,
confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
signed: Transaction::empty_signed(),
};
txn.commit();
tx.sign(&mut OsRng, spec.genesis(), key);
txs.push(tx);
}
let block_before_tx = tributaries[0].1.tip().await;
for (i, tx) in txs.iter().enumerate().skip(1) {
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
}
for tx in txs.iter().skip(1) {
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
}
// With just 4 sets of shares, nothing should happen yet
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
&mut dbs[0],
&keys[0],
&|_, _, _, _| async {
panic!("provided TX caused recognized_id to be called after some shares")
},
&processors,
&(),
&|_| async {
panic!(
"test tried to publish a new Tributary TX from handle_application_tx after some shares"
)
},
&spec,
&tributaries[0].1.reader(),
)
.await;
assert_eq!(processors.0.read().await.len(), 1);
assert!(processors.0.read().await[&spec.set().network].is_empty());
// Publish the final set of shares
let block_before_tx = tributaries[0].1.tip().await;
assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
// Each scanner should emit a distinct shares message
let shares_for = |i: usize| {
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
id: KeyGenId { session: spec.set().session, attempt: 0 },
shares: vec![txs
.iter()
.enumerate()
.filter_map(|(l, tx)| {
if let Transaction::DkgShares { shares, .. } = tx {
if i == l {
None
} else {
let relative_i = i - (if i > l { 1 } else { 0 });
Some((
Participant::new((l + 1).try_into().unwrap()).unwrap(),
shares[0][relative_i].clone(),
))
}
} else {
panic!("txs had non-shares");
}
})
.collect::<HashMap<_, _>>()],
})
};
// Any scanner which has handled the prior blocks should only emit the new event
for (i, key) in keys.iter().enumerate() {
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
&mut dbs[i],
key,
&|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") },
&processors,
&(),
&|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
&spec,
&tributaries[i].1.reader(),
)
.await;
{
let mut msgs = processors.0.write().await;
assert_eq!(msgs.len(), 1);
let msgs = msgs.get_mut(&spec.set().network).unwrap();
assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
assert!(msgs.is_empty());
}
}
// Yet new scanners should emit all events
for (i, key) in keys.iter().enumerate() {
let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await;
let mut msgs = processors.0.write().await;
assert_eq!(msgs.len(), 1);
let msgs = msgs.get_mut(&spec.set().network).unwrap();
let mut expected_commitments = expected_commitments.clone();
expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
assert_eq!(
msgs.pop_front().unwrap(),
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
id: KeyGenId { session: spec.set().session, attempt: 0 },
commitments: expected_commitments
})
);
assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
assert!(msgs.is_empty());
}
// Send DkgConfirmationShare
let mut substrate_key = [0; 32]; let mut substrate_key = [0; 32];
OsRng.fill_bytes(&mut substrate_key); OsRng.fill_bytes(&mut substrate_key);
let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()]; let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];
@@ -319,17 +173,19 @@ async fn dkg_test() {
let mut txs = vec![]; let mut txs = vec![];
for (i, key) in keys.iter().enumerate() { for (i, key) in keys.iter().enumerate() {
let attempt = 0;
let mut txn = dbs[i].txn(); let mut txn = dbs[i].txn();
let share =
crate::tributary::generated_key_pair::<MemDb>(&mut txn, key, &spec, &key_pair, 0).unwrap();
txn.commit();
let mut tx = Transaction::DkgConfirmationShare { // Claim we've generated the key pair
crate::tributary::generated_key_pair::<MemDb>(&mut txn, spec.genesis(), &key_pair);
// Publish the nonces
let attempt = 0;
let mut tx = Transaction::DkgConfirmationNonces {
attempt, attempt,
confirmation_share: share, confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
signed: Transaction::empty_signed(), signed: Transaction::empty_signed(),
}; };
txn.commit();
tx.sign(&mut OsRng, spec.genesis(), key); tx.sign(&mut OsRng, spec.genesis(), key);
txs.push(tx); txs.push(tx);
} }
@@ -341,6 +197,35 @@ async fn dkg_test() {
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
} }
// This should not cause any new processor event as the processor doesn't handle DKG confirming
for (i, key) in keys.iter().enumerate() {
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
&mut dbs[i],
key,
&|_, _, _, _| async {
panic!("provided TX caused recognized_id to be called after DkgConfirmationNonces")
},
&processors,
&(),
// The Tributary handler should publish ConfirmationShare itself after ConfirmationNonces
&|tx| async { assert_eq!(tributaries[i].1.add_transaction(tx).await, Ok(true)) },
&spec,
&tributaries[i].1.reader(),
)
.await;
{
assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty());
}
}
// Yet once these TXs are on-chain, the tributary should itself publish the confirmation shares
// This means in the block after the next block, the keys should be set onto Serai
// Sleep twice as long as two blocks, in case there's some stability issue
sleep(Duration::from_secs(
2 * 2 * u64::from(Tributary::<MemDb, Transaction, LocalP2p>::block_time()),
))
.await;
struct CheckPublishSetKeys { struct CheckPublishSetKeys {
spec: TributarySpec, spec: TributarySpec,
key_pair: KeyPair, key_pair: KeyPair,
@@ -351,19 +236,24 @@ async fn dkg_test() {
&self, &self,
_db: &(impl Sync + Get), _db: &(impl Sync + Get),
set: ValidatorSet, set: ValidatorSet,
removed: Vec<SeraiAddress>,
key_pair: KeyPair, key_pair: KeyPair,
signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
signature: Signature, signature: Signature,
) { ) {
assert_eq!(set, self.spec.set()); assert_eq!(set, self.spec.set());
assert!(removed.is_empty());
assert_eq!(self.key_pair, key_pair); assert_eq!(self.key_pair, key_pair);
assert!(signature.verify( assert!(signature.verify(
&*serai_client::validator_sets::primitives::set_keys_message(&set, &key_pair), &*serai_client::validator_sets::primitives::set_keys_message(&set, &key_pair),
&serai_client::Public( &serai_client::Public(
frost::dkg::musig::musig_key::<Ristretto>( frost::dkg::musig::musig_key::<Ristretto>(
&serai_client::validator_sets::primitives::musig_context(set), &serai_client::validator_sets::primitives::musig_context(set),
&self.spec.validators().into_iter().map(|(validator, _)| validator).collect::<Vec<_>>() &self
.spec
.validators()
.into_iter()
.zip(signature_participants)
.filter_map(|((validator, _), included)| included.then_some(validator))
.collect::<Vec<_>>()
) )
.unwrap() .unwrap()
.to_bytes() .to_bytes()

View File

@@ -6,7 +6,7 @@ use ciphersuite::{group::Group, Ciphersuite, Ristretto};
use scale::{Encode, Decode}; use scale::{Encode, Decode};
use serai_client::{ use serai_client::{
primitives::{SeraiAddress, Signature}, primitives::Signature,
validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet, KeyPair}, validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet, KeyPair},
}; };
use processor_messages::coordinator::SubstrateSignableId; use processor_messages::coordinator::SubstrateSignableId;
@@ -32,8 +32,8 @@ impl PublishSeraiTransaction for () {
&self, &self,
_db: &(impl Sync + serai_db::Get), _db: &(impl Sync + serai_db::Get),
_set: ValidatorSet, _set: ValidatorSet,
_removed: Vec<SeraiAddress>,
_key_pair: KeyPair, _key_pair: KeyPair,
_signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
_signature: Signature, _signature: Signature,
) { ) {
panic!("publish_set_keys was called in test") panic!("publish_set_keys was called in test")
@@ -148,70 +148,20 @@ fn serialize_transaction() {
signed: random_signed_with_nonce(&mut OsRng, 0), signed: random_signed_with_nonce(&mut OsRng, 0),
}); });
{ test_read_write(&Transaction::DkgParticipation {
let mut commitments = vec![random_vec(&mut OsRng, 512)]; participation: random_vec(&mut OsRng, 4096),
for _ in 0 .. (OsRng.next_u64() % 100) { signed: random_signed_with_nonce(&mut OsRng, 0),
let mut temp = commitments[0].clone(); });
OsRng.fill_bytes(&mut temp);
commitments.push(temp);
}
test_read_write(&Transaction::DkgCommitments {
attempt: random_u32(&mut OsRng),
commitments,
signed: random_signed_with_nonce(&mut OsRng, 0),
});
}
{ test_read_write(&Transaction::DkgConfirmationNonces {
// This supports a variable share length, and variable amount of sent shares, yet share length attempt: random_u32(&mut OsRng),
// and sent shares is expected to be constant among recipients confirmation_nonces: {
let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap(); let mut nonces = [0; 64];
let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap(); OsRng.fill_bytes(&mut nonces);
// Create a valid vec of shares nonces
let mut shares = vec![]; },
// Create up to 150 participants signed: random_signed_with_nonce(&mut OsRng, 0),
for _ in 0 ..= (OsRng.next_u64() % 150) { });
// Give each sender multiple shares
let mut sender_shares = vec![];
for _ in 0 .. amount_of_shares {
let mut share = vec![0; share_len];
OsRng.fill_bytes(&mut share);
sender_shares.push(share);
}
shares.push(sender_shares);
}
test_read_write(&Transaction::DkgShares {
attempt: random_u32(&mut OsRng),
shares,
confirmation_nonces: {
let mut nonces = [0; 64];
OsRng.fill_bytes(&mut nonces);
nonces
},
signed: random_signed_with_nonce(&mut OsRng, 1),
});
}
for i in 0 .. 2 {
test_read_write(&Transaction::InvalidDkgShare {
attempt: random_u32(&mut OsRng),
accuser: frost::Participant::new(
u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
)
.unwrap(),
faulty: frost::Participant::new(
u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
)
.unwrap(),
blame: if i == 0 {
None
} else {
Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty())
},
signed: random_signed_with_nonce(&mut OsRng, 2),
});
}
test_read_write(&Transaction::DkgConfirmationShare { test_read_write(&Transaction::DkgConfirmationShare {
attempt: random_u32(&mut OsRng), attempt: random_u32(&mut OsRng),
@@ -220,7 +170,7 @@ fn serialize_transaction() {
OsRng.fill_bytes(&mut share); OsRng.fill_bytes(&mut share);
share share
}, },
signed: random_signed_with_nonce(&mut OsRng, 2), signed: random_signed_with_nonce(&mut OsRng, 1),
}); });
{ {

View File

@@ -37,15 +37,14 @@ async fn tx_test() {
usize::try_from(OsRng.next_u64() % u64::try_from(tributaries.len()).unwrap()).unwrap(); usize::try_from(OsRng.next_u64() % u64::try_from(tributaries.len()).unwrap()).unwrap();
let key = keys[sender].clone(); let key = keys[sender].clone();
let attempt = 0;
let mut commitments = vec![0; 256];
OsRng.fill_bytes(&mut commitments);
// Create the TX with a null signature so we can get its sig hash
let block_before_tx = tributaries[sender].1.tip().await; let block_before_tx = tributaries[sender].1.tip().await;
let mut tx = Transaction::DkgCommitments { // Create the TX with a null signature so we can get its sig hash
attempt, let mut tx = Transaction::DkgParticipation {
commitments: vec![commitments.clone()], participation: {
let mut participation = vec![0; 4096];
OsRng.fill_bytes(&mut participation);
participation
},
signed: Transaction::empty_signed(), signed: Transaction::empty_signed(),
}; };
tx.sign(&mut OsRng, spec.genesis(), &key); tx.sign(&mut OsRng, spec.genesis(), &key);

View File

@@ -324,13 +324,13 @@ impl<
); );
// Determine the bitstring representing who participated before we move `shares` // Determine the bitstring representing who participated before we move `shares`
// This reserves too much capacity if the participating validators have multiple key
// shares, yet that's fine
let validators = self.spec.validators(); let validators = self.spec.validators();
let mut signature_participants = bitvec::vec::BitVec::with_capacity(validators.len()); let mut signature_participants = bitvec::vec::BitVec::with_capacity(validators.len());
for (participant, _) in self.spec.validators() { for (participant, _) in validators {
signature_participants signature_participants.push(
.push(shares.contains_key(&self.spec.i(participant).unwrap().start)); (participant == (<Ristretto as Ciphersuite>::generator() * self.our_key.deref())) ||
shares.contains_key(&self.spec.i(participant).unwrap().start),
);
} }
// Produce the final signature // Produce the final signature

View File

@@ -55,7 +55,7 @@
*/ */
use core::ops::Deref; use core::ops::Deref;
use std::collections::HashMap; use std::collections::{HashSet, HashMap};
use zeroize::{Zeroize, Zeroizing}; use zeroize::{Zeroize, Zeroizing};
@@ -243,6 +243,7 @@ fn threshold_i_map_to_keys_and_musig_i_map(
.i(<Ristretto as Ciphersuite>::generator() * our_key.deref()) .i(<Ristretto as Ciphersuite>::generator() * our_key.deref())
.expect("not in a set we're signing for") .expect("not in a set we're signing for")
.start; .start;
// Asserts we weren't unexpectedly already present
assert!(map.insert(our_threshold_i, vec![]).is_none()); assert!(map.insert(our_threshold_i, vec![]).is_none());
let spec_validators = spec.validators(); let spec_validators = spec.validators();
@@ -259,19 +260,27 @@ fn threshold_i_map_to_keys_and_musig_i_map(
let mut threshold_is = map.keys().copied().collect::<Vec<_>>(); let mut threshold_is = map.keys().copied().collect::<Vec<_>>();
threshold_is.sort(); threshold_is.sort();
for threshold_i in threshold_is { for threshold_i in threshold_is {
sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap())); sorted.push((
threshold_i,
key_from_threshold_i(threshold_i),
map.remove(&threshold_i).unwrap(),
));
} }
// Now that signers are sorted, with their shares, create a map with the is needed for MuSig // Now that signers are sorted, with their shares, create a map with the is needed for MuSig
let mut participants = vec![]; let mut participants = vec![];
let mut map = HashMap::new(); let mut map = HashMap::new();
for (raw_i, (key, share)) in sorted.into_iter().enumerate() { let mut our_musig_i = None;
let musig_i = u16::try_from(raw_i).unwrap() + 1; for (raw_i, (threshold_i, key, share)) in sorted.into_iter().enumerate() {
let musig_i = Participant::new(u16::try_from(raw_i).unwrap() + 1).unwrap();
if threshold_i == our_threshold_i {
our_musig_i = Some(musig_i);
}
participants.push(key); participants.push(key);
map.insert(Participant::new(musig_i).unwrap(), share); map.insert(musig_i, share);
} }
map.remove(&our_threshold_i).unwrap(); map.remove(&our_musig_i.unwrap()).unwrap();
(participants, map) (participants, map)
} }
@@ -301,7 +310,9 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
} }
fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) { fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>(); // This preprocesses with just us as we only decide the participants after obtaining
// preprocesses
let participants = vec![<Ristretto as Ciphersuite>::generator() * self.key.deref()];
self.signing_protocol().preprocess_internal(&participants) self.signing_protocol().preprocess_internal(&participants)
} }
// Get the preprocess for this confirmation. // Get the preprocess for this confirmation.
@@ -314,8 +325,8 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
preprocesses: HashMap<Participant, Vec<u8>>, preprocesses: HashMap<Participant, Vec<u8>>,
key_pair: &KeyPair, key_pair: &KeyPair,
) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> { ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>(); let (participants, preprocesses) =
let preprocesses = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses).1; threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses);
let msg = set_keys_message(&self.spec.set(), key_pair); let msg = set_keys_message(&self.spec.set(), key_pair);
self.signing_protocol().share_internal(&participants, preprocesses, &msg) self.signing_protocol().share_internal(&participants, preprocesses, &msg)
} }
@@ -334,6 +345,8 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
key_pair: &KeyPair, key_pair: &KeyPair,
shares: HashMap<Participant, Vec<u8>>, shares: HashMap<Participant, Vec<u8>>,
) -> Result<[u8; 64], Participant> { ) -> Result<[u8; 64], Participant> {
assert_eq!(preprocesses.keys().collect::<HashSet<_>>(), shares.keys().collect::<HashSet<_>>());
let shares = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares).1; let shares = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares).1;
let machine = self let machine = self

View File

@@ -296,7 +296,7 @@ impl ReadWrite for Transaction {
let mut confirmation_share = [0; 32]; let mut confirmation_share = [0; 32];
reader.read_exact(&mut confirmation_share)?; reader.read_exact(&mut confirmation_share)?;
let signed = Signed::read_without_nonce(reader, 0)?; let signed = Signed::read_without_nonce(reader, 1)?;
Ok(Transaction::DkgConfirmationShare { attempt, confirmation_share, signed }) Ok(Transaction::DkgConfirmationShare { attempt, confirmation_share, signed })
} }
@@ -446,11 +446,9 @@ impl TransactionTrait for Transaction {
Transaction::DkgParticipation { signed, .. } => { Transaction::DkgParticipation { signed, .. } => {
TransactionKind::Signed(b"dkg".to_vec(), signed) TransactionKind::Signed(b"dkg".to_vec(), signed)
} }
Transaction::DkgConfirmationNonces { attempt, signed, .. } => { Transaction::DkgConfirmationNonces { attempt, signed, .. } |
TransactionKind::Signed((b"dkg_confirmation_nonces", attempt).encode(), signed)
}
Transaction::DkgConfirmationShare { attempt, signed, .. } => { Transaction::DkgConfirmationShare { attempt, signed, .. } => {
TransactionKind::Signed((b"dkg_confirmation_share", attempt).encode(), signed) TransactionKind::Signed((b"dkg_confirmation", attempt).encode(), signed)
} }
Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"), Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"),
@@ -521,7 +519,10 @@ impl Transaction {
Transaction::DkgParticipation { .. } => 0, Transaction::DkgParticipation { .. } => 0,
// Uses a nonce of 0 as it has an internal attempt counter we distinguish by // Uses a nonce of 0 as it has an internal attempt counter we distinguish by
Transaction::DkgConfirmationNonces { .. } | Transaction::DkgConfirmationShare { .. } => 0, Transaction::DkgConfirmationNonces { .. } => 0,
// Uses a nonce of 1 due to internal attempt counter and due to following
// DkgConfirmationNonces
Transaction::DkgConfirmationShare { .. } => 1,
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),