Get coordinator tests to pass
@@ -7,12 +7,8 @@ use zeroize::Zeroizing;
 use rand_core::{RngCore, CryptoRng, OsRng};

 use futures_util::{task::Poll, poll};

-use ciphersuite::{
-  group::{ff::Field, GroupEncoding},
-  Ciphersuite, Ristretto,
-};
+use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};

 use sp_application_crypto::sr25519;
-
 use borsh::BorshDeserialize;

 use serai_client::{
   primitives::NetworkId,
@@ -52,12 +48,22 @@ pub fn new_spec<R: RngCore + CryptoRng>(

   let set = ValidatorSet { session: Session(0), network: NetworkId::Bitcoin };

-  let set_participants = keys
+  let validators = keys
     .iter()
-    .map(|key| (sr25519::Public((<Ristretto as Ciphersuite>::generator() * **key).to_bytes()), 1))
+    .map(|key| ((<Ristretto as Ciphersuite>::generator() * **key), 1))
     .collect::<Vec<_>>();

-  let res = TributarySpec::new(serai_block, start_time, set, set_participants);
+  // Generate random eVRF keys as none of these tests rely on them to have any structure
+  let mut evrf_keys = vec![];
+  for _ in 0 .. keys.len() {
+    let mut substrate = [0; 32];
+    OsRng.fill_bytes(&mut substrate);
+    let mut network = vec![0; 64];
+    OsRng.fill_bytes(&mut network);
+    evrf_keys.push((substrate, network));
+  }
+
+  let res = TributarySpec::new(serai_block, start_time, set, validators, evrf_keys);
   assert_eq!(
     TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(),
     res,
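Note on the hunk above: the test ends by asserting a borsh round-trip, i.e. serializing the `TributarySpec` and deserializing the bytes must reproduce an equal value. A minimal, self-contained sketch of the same property check, using a hypothetical stand-in type (not the coordinator's actual `TributarySpec`):

use borsh::{BorshSerialize, BorshDeserialize};

// Hypothetical stand-in; any type deriving these traits plus Debug/PartialEq
// supports the identical round-trip assertion.
#[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize)]
struct Spec {
  start_time: u64,
  validators: Vec<([u8; 32], u16)>,
}

fn main() {
  let spec = Spec { start_time: 0, validators: vec![([0; 32], 1)] };
  let bytes = borsh::to_vec(&spec).unwrap();
  assert_eq!(Spec::deserialize_reader(&mut bytes.as_slice()).unwrap(), spec);
}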
@@ -1,5 +1,4 @@
 use core::time::Duration;
-use std::collections::HashMap;

 use zeroize::Zeroizing;
 use rand_core::{RngCore, OsRng};
@@ -9,7 +8,7 @@ use frost::Participant;

 use sp_runtime::traits::Verify;
 use serai_client::{
-  primitives::{SeraiAddress, Signature},
+  primitives::Signature,
   validator_sets::primitives::{ValidatorSet, KeyPair},
 };

@@ -17,10 +16,7 @@ use tokio::time::sleep;

 use serai_db::{Get, DbTxn, Db, MemDb};

-use processor_messages::{
-  key_gen::{self, KeyGenId},
-  CoordinatorMessage,
-};
+use processor_messages::{key_gen, CoordinatorMessage};

 use tributary::{TransactionTrait, Tributary};

@@ -54,44 +50,41 @@ async fn dkg_test() {
   tokio::spawn(run_tributaries(tributaries.clone()));

   let mut txs = vec![];
-  // Create DKG commitments for each key
+  // Create DKG participation for each key
   for key in &keys {
-    let attempt = 0;
-    let mut commitments = vec![0; 256];
-    OsRng.fill_bytes(&mut commitments);
+    let mut participation = vec![0; 4096];
+    OsRng.fill_bytes(&mut participation);

-    let mut tx = Transaction::DkgCommitments {
-      attempt,
-      commitments: vec![commitments],
-      signed: Transaction::empty_signed(),
-    };
+    let mut tx =
+      Transaction::DkgParticipation { participation, signed: Transaction::empty_signed() };
     tx.sign(&mut OsRng, spec.genesis(), key);
     txs.push(tx);
   }

   let block_before_tx = tributaries[0].1.tip().await;

-  // Publish all commitments but one
-  for (i, tx) in txs.iter().enumerate().skip(1) {
+  // Publish t-1 participations
+  let t = ((keys.len() * 2) / 3) + 1;
+  for (i, tx) in txs.iter().take(t - 1).enumerate() {
     assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
   }

   // Wait until these are included
   for tx in txs.iter().skip(1) {
     wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
   }

-  let expected_commitments: HashMap<_, _> = txs
+  let expected_participations = txs
     .iter()
     .enumerate()
     .map(|(i, tx)| {
-      if let Transaction::DkgCommitments { commitments, .. } = tx {
-        (Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone())
+      if let Transaction::DkgParticipation { participation, .. } = tx {
+        CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Participation {
+          session: spec.set().session,
+          participant: Participant::new((i + 1).try_into().unwrap()).unwrap(),
+          participation: participation.clone(),
+        })
       } else {
-        panic!("txs had non-commitments");
+        panic!("txs wasn't a DkgParticipation");
       }
     })
-    .collect();
+    .collect::<Vec<_>>();

 async fn new_processors(
   db: &mut MemDb,
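The `t` introduced in this hunk is the threshold the rest of the test keys off: for `n` key shares, `t = ((n * 2) / 3) + 1` under integer division, and the loop above publishes only `t - 1` participations so the threshold is not yet met. A quick worked check (the five-validator count is an assumption for illustration; the actual count comes from `keys` elsewhere in the test):

// Threshold as computed in the test: floor(2n / 3) + 1.
fn threshold(n: usize) -> usize {
  ((n * 2) / 3) + 1
}

fn main() {
  // With 5 validators: (5 * 2) / 3 + 1 = 3 + 1 = 4, so t - 1 = 3
  // participations go on-chain first, one short of the threshold.
  assert_eq!(threshold(5), 4);
  assert_eq!(threshold(5) - 1, 3);
}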
@@ -120,28 +113,30 @@ async fn dkg_test() {
     processors
   }

-  // Instantiate a scanner and verify it has nothing to report
+  // Instantiate a scanner and verify it has the first two participations to report (and isn't
+  // waiting for `t`)
   let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await;
-  assert!(processors.0.read().await.is_empty());
+  assert_eq!(processors.0.read().await.get(&spec.set().network).unwrap().len(), t - 1);

-  // Publish the last commitment
+  // Publish the rest of the participations
   let block_before_tx = tributaries[0].1.tip().await;
-  assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
-  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
-  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
+  for tx in txs.iter().skip(t - 1) {
+    assert_eq!(tributaries[0].1.add_transaction(tx.clone()).await, Ok(true));
+    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
+  }

-  // Verify the scanner emits a KeyGen::Commitments message
+  // Verify the scanner emits all KeyGen::Participations messages
   handle_new_blocks::<_, _, _, _, _, LocalP2p>(
     &mut dbs[0],
     &keys[0],
     &|_, _, _, _| async {
-      panic!("provided TX caused recognized_id to be called after Commitments")
+      panic!("provided TX caused recognized_id to be called after DkgParticipation")
     },
     &processors,
     &(),
     &|_| async {
       panic!(
-        "test tried to publish a new Tributary TX from handle_application_tx after Commitments"
+        "test tried to publish a new Tributary TX from handle_application_tx after DkgParticipation"
       )
     },
     &spec,
@@ -150,17 +145,11 @@ async fn dkg_test() {
   .await;
   {
     let mut msgs = processors.0.write().await;
-    assert_eq!(msgs.len(), 1);
     let msgs = msgs.get_mut(&spec.set().network).unwrap();
-    let mut expected_commitments = expected_commitments.clone();
-    expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap());
-    assert_eq!(
-      msgs.pop_front().unwrap(),
-      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
-        id: KeyGenId { session: spec.set().session, attempt: 0 },
-        commitments: expected_commitments
-      })
-    );
+    assert_eq!(msgs.len(), keys.len());
+    for expected in &expected_participations {
+      assert_eq!(&msgs.pop_front().unwrap(), expected);
+    }
     assert!(msgs.is_empty());
   }

@@ -168,149 +157,14 @@ async fn dkg_test() {
   for (i, key) in keys.iter().enumerate().skip(1) {
     let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await;
     let mut msgs = processors.0.write().await;
-    assert_eq!(msgs.len(), 1);
     let msgs = msgs.get_mut(&spec.set().network).unwrap();
-    let mut expected_commitments = expected_commitments.clone();
-    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
-    assert_eq!(
-      msgs.pop_front().unwrap(),
-      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
-        id: KeyGenId { session: spec.set().session, attempt: 0 },
-        commitments: expected_commitments
-      })
-    );
+    assert_eq!(msgs.len(), keys.len());
+    for expected in &expected_participations {
+      assert_eq!(&msgs.pop_front().unwrap(), expected);
+    }
     assert!(msgs.is_empty());
   }

-  // Now do shares
-  let mut txs = vec![];
-  for (k, key) in keys.iter().enumerate() {
-    let attempt = 0;
-
-    let mut shares = vec![vec![]];
-    for i in 0 .. keys.len() {
-      if i != k {
-        let mut share = vec![0; 256];
-        OsRng.fill_bytes(&mut share);
-        shares.last_mut().unwrap().push(share);
-      }
-    }
-
-    let mut txn = dbs[k].txn();
-    let mut tx = Transaction::DkgShares {
-      attempt,
-      shares,
-      confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
-      signed: Transaction::empty_signed(),
-    };
-    txn.commit();
-    tx.sign(&mut OsRng, spec.genesis(), key);
-    txs.push(tx);
-  }
-
-  let block_before_tx = tributaries[0].1.tip().await;
-  for (i, tx) in txs.iter().enumerate().skip(1) {
-    assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
-  }
-  for tx in txs.iter().skip(1) {
-    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
-  }
-
-  // With just 4 sets of shares, nothing should happen yet
-  handle_new_blocks::<_, _, _, _, _, LocalP2p>(
-    &mut dbs[0],
-    &keys[0],
-    &|_, _, _, _| async {
-      panic!("provided TX caused recognized_id to be called after some shares")
-    },
-    &processors,
-    &(),
-    &|_| async {
-      panic!(
-        "test tried to publish a new Tributary TX from handle_application_tx after some shares"
-      )
-    },
-    &spec,
-    &tributaries[0].1.reader(),
-  )
-  .await;
-  assert_eq!(processors.0.read().await.len(), 1);
-  assert!(processors.0.read().await[&spec.set().network].is_empty());
-
-  // Publish the final set of shares
-  let block_before_tx = tributaries[0].1.tip().await;
-  assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
-  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
-  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
-
-  // Each scanner should emit a distinct shares message
-  let shares_for = |i: usize| {
-    CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
-      id: KeyGenId { session: spec.set().session, attempt: 0 },
-      shares: vec![txs
-        .iter()
-        .enumerate()
-        .filter_map(|(l, tx)| {
-          if let Transaction::DkgShares { shares, .. } = tx {
-            if i == l {
-              None
-            } else {
-              let relative_i = i - (if i > l { 1 } else { 0 });
-              Some((
-                Participant::new((l + 1).try_into().unwrap()).unwrap(),
-                shares[0][relative_i].clone(),
-              ))
-            }
-          } else {
-            panic!("txs had non-shares");
-          }
-        })
-        .collect::<HashMap<_, _>>()],
-    })
-  };
-
-  // Any scanner which has handled the prior blocks should only emit the new event
-  for (i, key) in keys.iter().enumerate() {
-    handle_new_blocks::<_, _, _, _, _, LocalP2p>(
-      &mut dbs[i],
-      key,
-      &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") },
-      &processors,
-      &(),
-      &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
-      &spec,
-      &tributaries[i].1.reader(),
-    )
-    .await;
-    {
-      let mut msgs = processors.0.write().await;
-      assert_eq!(msgs.len(), 1);
-      let msgs = msgs.get_mut(&spec.set().network).unwrap();
-      assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
-      assert!(msgs.is_empty());
-    }
-  }
-
-  // Yet new scanners should emit all events
-  for (i, key) in keys.iter().enumerate() {
-    let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await;
-    let mut msgs = processors.0.write().await;
-    assert_eq!(msgs.len(), 1);
-    let msgs = msgs.get_mut(&spec.set().network).unwrap();
-    let mut expected_commitments = expected_commitments.clone();
-    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
-    assert_eq!(
-      msgs.pop_front().unwrap(),
-      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
-        id: KeyGenId { session: spec.set().session, attempt: 0 },
-        commitments: expected_commitments
-      })
-    );
-    assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
-    assert!(msgs.is_empty());
-  }
-
-  // Send DkgConfirmationShare
   let mut substrate_key = [0; 32];
   OsRng.fill_bytes(&mut substrate_key);
   let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];
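The large deletion above removes the second round of the old DKG flow. Its `shares_for` closure encoded the per-recipient index math: a sender's share vector omits the sender itself, so recipient `i`'s slot in sender `l`'s vector shifts down by one once `i > l`. A standalone restatement of that arithmetic, lifted directly from the deleted `relative_i` line:

// Position of recipient `i` within sender `l`'s share vector, which skips
// the sender; `None` when sender and recipient coincide.
fn relative_index(l: usize, i: usize) -> Option<usize> {
  if i == l {
    None
  } else {
    Some(i - usize::from(i > l))
  }
}

fn main() {
  // Sender 1 addresses shares to recipients [0, 2, 3, 4]; recipient 3 sits
  // at index 2, while recipient 0 stays at index 0.
  assert_eq!(relative_index(1, 3), Some(2));
  assert_eq!(relative_index(1, 0), Some(0));
  assert_eq!(relative_index(1, 1), None);
}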
@@ -319,17 +173,19 @@ async fn dkg_test() {

   let mut txs = vec![];
   for (i, key) in keys.iter().enumerate() {
-    let attempt = 0;
     let mut txn = dbs[i].txn();
-    let share =
-      crate::tributary::generated_key_pair::<MemDb>(&mut txn, key, &spec, &key_pair, 0).unwrap();
-    txn.commit();
+    // Claim we've generated the key pair
+    crate::tributary::generated_key_pair::<MemDb>(&mut txn, spec.genesis(), &key_pair);

-    let mut tx = Transaction::DkgConfirmationShare {
+    // Publish the nonces
+    let attempt = 0;
+    let mut tx = Transaction::DkgConfirmationNonces {
       attempt,
-      confirmation_share: share,
+      confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
       signed: Transaction::empty_signed(),
     };
+    txn.commit();
     tx.sign(&mut OsRng, spec.genesis(), key);
     txs.push(tx);
   }
@@ -341,6 +197,35 @@ async fn dkg_test() {
     wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
   }

+  // This should not cause any new processor event as the processor doesn't handle DKG confirming
+  for (i, key) in keys.iter().enumerate() {
+    handle_new_blocks::<_, _, _, _, _, LocalP2p>(
+      &mut dbs[i],
+      key,
+      &|_, _, _, _| async {
+        panic!("provided TX caused recognized_id to be called after DkgConfirmationNonces")
+      },
+      &processors,
+      &(),
+      // The Tributary handler should publish ConfirmationShare itself after ConfirmationNonces
+      &|tx| async { assert_eq!(tributaries[i].1.add_transaction(tx).await, Ok(true)) },
+      &spec,
+      &tributaries[i].1.reader(),
+    )
+    .await;
+    {
+      assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty());
+    }
+  }
+
+  // Yet once these TXs are on-chain, the tributary should itself publish the confirmation shares
+  // This means in the block after the next block, the keys should be set onto Serai
+  // Sleep twice as long as two blocks, in case there's some stability issue
+  sleep(Duration::from_secs(
+    2 * 2 * u64::from(Tributary::<MemDb, Transaction, LocalP2p>::block_time()),
+  ))
+  .await;
+
 struct CheckPublishSetKeys {
   spec: TributarySpec,
   key_pair: KeyPair,
@@ -351,19 +236,24 @@ async fn dkg_test() {
     &self,
     _db: &(impl Sync + Get),
     set: ValidatorSet,
-    removed: Vec<SeraiAddress>,
     key_pair: KeyPair,
+    signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
     signature: Signature,
   ) {
     assert_eq!(set, self.spec.set());
-    assert!(removed.is_empty());
     assert_eq!(self.key_pair, key_pair);
     assert!(signature.verify(
       &*serai_client::validator_sets::primitives::set_keys_message(&set, &key_pair),
       &serai_client::Public(
         frost::dkg::musig::musig_key::<Ristretto>(
           &serai_client::validator_sets::primitives::musig_context(set),
-          &self.spec.validators().into_iter().map(|(validator, _)| validator).collect::<Vec<_>>()
+          &self
+            .spec
+            .validators()
+            .into_iter()
+            .zip(signature_participants)
+            .filter_map(|((validator, _), included)| included.then_some(validator))
+            .collect::<Vec<_>>()
         )
         .unwrap()
         .to_bytes()
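The verification above rebuilds the MuSig key from only the validators whose bit is set in `signature_participants`. A self-contained sketch of that zip-and-filter pattern, substituting plain `Vec<bool>` for the `bitvec` type and string keys for validator public keys:

fn signers<K: Copy>(validators: &[(K, u16)], participants: &[bool]) -> Vec<K> {
  validators
    .iter()
    .zip(participants)
    // Keep a validator's key only if its participation bit is set.
    .filter_map(|((validator, _weight), included)| included.then_some(*validator))
    .collect()
}

fn main() {
  let validators = [("A", 1), ("B", 1), ("C", 1)];
  // Only the first and third validators signed.
  assert_eq!(signers(&validators, &[true, false, true]), vec!["A", "C"]);
}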
@@ -6,7 +6,7 @@ use ciphersuite::{group::Group, Ciphersuite, Ristretto};

 use scale::{Encode, Decode};
 use serai_client::{
-  primitives::{SeraiAddress, Signature},
+  primitives::Signature,
   validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet, KeyPair},
 };
 use processor_messages::coordinator::SubstrateSignableId;
@@ -32,8 +32,8 @@ impl PublishSeraiTransaction for () {
     &self,
     _db: &(impl Sync + serai_db::Get),
     _set: ValidatorSet,
-    _removed: Vec<SeraiAddress>,
     _key_pair: KeyPair,
+    _signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
     _signature: Signature,
   ) {
     panic!("publish_set_keys was called in test")
@@ -148,70 +148,20 @@ fn serialize_transaction() {
     signed: random_signed_with_nonce(&mut OsRng, 0),
   });

-  {
-    let mut commitments = vec![random_vec(&mut OsRng, 512)];
-    for _ in 0 .. (OsRng.next_u64() % 100) {
-      let mut temp = commitments[0].clone();
-      OsRng.fill_bytes(&mut temp);
-      commitments.push(temp);
-    }
-    test_read_write(&Transaction::DkgCommitments {
-      attempt: random_u32(&mut OsRng),
-      commitments,
-      signed: random_signed_with_nonce(&mut OsRng, 0),
-    });
-  }
+  test_read_write(&Transaction::DkgParticipation {
+    participation: random_vec(&mut OsRng, 4096),
+    signed: random_signed_with_nonce(&mut OsRng, 0),
+  });

-  {
-    // This supports a variable share length, and variable amount of sent shares, yet share length
-    // and sent shares is expected to be constant among recipients
-    let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap();
-    let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap();
-    // Create a valid vec of shares
-    let mut shares = vec![];
-    // Create up to 150 participants
-    for _ in 0 ..= (OsRng.next_u64() % 150) {
-      // Give each sender multiple shares
-      let mut sender_shares = vec![];
-      for _ in 0 .. amount_of_shares {
-        let mut share = vec![0; share_len];
-        OsRng.fill_bytes(&mut share);
-        sender_shares.push(share);
-      }
-      shares.push(sender_shares);
-    }
-
-    test_read_write(&Transaction::DkgShares {
-      attempt: random_u32(&mut OsRng),
-      shares,
-      confirmation_nonces: {
-        let mut nonces = [0; 64];
-        OsRng.fill_bytes(&mut nonces);
-        nonces
-      },
-      signed: random_signed_with_nonce(&mut OsRng, 1),
-    });
-  }
-
-  for i in 0 .. 2 {
-    test_read_write(&Transaction::InvalidDkgShare {
-      attempt: random_u32(&mut OsRng),
-      accuser: frost::Participant::new(
-        u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
-      )
-      .unwrap(),
-      faulty: frost::Participant::new(
-        u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
-      )
-      .unwrap(),
-      blame: if i == 0 {
-        None
-      } else {
-        Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty())
-      },
-      signed: random_signed_with_nonce(&mut OsRng, 2),
-    });
-  }
+  test_read_write(&Transaction::DkgConfirmationNonces {
+    attempt: random_u32(&mut OsRng),
+    confirmation_nonces: {
+      let mut nonces = [0; 64];
+      OsRng.fill_bytes(&mut nonces);
+      nonces
+    },
+    signed: random_signed_with_nonce(&mut OsRng, 0),
+  });

   test_read_write(&Transaction::DkgConfirmationShare {
     attempt: random_u32(&mut OsRng),
@@ -220,7 +170,7 @@ fn serialize_transaction() {
       OsRng.fill_bytes(&mut share);
      share
     },
-    signed: random_signed_with_nonce(&mut OsRng, 2),
+    signed: random_signed_with_nonce(&mut OsRng, 1),
   });

   {
@@ -37,15 +37,14 @@ async fn tx_test() {
     usize::try_from(OsRng.next_u64() % u64::try_from(tributaries.len()).unwrap()).unwrap();
   let key = keys[sender].clone();

-  let attempt = 0;
-  let mut commitments = vec![0; 256];
-  OsRng.fill_bytes(&mut commitments);
-
-  // Create the TX with a null signature so we can get its sig hash
   let block_before_tx = tributaries[sender].1.tip().await;
-  let mut tx = Transaction::DkgCommitments {
-    attempt,
-    commitments: vec![commitments.clone()],
+  // Create the TX with a null signature so we can get its sig hash
+  let mut tx = Transaction::DkgParticipation {
+    participation: {
+      let mut participation = vec![0; 4096];
+      OsRng.fill_bytes(&mut participation);
+      participation
+    },
     signed: Transaction::empty_signed(),
   };
   tx.sign(&mut OsRng, spec.genesis(), &key);

@@ -324,13 +324,13 @@ impl<
     );

     // Determine the bitstring representing who participated before we move `shares`
+    // This reserves too much capacity if the participating validators have multiple key
+    // shares, yet that's fine
     let validators = self.spec.validators();
     let mut signature_participants = bitvec::vec::BitVec::with_capacity(validators.len());
-    for (participant, _) in self.spec.validators() {
-      signature_participants
-        .push(shares.contains_key(&self.spec.i(participant).unwrap().start));
+    for (participant, _) in validators {
+      signature_participants.push(
+        (participant == (<Ristretto as Ciphersuite>::generator() * self.our_key.deref())) ||
+          shares.contains_key(&self.spec.i(participant).unwrap().start),
+      );
     }

     // Produce the final signature
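The rewritten loop sets a validator's bit if it is us (our own share never appears in `shares`) or if we received its share. A minimal sketch of that predicate over plain integer identities (the names are illustrative, not the coordinator's types):

use std::collections::HashMap;

fn participation_bits(
  validators: &[u16],
  our_validator: u16,
  shares: &HashMap<u16, Vec<u8>>,
) -> Vec<bool> {
  validators
    .iter()
    // We always participate ourselves; everyone else needs a share on hand.
    .map(|validator| (*validator == our_validator) || shares.contains_key(validator))
    .collect()
}

fn main() {
  let shares = HashMap::from([(3u16, vec![1, 2, 3])]);
  // As validator 1, holding only validator 3's share: bits are [1, 0, 1].
  assert_eq!(participation_bits(&[1, 2, 3], 1, &shares), vec![true, false, true]);
}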
@@ -55,7 +55,7 @@
 */

 use core::ops::Deref;
-use std::collections::HashMap;
+use std::collections::{HashSet, HashMap};

 use zeroize::{Zeroize, Zeroizing};

@@ -243,6 +243,7 @@ fn threshold_i_map_to_keys_and_musig_i_map(
     .i(<Ristretto as Ciphersuite>::generator() * our_key.deref())
     .expect("not in a set we're signing for")
     .start;
+  // Asserts we weren't unexpectedly already present
   assert!(map.insert(our_threshold_i, vec![]).is_none());

   let spec_validators = spec.validators();
@@ -259,19 +260,27 @@ fn threshold_i_map_to_keys_and_musig_i_map(
   let mut threshold_is = map.keys().copied().collect::<Vec<_>>();
   threshold_is.sort();
   for threshold_i in threshold_is {
-    sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap()));
+    sorted.push((
+      threshold_i,
+      key_from_threshold_i(threshold_i),
+      map.remove(&threshold_i).unwrap(),
+    ));
   }

   // Now that signers are sorted, with their shares, create a map with the is needed for MuSig
   let mut participants = vec![];
   let mut map = HashMap::new();
-  for (raw_i, (key, share)) in sorted.into_iter().enumerate() {
-    let musig_i = u16::try_from(raw_i).unwrap() + 1;
+  let mut our_musig_i = None;
+  for (raw_i, (threshold_i, key, share)) in sorted.into_iter().enumerate() {
+    let musig_i = Participant::new(u16::try_from(raw_i).unwrap() + 1).unwrap();
+    if threshold_i == our_threshold_i {
+      our_musig_i = Some(musig_i);
+    }
     participants.push(key);
-    map.insert(Participant::new(musig_i).unwrap(), share);
+    map.insert(musig_i, share);
   }

-  map.remove(&our_threshold_i).unwrap();
+  map.remove(&our_musig_i.unwrap()).unwrap();

   (participants, map)
 }
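The change above threads each signer's threshold index through the sort so our own entry can be found again after renumbering: MuSig indices are simply 1-based positions in threshold-index order. A worked sketch with bare integers (illustrative types, not the coordinator's):

use std::collections::HashMap;

fn main() {
  // Sorted threshold indices of the signers; ours is 4.
  let sorted = [1u16, 2, 4];
  let our_threshold_i = 4u16;

  let mut our_musig_i = None;
  let mut map = HashMap::new();
  for (raw_i, threshold_i) in sorted.into_iter().enumerate() {
    // MuSig index = 1-based position in the sorted order.
    let musig_i = u16::try_from(raw_i).unwrap() + 1;
    if threshold_i == our_threshold_i {
      our_musig_i = Some(musig_i);
    }
    map.insert(musig_i, threshold_i);
  }

  // Threshold index 4 became MuSig index 3; our own entry is then removed,
  // mirroring the `map.remove(&our_musig_i.unwrap()).unwrap()` above.
  assert_eq!(our_musig_i, Some(3));
  map.remove(&our_musig_i.unwrap()).unwrap();
  assert_eq!(map.len(), 2);
}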
@@ -301,7 +310,9 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
   }

   fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
-    let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
+    // This preprocesses with just us as we only decide the participants after obtaining
+    // preprocesses
+    let participants = vec![<Ristretto as Ciphersuite>::generator() * self.key.deref()];
     self.signing_protocol().preprocess_internal(&participants)
   }
   // Get the preprocess for this confirmation.
@@ -314,8 +325,8 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
     preprocesses: HashMap<Participant, Vec<u8>>,
     key_pair: &KeyPair,
   ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
-    let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
-    let preprocesses = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses).1;
+    let (participants, preprocesses) =
+      threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses);
     let msg = set_keys_message(&self.spec.set(), key_pair);
     self.signing_protocol().share_internal(&participants, preprocesses, &msg)
   }
@@ -334,6 +345,8 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
     key_pair: &KeyPair,
     shares: HashMap<Participant, Vec<u8>>,
   ) -> Result<[u8; 64], Participant> {
+    assert_eq!(preprocesses.keys().collect::<HashSet<_>>(), shares.keys().collect::<HashSet<_>>());
+
     let shares = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares).1;

     let machine = self
@@ -296,7 +296,7 @@ impl ReadWrite for Transaction {
         let mut confirmation_share = [0; 32];
         reader.read_exact(&mut confirmation_share)?;

-        let signed = Signed::read_without_nonce(reader, 0)?;
+        let signed = Signed::read_without_nonce(reader, 1)?;

         Ok(Transaction::DkgConfirmationShare { attempt, confirmation_share, signed })
       }
@@ -446,11 +446,9 @@ impl TransactionTrait for Transaction {
       Transaction::DkgParticipation { signed, .. } => {
         TransactionKind::Signed(b"dkg".to_vec(), signed)
       }
-      Transaction::DkgConfirmationNonces { attempt, signed, .. } => {
-        TransactionKind::Signed((b"dkg_confirmation_nonces", attempt).encode(), signed)
-      }
+      Transaction::DkgConfirmationNonces { attempt, signed, .. } |
       Transaction::DkgConfirmationShare { attempt, signed, .. } => {
-        TransactionKind::Signed((b"dkg_confirmation_share", attempt).encode(), signed)
+        TransactionKind::Signed((b"dkg_confirmation", attempt).encode(), signed)
       }

       Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"),
@@ -521,7 +519,10 @@ impl Transaction {

       Transaction::DkgParticipation { .. } => 0,
       // Uses a nonce of 0 as it has an internal attempt counter we distinguish by
-      Transaction::DkgConfirmationNonces { .. } | Transaction::DkgConfirmationShare { .. } => 0,
+      Transaction::DkgConfirmationNonces { .. } => 0,
+      // Uses a nonce of 1 due to internal attempt counter and due to following
+      // DkgConfirmationNonces
+      Transaction::DkgConfirmationShare { .. } => 1,

       Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),

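For context on the final hunk: giving `DkgConfirmationNonces` nonce 0 and `DkgConfirmationShare` nonce 1 orders the two transactions per signer, so a share can never be sequenced ahead of its nonces. A pared-down restatement of that match (the enum is a stand-in, not the full `Transaction` type):

// Stand-in for the relevant Transaction variants.
#[allow(dead_code)]
enum Tx {
  DkgParticipation,
  DkgConfirmationNonces,
  DkgConfirmationShare,
}

// Per-signer nonce, mirroring the match in the hunk above.
fn nonce(tx: &Tx) -> u32 {
  match tx {
    Tx::DkgParticipation | Tx::DkgConfirmationNonces => 0,
    Tx::DkgConfirmationShare => 1,
  }
}

fn main() {
  // The share strictly follows the nonces for any given attempt.
  assert!(nonce(&Tx::DkgConfirmationNonces) < nonce(&Tx::DkgConfirmationShare));
}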