use core::time::Duration;

use std::collections::HashMap;

use zeroize::Zeroizing;

use rand_core::{RngCore, OsRng};

use ciphersuite::{Ciphersuite, Ristretto};

use frost::Participant;

use tokio::{time::sleep, sync::mpsc};

use serai_db::MemDb;

use processor_messages::{
  key_gen::{self, KeyGenId},
  CoordinatorMessage,
};

use tributary::{Transaction as TransactionTrait, Tributary};

use crate::{
  tributary::{TributaryDb, Transaction, TributarySpec, scanner::handle_new_blocks},
  tests::{
    MemProcessors, LocalP2p,
    tributary::{new_keys, new_spec, new_tributaries, run_tributaries, wait_for_tx_inclusion},
  },
};
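
// Tests that the tributary scanner reports a DKG to the processors correctly: a KeyGen
// Commitments message once every validator's commitments are on-chain, then a KeyGen Shares
// message once every validator's shares are on-chain.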
#[tokio::test]
async fn dkg_test() {
  let keys = new_keys(&mut OsRng);
  let spec = new_spec(&mut OsRng, &keys);

  let tributaries = new_tributaries(&keys, &spec).await;

  // Run the tributaries in the background
  tokio::spawn(run_tributaries(tributaries.clone()));

  let mut txs = vec![];
  // Create DKG commitments for each key
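  // (random bytes stand in for actual commitments, as this test only checks they're relayed)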
  for key in &keys {
    let attempt = 0;
    let mut commitments = vec![0; 256];
    OsRng.fill_bytes(&mut commitments);

    let mut tx = Transaction::DkgCommitments(attempt, commitments, Transaction::empty_signed());
    tx.sign(&mut OsRng, spec.genesis(), key, 0);
    txs.push(tx);
  }

  let block_before_tx = tributaries[0].1.tip().await;

  // Publish all commitments but one
  for (i, tx) in txs.iter().enumerate().skip(1) {
    assert!(tributaries[i].1.add_transaction(tx.clone()).await);
  }

  // Wait until these are included
  for tx in txs.iter().skip(1) {
    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
  }
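
  // Map each participant (these are 1-indexed) to the commitments they published, to compare
  // against the scanner's output below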
  let expected_commitments: HashMap<_, _> = txs
    .iter()
    .enumerate()
    .map(|(i, tx)| {
      if let Transaction::DkgCommitments(_, commitments, _) = tx {
        (Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments.clone())
      } else {
        panic!("txs had non-commitments");
      }
    })
    .collect();
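
  // Creates a fresh scanner DB and processor set for the given validator key, then has the
  // scanner catch up on every block produced so far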
  async fn new_processors(
    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
    spec: &TributarySpec,
    tributary: &Tributary<MemDb, Transaction, LocalP2p>,
  ) -> (TributaryDb<MemDb>, MemProcessors) {
    let mut scanner_db = TributaryDb(MemDb::new());
    let processors = MemProcessors::new();
    // Uses a brand new channel since this channel won't be used within this test
    handle_new_blocks(
      &mut scanner_db,
      key,
      &mpsc::unbounded_channel().0,
      &processors,
      spec,
      &tributary.reader(),
    )
    .await;
    (scanner_db, processors)
  }

  // Instantiate a scanner and verify it has nothing to report
  let (mut scanner_db, processors) = new_processors(&keys[0], &spec, &tributaries[0].1).await;
  assert!(processors.0.read().await.is_empty());

  // Publish the last commitment
  let block_before_tx = tributaries[0].1.tip().await;
  assert!(tributaries[0].1.add_transaction(txs[0].clone()).await);
  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;

  // Verify the scanner emits a KeyGen::Commitments message
  handle_new_blocks(
    &mut scanner_db,
    &keys[0],
    &mpsc::unbounded_channel().0,
    &processors,
    &spec,
    &tributaries[0].1.reader(),
  )
  .await;
  {
    let mut msgs = processors.0.write().await;
    assert_eq!(msgs.len(), 1);
    let msgs = msgs.get_mut(&spec.set().network).unwrap();
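    // A validator's Commitments message shouldn't include their own commitments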
    let mut expected_commitments = expected_commitments.clone();
    expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap());
    assert_eq!(
      msgs.pop_front().unwrap(),
      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
        id: KeyGenId { set: spec.set(), attempt: 0 },
        commitments: expected_commitments
      })
    );
    assert!(msgs.is_empty());
  }

  // Verify all keys exhibit this scanner behavior
  for (i, key) in keys.iter().enumerate() {
    let (_, processors) = new_processors(key, &spec, &tributaries[i].1).await;
    let mut msgs = processors.0.write().await;
    assert_eq!(msgs.len(), 1);
    let msgs = msgs.get_mut(&spec.set().network).unwrap();
    let mut expected_commitments = expected_commitments.clone();
    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
    assert_eq!(
      msgs.pop_front().unwrap(),
      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
        id: KeyGenId { set: spec.set(), attempt: 0 },
        commitments: expected_commitments
      })
    );
    assert!(msgs.is_empty());
  }

  // Now do shares
  let mut txs = vec![];
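  // Create a random share addressed to every other participant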
  for (k, key) in keys.iter().enumerate() {
    let attempt = 0;

    let mut shares = HashMap::new();
    for i in 0 .. keys.len() {
      if i != k {
        let mut share = vec![0; 256];
        OsRng.fill_bytes(&mut share);
        shares.insert(Participant::new((i + 1).try_into().unwrap()).unwrap(), share);
      }
    }

    let mut tx = Transaction::DkgShares(
      attempt,
      Participant::new((k + 1).try_into().unwrap()).unwrap(),
      shares,
      Transaction::empty_signed(),
    );
    tx.sign(&mut OsRng, spec.genesis(), key, 1);
    txs.push(tx);
  }

  let block_before_tx = tributaries[0].1.tip().await;
  for (i, tx) in txs.iter().enumerate().skip(1) {
    assert!(tributaries[i].1.add_transaction(tx.clone()).await);
  }
  for tx in txs.iter().skip(1) {
    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
  }

  // With just 4 sets of shares, nothing should happen yet
  handle_new_blocks(
    &mut scanner_db,
    &keys[0],
    &mpsc::unbounded_channel().0,
    &processors,
    &spec,
    &tributaries[0].1.reader(),
  )
  .await;
  assert_eq!(processors.0.read().await.len(), 1);
  assert!(processors.0.read().await[&spec.set().network].is_empty());

  // Publish the final set of shares
  let block_before_tx = tributaries[0].1.tip().await;
  assert!(tributaries[0].1.add_transaction(txs[0].clone()).await);
  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;

  // Each scanner should emit a distinct shares message
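  // shares_for(i) is the message expected by the validator at index i: the share each other
  // validator addressed to them, keyed by the sender's Participant ID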
  let shares_for = |i: usize| {
    CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
      id: KeyGenId { set: spec.set(), attempt: 0 },
      shares: txs
        .iter()
        .enumerate()
        .filter_map(|(l, tx)| {
          if let Transaction::DkgShares(_, _, shares, _) = tx {
            shares
              .get(&Participant::new((i + 1).try_into().unwrap()).unwrap())
              .cloned()
              .map(|share| (Participant::new((l + 1).try_into().unwrap()).unwrap(), share))
          } else {
            panic!("txs had non-shares");
          }
        })
        .collect::<HashMap<_, _>>(),
    })
  };

  // Any scanner which has handled the prior blocks should only emit the new event
  handle_new_blocks(
    &mut scanner_db,
    &keys[0],
    &mpsc::unbounded_channel().0,
    &processors,
    &spec,
    &tributaries[0].1.reader(),
  )
  .await;
  {
    let mut msgs = processors.0.write().await;
    assert_eq!(msgs.len(), 1);
    let msgs = msgs.get_mut(&spec.set().network).unwrap();
    assert_eq!(msgs.pop_front().unwrap(), shares_for(0));
    assert!(msgs.is_empty());
  }

  // Yet new scanners should emit all events
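  // (fresh scanners process the chain from genesis, so they report both commitments and shares)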
  for (i, key) in keys.iter().enumerate() {
    let (_, processors) = new_processors(key, &spec, &tributaries[i].1).await;
    let mut msgs = processors.0.write().await;
    assert_eq!(msgs.len(), 1);
    let msgs = msgs.get_mut(&spec.set().network).unwrap();
    let mut expected_commitments = expected_commitments.clone();
    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
    assert_eq!(
      msgs.pop_front().unwrap(),
      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
        id: KeyGenId { set: spec.set(), attempt: 0 },
        commitments: expected_commitments
      })
    );
    assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
    assert!(msgs.is_empty());
  }
}