use core::time::Duration;
use std::collections::HashMap;

use zeroize::Zeroizing;
use rand_core::{RngCore, OsRng};

use scale::Decode;

use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::Participant;

use sp_runtime::traits::Verify;

use serai_client::validator_sets::primitives::KeyPair;

use tokio::time::sleep;

use serai_db::{Db, MemDb, DbTxn};

use processor_messages::{
  key_gen::{self, KeyGenId},
  CoordinatorMessage,
};

use tributary::{TransactionTrait, Tributary};

use crate::{
  tributary::{Transaction, TributarySpec, scanner::handle_new_blocks},
  tests::{
    MemProcessors, LocalP2p,
    tributary::{new_keys, new_spec, new_tributaries, run_tributaries, wait_for_tx_inclusion},
  },
};
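
// This test walks the Tributary DKG flow end to end: it publishes DkgCommitments, DkgShares, and
// DkgConfirmed transactions, verifying the scanner forwards the expected key_gen messages to the
// processors and ultimately attempts to publish a Serai set_keys transaction.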
#[tokio::test]
async fn dkg_test() {
  let keys = new_keys(&mut OsRng);
  let spec = new_spec(&mut OsRng, &keys);

  let tributaries = new_tributaries(&keys, &spec).await;

  // Run the tributaries in the background
  tokio::spawn(run_tributaries(tributaries.clone()));

  let mut txs = vec![];
  // Create DKG commitments for each key
  for key in &keys {
    let attempt = 0;
    let mut commitments = vec![0; 256];
    OsRng.fill_bytes(&mut commitments);

    let mut tx =
      Transaction::DkgCommitments(attempt, vec![commitments], Transaction::empty_signed());
    tx.sign(&mut OsRng, spec.genesis(), key, 0);
    txs.push(tx);
  }
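
  // The commitments are just random bytes: the scanner forwards them as opaque blobs, so their
  // contents don't matter here. Each DkgCommitments TX is signed with nonce 0, the signer's first
  // signed Tributary transaction.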

  let block_before_tx = tributaries[0].1.tip().await;

  // Publish all commitments but one
  for (i, tx) in txs.iter().enumerate().skip(1) {
    assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
  }

  // Wait until these are included
  for tx in txs.iter().skip(1) {
    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
  }

  let expected_commitments: HashMap<_, _> = txs
    .iter()
    .enumerate()
    .map(|(i, tx)| {
      if let Transaction::DkgCommitments(_, commitments, _) = tx {
        (Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone())
      } else {
        panic!("txs had non-commitments");
      }
    })
    .collect();
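
  // Participant indices are 1-indexed, hence the `i + 1` when mapping a validator's position in
  // `keys` to its Participant.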

  async fn new_processors(
    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
    spec: &TributarySpec,
    tributary: &Tributary<MemDb, Transaction, LocalP2p>,
  ) -> (MemDb, MemProcessors) {
    let mut scanner_db = MemDb::new();
    let processors = MemProcessors::new();
    handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
      &mut scanner_db,
      key,
      |_, _, _, _, _| async {
        panic!("provided TX caused recognized_id to be called in new_processors")
      },
      &processors,
      |_, _| async { panic!("test tried to publish a new Serai TX in new_processors") },
      spec,
      &tributary.reader(),
    )
    .await;
    (scanner_db, processors)
  }
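
  // new_processors spins up a fresh scanner DB and MemProcessors for the given key, then runs
  // handle_new_blocks once to catch the scanner up. The panicking closures assert that catching
  // up neither calls recognized_id nor publishes a Serai transaction.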

  // Instantiate a scanner and verify it has nothing to report
  let (mut scanner_db, processors) = new_processors(&keys[0], &spec, &tributaries[0].1).await;
  assert!(processors.0.read().await.is_empty());

  // Publish the last commitment
  let block_before_tx = tributaries[0].1.tip().await;
  assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;

  // Verify the scanner emits a KeyGen::Commitments message
  handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
    &mut scanner_db,
    &keys[0],
    |_, _, _, _, _| async {
      panic!("provided TX caused recognized_id to be called after Commitments")
    },
    &processors,
    |_, _| async { panic!("test tried to publish a new Serai TX after Commitments") },
    &spec,
    &tributaries[0].1.reader(),
  )
  .await;
  {
    let mut msgs = processors.0.write().await;
    assert_eq!(msgs.len(), 1);
    let msgs = msgs.get_mut(&spec.set().network).unwrap();
    let mut expected_commitments = expected_commitments.clone();
    expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap());
    assert_eq!(
      msgs.pop_front().unwrap(),
      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
        id: KeyGenId { session: spec.set().session, attempt: 0 },
        commitments: expected_commitments
      })
    );
    assert!(msgs.is_empty());
  }
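
  // A scanner never receives its own commitments, which is why Participant 1 (keys[0]) was
  // removed from the expected map above.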

  // Verify all keys exhibit this scanner behavior
  for (i, key) in keys.iter().enumerate() {
    let (_, processors) = new_processors(key, &spec, &tributaries[i].1).await;
    let mut msgs = processors.0.write().await;
    assert_eq!(msgs.len(), 1);
    let msgs = msgs.get_mut(&spec.set().network).unwrap();
    let mut expected_commitments = expected_commitments.clone();
    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
    assert_eq!(
      msgs.pop_front().unwrap(),
      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
        id: KeyGenId { session: spec.set().session, attempt: 0 },
        commitments: expected_commitments
      })
    );
    assert!(msgs.is_empty());
  }

  // Now do shares
  let mut txs = vec![];
  for (k, key) in keys.iter().enumerate() {
    let attempt = 0;

    let mut shares = vec![vec![]];
    for i in 0 .. keys.len() {
      if i != k {
        let mut share = vec![0; 256];
        OsRng.fill_bytes(&mut share);
        shares.last_mut().unwrap().push(share);
      }
    }

    let mut tx = Transaction::DkgShares {
      attempt,
      shares,
      confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, 0),
      signed: Transaction::empty_signed(),
    };
    tx.sign(&mut OsRng, spec.genesis(), key, 1);
    txs.push(tx);
  }
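
  // Each validator holds a single key share in this test, so `shares` is one inner Vec containing
  // a random share for every other validator (no share is generated for the sender itself). The
  // DkgShares TX is signed with nonce 1, following DkgCommitments at nonce 0.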

  let block_before_tx = tributaries[0].1.tip().await;
  for (i, tx) in txs.iter().enumerate().skip(1) {
    assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
  }
  for tx in txs.iter().skip(1) {
    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
  }

  // With just 4 sets of shares, nothing should happen yet
  handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
    &mut scanner_db,
    &keys[0],
    |_, _, _, _, _| async {
      panic!("provided TX caused recognized_id to be called after some shares")
    },
    &processors,
    |_, _| async { panic!("test tried to publish a new Serai TX after some shares") },
    &spec,
    &tributaries[0].1.reader(),
  )
  .await;
  assert_eq!(processors.0.read().await.len(), 1);
  assert!(processors.0.read().await[&spec.set().network].is_empty());

  // Publish the final set of shares
  let block_before_tx = tributaries[0].1.tip().await;
  assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;

  // Each scanner should emit a distinct shares message
  let shares_for = |i: usize| {
    CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
      id: KeyGenId { session: spec.set().session, attempt: 0 },
      shares: vec![txs
        .iter()
        .enumerate()
        .filter_map(|(l, tx)| {
          if let Transaction::DkgShares { shares, .. } = tx {
            if i == l {
              None
            } else {
              let relative_i = i - (if i > l { 1 } else { 0 });
              Some((
                Participant::new((l + 1).try_into().unwrap()).unwrap(),
                shares[0][relative_i].clone(),
              ))
            }
          } else {
            panic!("txs had non-shares");
          }
        })
        .collect::<HashMap<_, _>>()],
    })
  };
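
  // shares_for(i) rebuilds the Shares message scanner i should receive: one share from every
  // other validator l. A sender never includes a share for itself, so the share meant for
  // recipient i sits at index i when i < l and at index i - 1 when i > l, hence `relative_i`.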

  // Any scanner which has handled the prior blocks should only emit the new event
  handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
    &mut scanner_db,
    &keys[0],
    |_, _, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") },
    &processors,
    |_, _| async { panic!("test tried to publish a new Serai TX") },
    &spec,
    &tributaries[0].1.reader(),
  )
  .await;
  {
    let mut msgs = processors.0.write().await;
    assert_eq!(msgs.len(), 1);
    let msgs = msgs.get_mut(&spec.set().network).unwrap();
    assert_eq!(msgs.pop_front().unwrap(), shares_for(0));
    assert!(msgs.is_empty());
  }

  // Yet new scanners should emit all events
  for (i, key) in keys.iter().enumerate() {
    let (_, processors) = new_processors(key, &spec, &tributaries[i].1).await;
    let mut msgs = processors.0.write().await;
    assert_eq!(msgs.len(), 1);
    let msgs = msgs.get_mut(&spec.set().network).unwrap();
    let mut expected_commitments = expected_commitments.clone();
    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
    assert_eq!(
      msgs.pop_front().unwrap(),
      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
        id: KeyGenId { session: spec.set().session, attempt: 0 },
        commitments: expected_commitments
      })
    );
    assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
    assert!(msgs.is_empty());
  }

  // Send DkgConfirmed
  let mut substrate_key = [0; 32];
  OsRng.fill_bytes(&mut substrate_key);
  let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];
  OsRng.fill_bytes(&mut network_key);
  let key_pair = KeyPair(serai_client::Public(substrate_key), network_key.try_into().unwrap());
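
  // The "generated" key pair is random: a 32-byte Substrate key plus a network key of 32 to 63
  // bytes, which exercises the variable-length encoding checked when the set_keys call is decoded
  // below.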

  let mut txs = vec![];
  for (i, key) in keys.iter().enumerate() {
    let attempt = 0;
    let mut scanner_db = &mut scanner_db;
    let (mut local_scanner_db, _) = new_processors(key, &spec, &tributaries[0].1).await;
    if i != 0 {
      scanner_db = &mut local_scanner_db;
    }
    let mut txn = scanner_db.txn();
    let share =
      crate::tributary::generated_key_pair::<MemDb>(&mut txn, key, &spec, &key_pair, 0).unwrap();
    txn.commit();

    let mut tx = Transaction::DkgConfirmed(attempt, share, Transaction::empty_signed());
    tx.sign(&mut OsRng, spec.genesis(), key, 2);
    txs.push(tx);
  }
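
  // keys[0] reuses the long-lived scanner_db (the same DB handed to the final handle_new_blocks
  // call below); every other validator derives its confirmation share against a fresh DB synced
  // via new_processors.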

  let block_before_tx = tributaries[0].1.tip().await;
  for (i, tx) in txs.iter().enumerate() {
    assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
  }
  for tx in txs.iter() {
    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
  }

  // The scanner should now attempt to publish a set_keys transaction carrying a valid signature
  handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
    &mut scanner_db,
    &keys[0],
    |_, _, _, _, _| async {
      panic!("provided TX caused recognized_id to be called after DKG confirmation")
    },
    &processors,
    |set, tx| {
      let spec = spec.clone();
      let key_pair = key_pair.clone();
      async move {
        // Version, Pallet index, Call index, Network, Key Pair (32-byte Substrate key +
        // 1-byte length prefix + network key), Signature
        let expected_len = 1 + 1 + 1 + 1 + 32 + 1 + key_pair.1.len() + 64;
        assert_eq!(tx.len(), expected_len);

        // Version (an unsigned extrinsic, version 4)
        assert_eq!(tx[0], 4);

        // Call
        let tx = serai_client::runtime::RuntimeCall::decode(&mut &tx[1 ..]).unwrap();
        match tx {
          serai_client::runtime::RuntimeCall::ValidatorSets(
            serai_client::runtime::validator_sets::Call::set_keys {
              network,
              key_pair: set_key_pair,
              signature,
            },
          ) => {
            assert_eq!(set, spec.set());
            assert_eq!(set.network, network);
            assert_eq!(key_pair, set_key_pair);
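            // The signature must verify against the MuSig aggregate key of the set's validators,
            // derived under the set's MuSig context.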
            assert!(signature.verify(
              &*serai_client::validator_sets::primitives::set_keys_message(&set, &key_pair),
              &serai_client::Public(
                frost::dkg::musig::musig_key::<Ristretto>(
                  &serai_client::validator_sets::primitives::musig_context(set),
                  &spec
                    .validators()
                    .into_iter()
                    .map(|(validator, _)| validator)
                    .collect::<Vec<_>>()
                )
                .unwrap()
                .to_bytes()
              ),
            ));
          }
          _ => panic!("Serai TX wasn't to set_keys"),
        }
      }
    },
    &spec,
    &tributaries[0].1.reader(),
  )
  .await;
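
  // Beyond the set_keys publication verified above, the DKG confirmation should produce no new
  // messages for the processors.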
  {
    assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty());
  }
}