use core::fmt::Debug;

use rand_core::{RngCore, OsRng};

use tributary::{ReadWrite, tests::random_signed};

use crate::tributary::{SignData, Transaction};

mod chain;
pub use chain::*;

mod tx;

mod dkg;
// TODO: Test the other transactions

mod handle_p2p;
mod sync;

/// Generate a random u32 by taking the top 32 bits of a u64 from the given RNG.
fn random_u32<R: RngCore>(rng: &mut R) -> u32 {
  u32::try_from(rng.next_u64() >> 32).unwrap()
}

/// Generate a random byte vector with a length sampled from 0 .. limit.
fn random_vec<R: RngCore>(rng: &mut R, limit: usize) -> Vec<u8> {
  let len = usize::try_from(rng.next_u64() % u64::try_from(limit).unwrap()).unwrap();
  let mut res = vec![0; len];
  rng.fill_bytes(&mut res);
  res
}

/// Generate a SignData with random contents for serialization round-trip tests.
fn random_sign_data<R: RngCore, const N: usize>(rng: &mut R) -> SignData<N> {
  let mut plan = [0; N];
  rng.fill_bytes(&mut plan);

  SignData {
    plan,
    attempt: random_u32(&mut OsRng),

    data: {
      // 1 ..= 255 entries, each under 512 bytes
      let mut res = vec![];
      for _ in 0 .. ((rng.next_u64() % 255) + 1) {
        res.push(random_vec(&mut OsRng, 512));
      }
      res
    },

    signed: random_signed(&mut OsRng),
  }
}

/// Assert a value round-trips through its ReadWrite serialization.
fn test_read_write<RW: Eq + Debug + ReadWrite>(value: RW) {
  assert_eq!(value, RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap());
}
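
// Example: `test_read_write(Transaction::SubstrateBlock(0))` serializes the transaction and
// asserts it reads back as an equal value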

#[test]
fn tx_size_limit() {
  use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, MAX_KEY_LEN};

  use tributary::TRANSACTION_SIZE_LIMIT;

  // The maximum amount of DKG coefficients, taking the amount of coefficients as the threshold
  // `t` of the t-of-n DKG ((2n / 3, rounded up) + 1 here)
  let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;
  let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients;

  // Handwave the DKG Commitments size as the size of the commitments to the coefficients,
  // plus 1024 bytes for all overhead
  let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024;
  assert!(
    u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
      (handwaved_dkg_commitments_size * max_key_shares_per_individual)
  );
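
  // Worked example under illustrative values (not the actual constants): if
  // MAX_KEY_SHARES_PER_SET were 150 and MAX_KEY_LEN were 32, then max_dkg_coefficients =
  // (300).div_ceil(3) + 1 = 101, max_key_shares_per_individual = 49, and this bound requires
  // TRANSACTION_SIZE_LIMIT >= ((101 * 32) + 1024) * 49 = 208_544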

  // Each share has 4 elements: an encryption key, a proof of possession (PoP, 2 elements),
  // and the message
  let elements_per_share = 4;
  let handwaved_dkg_shares_size =
    (elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024;
  assert!(
    u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
      (handwaved_dkg_shares_size * max_key_shares_per_individual)
  );
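
  // Under the same illustrative values, handwaved_dkg_shares_size = (4 * 32 * 150) + 1024 =
  // 20_224, so TRANSACTION_SIZE_LIMIT must also be >= 20_224 * 49 = 990_976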
}

#[test]
fn serialize_sign_data() {
  test_read_write(random_sign_data::<_, 3>(&mut OsRng));
  test_read_write(random_sign_data::<_, 8>(&mut OsRng));
  test_read_write(random_sign_data::<_, 16>(&mut OsRng));
  test_read_write(random_sign_data::<_, 24>(&mut OsRng));
}

#[test]
fn serialize_transaction() {
  {
    let mut commitments = vec![random_vec(&mut OsRng, 512)];
    for _ in 0 .. (OsRng.next_u64() % 100) {
      let mut temp = commitments[0].clone();
      OsRng.fill_bytes(&mut temp);
      commitments.push(temp);
    }
    test_read_write(Transaction::DkgCommitments(
      random_u32(&mut OsRng),
      commitments,
      random_signed(&mut OsRng),
    ));
  }

  {
    // This supports a variable share length, and a variable amount of sent shares, yet the
    // share length and amount of sent shares are expected to be constant among recipients
    let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap();
    let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap();

    // Create a valid vec of shares
    let mut shares = vec![];
    // Create up to 150 participants
    for _ in 0 .. ((OsRng.next_u64() % 150) + 1) {
      // Give each sender multiple shares
      let mut sender_shares = vec![];
      for _ in 0 .. amount_of_shares {
        let mut share = vec![0; share_len];
        OsRng.fill_bytes(&mut share);
        sender_shares.push(share);
      }
      shares.push(sender_shares);
    }
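
    // `shares` is now a participants x amount_of_shares matrix of share_len-byte shares, the
    // shape a single sender's DkgShares transaction carries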

    test_read_write(Transaction::DkgShares {
      attempt: random_u32(&mut OsRng),
      shares,
      confirmation_nonces: {
        let mut nonces = [0; 64];
        OsRng.fill_bytes(&mut nonces);
        nonces
      },
      signed: random_signed(&mut OsRng),
    });
  }

  test_read_write(Transaction::DkgConfirmed(
    random_u32(&mut OsRng),
    {
      let mut share = [0; 32];
      OsRng.fill_bytes(&mut share);
      share
    },
    random_signed(&mut OsRng),
  ));

  {
    let mut block = [0; 32];
    OsRng.fill_bytes(&mut block);
    let mut batch = [0; 5];
    OsRng.fill_bytes(&mut batch);
    test_read_write(Transaction::Batch(block, batch));
  }

  test_read_write(Transaction::SubstrateBlock(OsRng.next_u64()));

  test_read_write(Transaction::BatchPreprocess(random_sign_data(&mut OsRng)));
  test_read_write(Transaction::BatchShare(random_sign_data(&mut OsRng)));

  test_read_write(Transaction::SignPreprocess(random_sign_data(&mut OsRng)));
  test_read_write(Transaction::SignShare(random_sign_data(&mut OsRng)));

  {
    let mut plan = [0; 32];
    OsRng.fill_bytes(&mut plan);

    // tx_hash is variable-length, presumably as external networks have differently-sized
    // transaction IDs
    let mut tx_hash = vec![0; (OsRng.next_u64() % 64).try_into().unwrap()];
    OsRng.fill_bytes(&mut tx_hash);

    test_read_write(Transaction::SignCompleted {
      plan,
      tx_hash,
      first_signer: random_signed(&mut OsRng).signer,
      signature: random_signed(&mut OsRng).signature,
    });
  }
}