7 Commits

Author SHA1 Message Date
Luke Parker
470b5f7d9e Increase time allowed for the DKG on the GH CI 2024-08-06 05:44:49 -04:00
Luke Parker
01de73efd9 Updating existing TX size limit test for the new DKG parameters 2024-08-06 05:43:56 -04:00
Luke Parker
dbf32d515f Correct ThresholdKeys serialization in modular-frost test 2024-08-06 05:37:19 -04:00
Luke Parker
e9d09ef4e2 Send/Recv Participation one at a time
Sending all of the participations, then attempting to receive them all in the
expected order, wasn't working even with notable delays between sending
messages. This points to the mempool not working as expected...
2024-08-06 03:03:12 -04:00
Luke Parker
ec629308d6 Update a few comments in the validator-sets pallet 2024-08-06 01:14:29 -04:00
Luke Parker
f27fb9b652 Correct error in the Processor spec document 2024-08-06 01:10:43 -04:00
Luke Parker
c790efa212 Update TX size limit
We no longer have to support the ridiculous case of having 49 DKG
participations within a 101-of-150 DKG. The limit does remain quite high due to
needing to _sign_ so many times. It may be optimal for parties with multiple
key shares to independently send their preprocesses/shares (despite the
overhead that'll cause with signatures and the transaction structure).
2024-08-06 01:10:28 -04:00
7 changed files with 54 additions and 43 deletions

View File

@@ -84,23 +84,25 @@ fn tx_size_limit() {
use tributary::TRANSACTION_SIZE_LIMIT;
let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;
- let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients;
- // Handwave the DKG Commitments size as the size of the commitments to the coefficients and
- // 1024 bytes for all overhead
- let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024;
- assert!(
-   u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
-     (handwaved_dkg_commitments_size * max_key_shares_per_individual)
- );
+ // n coefficients
+ // 2 ECDH values per recipient, and the encrypted share
+ let elements_outside_of_proof = max_dkg_coefficients + ((2 + 1) * MAX_KEY_SHARES_PER_SET);
+ // Then Pedersen Vector Commitments for each DH done, and the associated overhead in the proof
+ // It's handwaved as one commitment per DH, where we do 2 per coefficient and 1 for the explicit
+ // ECDHs
+ let vector_commitments = (2 * max_dkg_coefficients) + (2 * MAX_KEY_SHARES_PER_SET);
+ // Then we have commitments to the `t` polynomial of length 2 + 2 nc, where nc is the amount of
+ // commitments
+ let t_commitments = 2 + (2 * vector_commitments);
+ // The remainder of the proof should be ~30 elements
+ let proof_elements = 30;
- // Encryption key, PoP (2 elements), message
- let elements_per_share = 4;
- let handwaved_dkg_shares_size =
-   (elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024;
- assert!(
-   u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
-     (handwaved_dkg_shares_size * max_key_shares_per_individual)
- );
+ let handwaved_dkg_size =
+   ((elements_outside_of_proof + vector_commitments + t_commitments + proof_elements) *
+     MAX_KEY_LEN) +
+     1024;
+ // Further scale by two in case of any errors in the above
+ assert!(u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >= (2 * handwaved_dkg_size));
}
#[test]
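To make the updated bound concrete, here is a rough, editorial evaluation of the same arithmetic (not part of the diff). It assumes MAX_KEY_SHARES_PER_SET = 150, per the "101-of-150" case named in the commit message and in the TRANSACTION_SIZE_LIMIT comment below; the 96-byte element size mentioned in the comments is only an illustrative value, and MAX_KEY_LEN is left as a parameter since its actual value isn't shown here.

fn handwaved_dkg_size(max_key_len: u32) -> u32 {
  let max_key_shares_per_set: u32 = 150; // assumed, per the "101-of-150" references
  let max_dkg_coefficients = (max_key_shares_per_set * 2).div_ceil(3) + 1; // 101
  // 150 - 101 = 49, matching the "49 key shares" figure in the commit message and the new
  // TRANSACTION_SIZE_LIMIT comment
  let _max_key_shares_per_individual = max_key_shares_per_set - max_dkg_coefficients;
  let elements_outside_of_proof = max_dkg_coefficients + (3 * max_key_shares_per_set); // 551
  let vector_commitments = (2 * max_dkg_coefficients) + (2 * max_key_shares_per_set); // 502
  let t_commitments = 2 + (2 * vector_commitments); // 1006
  let proof_elements = 30;
  // 2089 elements in total; e.g. with 96-byte elements this is roughly 200 KB, so even after the
  // 2x safety margin the result stays well under the new 2_000_000-byte TRANSACTION_SIZE_LIMIT
  ((elements_outside_of_proof + vector_commitments + t_commitments + proof_elements) * max_key_len)
    + 1024
}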

View File

@@ -50,13 +50,17 @@ pub(crate) use crate::tendermint::*;
pub mod tests;
/// Size limit for an individual transaction.
- pub const TRANSACTION_SIZE_LIMIT: usize = 3_000_000;
+ // This needs to be big enough to participate in a 101-of-150 eVRF DKG with each element taking
+ // `MAX_KEY_LEN`. This also needs to be big enough to participate in signing 520 Bitcoin inputs
+ // with 49 key shares, and signing 120 Monero inputs with 49 key shares.
+ // TODO: Add a test for these properties
+ pub const TRANSACTION_SIZE_LIMIT: usize = 2_000_000;
/// Amount of transactions a single account may have in the mempool.
pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
/// Block size limit.
- // This targets a growth limit of roughly 45 GB a day, under load, in order to prevent a malicious
+ // This targets a growth limit of roughly 30 GB a day, under load, in order to prevent a malicious
// participant from flooding disks and causing out of space errors in other processes.
- pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;
+ pub const BLOCK_SIZE_LIMIT: usize = 2_001_000;
pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
pub(crate) const TRANSACTION_MESSAGE: u8 = 1;
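As an editorial sanity check on the growth figures in the comments above (not part of the diff), assuming a 6-second target block time, which is an assumption not stated in this changeset:

// Worst-case daily chain growth under full blocks, assuming a 6-second block time.
const ASSUMED_BLOCK_TIME_SECS: u64 = 6;
const BLOCKS_PER_DAY: u64 = 86_400 / ASSUMED_BLOCK_TIME_SECS; // 14,400
// 14,400 * 2_001_000 bytes ~= 28.8 GB/day, consistent with "roughly 30 GB a day"
const NEW_DAILY_GROWTH_BYTES: u64 = BLOCKS_PER_DAY * 2_001_000;
// 14,400 * 3_001_000 bytes ~= 43.2 GB/day, consistent with the prior "roughly 45 GB a day"
const OLD_DAILY_GROWTH_BYTES: u64 = BLOCKS_PER_DAY * 3_001_000;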

View File

@@ -122,6 +122,7 @@ fn vectors_to_multisig_keys<C: Curve>(vectors: &Vectors) -> HashMap<Participant,
serialized.extend(vectors.threshold.to_le_bytes());
serialized.extend(u16::try_from(shares.len()).unwrap().to_le_bytes());
serialized.extend(i.to_le_bytes());
+ serialized.push(1);
serialized.extend(shares[usize::from(i) - 1].to_repr().as_ref());
for share in &verification_shares {
serialized.extend(share.to_bytes().as_ref());

View File

@@ -16,8 +16,9 @@ Serai blockchain. The second instance is for a key to use on the external
network. This pair of DKG instances is considered a single instance of Serai's
overall DKG protocol.
- The participations in both protocols are sent to the coordinator in a single
- `key_gen::ProcessorMessage::Participation`.
+ The participations in both protocols are sent to the coordinator in
+ `key_gen::ProcessorMessage::Participation` messages, individually, as they come
+ in.
### Key Gen Participations

View File

@@ -1016,12 +1016,9 @@ pub mod pallet {
#[pallet::weight(0)] // TODO
pub fn allocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult {
let validator = ensure_signed(origin)?;
- // If this network utilizes an embedded elliptic curve, require the validator to have set the
- // appropriate key
+ // If this network utilizes embedded elliptic curve(s), require the validator to have set the
+ // appropriate key(s)
for embedded_elliptic_curve in network.embedded_elliptic_curves() {
- // Require an Embedwards25519 embedded curve key and a key for the curve for this network
- // The Embedwards25519 embedded curve key is required for the DKG for the Substrate key
- // used to oraclize events with
if !EmbeddedEllipticCurveKeys::<T>::contains_key(validator, *embedded_elliptic_curve) {
Err(Error::<T>::MissingEmbeddedEllipticCurveKey)?;
}

View File

@@ -30,7 +30,8 @@ pub async fn key_gen<C: Ciphersuite>(
// This is distinct from the result of evrf_public_keys for each processor, as there'll be some
// ordering algorithm on-chain which won't match our ordering
let mut evrf_public_keys_as_on_chain = None;
- for (i, processor) in processors.iter_mut().enumerate() {
+ for processor in processors.iter_mut() {
+ // Receive GenerateKey
let msg = processor.recv_message().await;
match &msg {
CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::GenerateKey {
@@ -59,30 +60,33 @@ pub async fn key_gen<C: Ciphersuite>(
evrf_public_keys: evrf_public_keys_as_on_chain.clone().unwrap(),
})
);
+ }
- processor
+ for i in 0 .. coordinators {
+ // Send Participation
+ processors[i]
.send_message(messages::key_gen::ProcessorMessage::Participation {
session,
participation: vec![u8::try_from(u16::from(participant_is[i])).unwrap()],
})
.await;
+ // Sleep so this participation gets included, before moving to the next participation
+ wait_for_tributary().await;
+ wait_for_tributary().await;
}
- // Sleep so this participation gets included
- for _ in 0 .. 2 {
- wait_for_tributary().await;
- }
+ wait_for_tributary().await;
- for processor in processors.iter_mut() {
#[allow(clippy::needless_range_loop)] // This wouldn't improve readability/clarity
for i in 0 .. coordinators {
// Have every other processor recv this message too
+ for processor in processors.iter_mut() {
assert_eq!(
processor.recv_message().await,
- CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::Participation {
- session,
- participant: participant_is[i],
- participation: vec![u8::try_from(u16::from(participant_is[i])).unwrap()],
- })
+ messages::CoordinatorMessage::KeyGen(
+ messages::key_gen::CoordinatorMessage::Participation {
+ session,
+ participant: participant_is[i],
+ participation: vec![u8::try_from(u16::from(participant_is[i])).unwrap()],
+ }
+ )
);
}
}

View File

@@ -48,7 +48,9 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair {
.await;
}
// This takes forever on debug, as we use debug builds in these tests
- tokio::time::sleep(core::time::Duration::from_secs(600)).await;
+ let ci_scaling_factor =
+   1 + u64::from(u8::from(std::env::var("GITHUB_CI") == Ok("true".to_string())));
+ tokio::time::sleep(core::time::Duration::from_secs(600 * ci_scaling_factor)).await;
interact_with_all(coordinators, |participant, msg| match msg {
messages::key_gen::ProcessorMessage::Participation { session: this_session, participation } => {
assert_eq!(this_session, session);
@@ -71,7 +73,7 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair {
}
}
// This also takes a while on debug
- tokio::time::sleep(core::time::Duration::from_secs(240)).await;
+ tokio::time::sleep(core::time::Duration::from_secs(240 * ci_scaling_factor)).await;
interact_with_all(coordinators, |_, msg| match msg {
messages::key_gen::ProcessorMessage::GeneratedKeyPair {
session: this_session,