7 Commits

Author SHA1 Message Date
Luke Parker
470b5f7d9e Increase time allowed for the DKG on the GH CI 2024-08-06 05:44:49 -04:00
Luke Parker
01de73efd9 Updating existing TX size limit test for the new DKG parameters 2024-08-06 05:43:56 -04:00
Luke Parker
dbf32d515f Correct ThresholdKeys serialization in modular-frost test 2024-08-06 05:37:19 -04:00
Luke Parker
e9d09ef4e2 Send/Recv Participation one at a time
Sending all, then attempting to receive all in an expected order, wasn't working
even with notable delays between sending messages. This points to the mempool
not working as expected...
2024-08-06 03:03:12 -04:00
Luke Parker
ec629308d6 Update a few comments in the validator-sets pallet 2024-08-06 01:14:29 -04:00
Luke Parker
f27fb9b652 Correct error in the Processor spec document 2024-08-06 01:10:43 -04:00
Luke Parker
c790efa212 Update TX size limit
We now no longer have to support the ridiculous case of having 49 DKG
participations within a 101-of-150 DKG. It does remain quite high due to
needing to _sign_ so many times. It may be optimal for parties with multiple
key shares to independently send their preprocesses/shares (despite the
overhead that'll cause with signatures and the transaction structure).
2024-08-06 01:10:28 -04:00
7 changed files with 54 additions and 43 deletions

View File

@@ -84,23 +84,25 @@ fn tx_size_limit() {
use tributary::TRANSACTION_SIZE_LIMIT; use tributary::TRANSACTION_SIZE_LIMIT;
let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1; let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;
let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients; // n coefficients
// Handwave the DKG Commitments size as the size of the commitments to the coefficients and // 2 ECDH values per recipient, and the encrypted share
// 1024 bytes for all overhead let elements_outside_of_proof = max_dkg_coefficients + ((2 + 1) * MAX_KEY_SHARES_PER_SET);
let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024; // Then Pedersen Vector Commitments for each DH done, and the associated overhead in the proof
assert!( // It's handwaved as one commitment per DH, where we do 2 per coefficient and 1 for the explicit
u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >= // ECDHs
(handwaved_dkg_commitments_size * max_key_shares_per_individual) let vector_commitments = (2 * max_dkg_coefficients) + (2 * MAX_KEY_SHARES_PER_SET);
); // Then we have commitments to the `t` polynomial of length 2 + 2 nc, where nc is the amount of
// commitments
let t_commitments = 2 + (2 * vector_commitments);
// The remainder of the proof should be ~30 elements
let proof_elements = 30;
// Encryption key, PoP (2 elements), message let handwaved_dkg_size =
let elements_per_share = 4; ((elements_outside_of_proof + vector_commitments + t_commitments + proof_elements) *
let handwaved_dkg_shares_size = MAX_KEY_LEN) +
(elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024; 1024;
assert!( // Further scale by two in case of any errors in the above
u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >= assert!(u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >= (2 * handwaved_dkg_size));
(handwaved_dkg_shares_size * max_key_shares_per_individual)
);
} }
#[test] #[test]

View File

@@ -50,13 +50,17 @@ pub(crate) use crate::tendermint::*;
pub mod tests; pub mod tests;
/// Size limit for an individual transaction. /// Size limit for an individual transaction.
pub const TRANSACTION_SIZE_LIMIT: usize = 3_000_000; // This needs to be big enough to participate in a 101-of-150 eVRF DKG with each element taking
// `MAX_KEY_LEN`. This also needs to be big enough to participate in signing 520 Bitcoin inputs
// with 49 key shares, and signing 120 Monero inputs with 49 key shares.
// TODO: Add a test for these properties
pub const TRANSACTION_SIZE_LIMIT: usize = 2_000_000;
/// Amount of transactions a single account may have in the mempool. /// Amount of transactions a single account may have in the mempool.
pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50; pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
/// Block size limit. /// Block size limit.
// This targets a growth limit of roughly 45 GB a day, under load, in order to prevent a malicious // This targets a growth limit of roughly 30 GB a day, under load, in order to prevent a malicious
// participant from flooding disks and causing out of space errors in other processes. // participant from flooding disks and causing out of space errors in other processes.
pub const BLOCK_SIZE_LIMIT: usize = 3_001_000; pub const BLOCK_SIZE_LIMIT: usize = 2_001_000;
pub(crate) const TENDERMINT_MESSAGE: u8 = 0; pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
pub(crate) const TRANSACTION_MESSAGE: u8 = 1; pub(crate) const TRANSACTION_MESSAGE: u8 = 1;

View File

@@ -122,6 +122,7 @@ fn vectors_to_multisig_keys<C: Curve>(vectors: &Vectors) -> HashMap<Participant,
serialized.extend(vectors.threshold.to_le_bytes()); serialized.extend(vectors.threshold.to_le_bytes());
serialized.extend(u16::try_from(shares.len()).unwrap().to_le_bytes()); serialized.extend(u16::try_from(shares.len()).unwrap().to_le_bytes());
serialized.extend(i.to_le_bytes()); serialized.extend(i.to_le_bytes());
serialized.push(1);
serialized.extend(shares[usize::from(i) - 1].to_repr().as_ref()); serialized.extend(shares[usize::from(i) - 1].to_repr().as_ref());
for share in &verification_shares { for share in &verification_shares {
serialized.extend(share.to_bytes().as_ref()); serialized.extend(share.to_bytes().as_ref());

View File

@@ -16,8 +16,9 @@ Serai blockchain. The second instance is for a key to use on the external
network. This pair of DKG instances is considered a single instance of Serai's network. This pair of DKG instances is considered a single instance of Serai's
overall DKG protocol. overall DKG protocol.
The participations in both protocols are sent to the coordinator in a single The participations in both protocols are sent to the coordinator in
`key_gen::ProcessorMessage::Participation`. `key_gen::ProcessorMessage::Participation` messages, individually, as they come
in.
### Key Gen Participations ### Key Gen Participations

View File

@@ -1016,12 +1016,9 @@ pub mod pallet {
#[pallet::weight(0)] // TODO #[pallet::weight(0)] // TODO
pub fn allocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult { pub fn allocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult {
let validator = ensure_signed(origin)?; let validator = ensure_signed(origin)?;
// If this network utilizes an embedded elliptic curve, require the validator to have set the // If this network utilizes embedded elliptic curve(s), require the validator to have set the
// appropriate key // appropriate key(s)
for embedded_elliptic_curve in network.embedded_elliptic_curves() { for embedded_elliptic_curve in network.embedded_elliptic_curves() {
// Require an Embedwards25519 embedded curve key and a key for the curve for this network
// The Embedwards25519 embedded curve key is required for the DKG for the Substrate key
// used to oraclize events with
if !EmbeddedEllipticCurveKeys::<T>::contains_key(validator, *embedded_elliptic_curve) { if !EmbeddedEllipticCurveKeys::<T>::contains_key(validator, *embedded_elliptic_curve) {
Err(Error::<T>::MissingEmbeddedEllipticCurveKey)?; Err(Error::<T>::MissingEmbeddedEllipticCurveKey)?;
} }

View File

@@ -30,7 +30,8 @@ pub async fn key_gen<C: Ciphersuite>(
// This is distinct from the result of evrf_public_keys for each processor, as there'll have some // This is distinct from the result of evrf_public_keys for each processor, as there'll have some
// ordering algorithm on-chain which won't match our ordering // ordering algorithm on-chain which won't match our ordering
let mut evrf_public_keys_as_on_chain = None; let mut evrf_public_keys_as_on_chain = None;
for (i, processor) in processors.iter_mut().enumerate() { for processor in processors.iter_mut() {
// Receive GenerateKey
let msg = processor.recv_message().await; let msg = processor.recv_message().await;
match &msg { match &msg {
CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::GenerateKey { CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::GenerateKey {
@@ -59,30 +60,33 @@ pub async fn key_gen<C: Ciphersuite>(
evrf_public_keys: evrf_public_keys_as_on_chain.clone().unwrap(), evrf_public_keys: evrf_public_keys_as_on_chain.clone().unwrap(),
}) })
); );
}
processor for i in 0 .. coordinators {
// Send Participation
processors[i]
.send_message(messages::key_gen::ProcessorMessage::Participation { .send_message(messages::key_gen::ProcessorMessage::Participation {
session, session,
participation: vec![u8::try_from(u16::from(participant_is[i])).unwrap()], participation: vec![u8::try_from(u16::from(participant_is[i])).unwrap()],
}) })
.await; .await;
// Sleep so this participation gets included, before moving to the next participation // Sleep so this participation gets included
wait_for_tributary().await; for _ in 0 .. 2 {
wait_for_tributary().await; wait_for_tributary().await;
} }
wait_for_tributary().await; // Have every other processor recv this message too
for processor in processors.iter_mut() { for processor in processors.iter_mut() {
#[allow(clippy::needless_range_loop)] // This wouldn't improve readability/clarity
for i in 0 .. coordinators {
assert_eq!( assert_eq!(
processor.recv_message().await, processor.recv_message().await,
CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::Participation { messages::CoordinatorMessage::KeyGen(
session, messages::key_gen::CoordinatorMessage::Participation {
participant: participant_is[i], session,
participation: vec![u8::try_from(u16::from(participant_is[i])).unwrap()], participant: participant_is[i],
}) participation: vec![u8::try_from(u16::from(participant_is[i])).unwrap()],
}
)
); );
} }
} }

View File

@@ -48,7 +48,9 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair {
.await; .await;
} }
// This takes forever on debug, as we use in these tests // This takes forever on debug, as we use in these tests
tokio::time::sleep(core::time::Duration::from_secs(600)).await; let ci_scaling_factor =
1 + u64::from(u8::from(std::env::var("GITHUB_CI") == Ok("true".to_string())));
tokio::time::sleep(core::time::Duration::from_secs(600 * ci_scaling_factor)).await;
interact_with_all(coordinators, |participant, msg| match msg { interact_with_all(coordinators, |participant, msg| match msg {
messages::key_gen::ProcessorMessage::Participation { session: this_session, participation } => { messages::key_gen::ProcessorMessage::Participation { session: this_session, participation } => {
assert_eq!(this_session, session); assert_eq!(this_session, session);
@@ -71,7 +73,7 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair {
} }
} }
// This also takes a while on debug // This also takes a while on debug
tokio::time::sleep(core::time::Duration::from_secs(240)).await; tokio::time::sleep(core::time::Duration::from_secs(240 * ci_scaling_factor)).await;
interact_with_all(coordinators, |_, msg| match msg { interact_with_all(coordinators, |_, msg| match msg {
messages::key_gen::ProcessorMessage::GeneratedKeyPair { messages::key_gen::ProcessorMessage::GeneratedKeyPair {
session: this_session, session: this_session,