2023-04-11 19:04:53 -04:00
|
|
|
use core::fmt::Debug;
|
|
|
|
|
|
|
|
|
|
use rand_core::{RngCore, OsRng};
|
|
|
|
|
|
2023-12-18 09:13:09 -05:00
|
|
|
use ciphersuite::{group::Group, Ciphersuite, Ristretto};
|
|
|
|
|
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
use scale::{Encode, Decode};
|
2023-12-14 15:53:41 -05:00
|
|
|
use serai_client::{
|
|
|
|
|
primitives::{SeraiAddress, Signature},
|
2024-01-29 03:48:53 -05:00
|
|
|
validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet, KeyPair},
|
2023-12-14 15:53:41 -05:00
|
|
|
};
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
use processor_messages::coordinator::SubstrateSignableId;
|
|
|
|
|
|
2023-12-01 23:31:18 -05:00
|
|
|
use tributary::{ReadWrite, tests::random_signed_with_nonce};
|
2023-04-11 19:04:53 -04:00
|
|
|
|
2023-12-14 15:53:41 -05:00
|
|
|
use crate::tributary::{Label, SignData, Transaction, scanner::PublishSeraiTransaction};
|
2023-04-11 19:04:53 -04:00
|
|
|
|
2023-04-22 10:49:52 -04:00
|
|
|
mod chain;
|
2023-04-22 22:27:12 -04:00
|
|
|
pub use chain::*;
|
|
|
|
|
|
|
|
|
|
mod tx;
|
2023-04-22 10:49:52 -04:00
|
|
|
|
2023-04-23 01:00:46 -04:00
|
|
|
mod dkg;
|
2023-04-23 03:48:50 -04:00
|
|
|
// TODO: Test the other transactions
|
2023-04-23 01:00:46 -04:00
|
|
|
|
2023-04-24 02:50:03 -04:00
|
|
|
mod handle_p2p;
|
|
|
|
|
mod sync;
|
|
|
|
|
|
2023-12-14 15:53:41 -05:00
|
|
|
#[async_trait::async_trait]
|
|
|
|
|
impl PublishSeraiTransaction for () {
|
2023-12-14 23:45:15 -05:00
|
|
|
async fn publish_set_keys(
|
2023-12-14 15:53:41 -05:00
|
|
|
&self,
|
2024-01-05 23:36:33 -05:00
|
|
|
_db: &(impl Sync + serai_db::Get),
|
2023-12-14 15:53:41 -05:00
|
|
|
_set: ValidatorSet,
|
2023-12-14 23:45:15 -05:00
|
|
|
_removed: Vec<SeraiAddress>,
|
|
|
|
|
_key_pair: KeyPair,
|
2023-12-14 15:53:41 -05:00
|
|
|
_signature: Signature,
|
|
|
|
|
) {
|
2023-12-14 23:45:15 -05:00
|
|
|
panic!("publish_set_keys was called in test")
|
2023-12-14 15:53:41 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-11 19:04:53 -04:00
|
|
|
fn random_u32<R: RngCore>(rng: &mut R) -> u32 {
|
|
|
|
|
u32::try_from(rng.next_u64() >> 32).unwrap()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fn random_vec<R: RngCore>(rng: &mut R, limit: usize) -> Vec<u8> {
|
|
|
|
|
let len = usize::try_from(rng.next_u64() % u64::try_from(limit).unwrap()).unwrap();
|
|
|
|
|
let mut res = vec![0; len];
|
|
|
|
|
rng.fill_bytes(&mut res);
|
|
|
|
|
res
|
|
|
|
|
}
|
|
|
|
|
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
fn random_sign_data<R: RngCore, Id: Clone + PartialEq + Eq + Debug + Encode + Decode>(
|
|
|
|
|
rng: &mut R,
|
|
|
|
|
plan: Id,
|
Coordinator Cleanup (#481)
* Move logic for evaluating if a cosign should occur to its own file
Cleans it up and makes it more robust.
* Have expected_next_batch return an error instead of retrying
While convenient to offer an error-free implementation, it potentially caused
very long lived lock acquisitions in handle_processor_message.
* Unify and clean DkgConfirmer and DkgRemoval
Does so via adding a new file for the common code, SigningProtocol.
Modifies from_cache to return the preprocess with the machine, as there's no
reason not to. Also removes an unused Result around the type.
Clarifies the security around deterministic nonces, removing them for
saved-to-disk cached preprocesses. The cached preprocesses are encrypted as the
DB is not a proper secret store.
Moves arguments always present in the protocol from function arguments into the
struct itself.
Removes the horribly ugly code in DkgRemoval, fixing multiple issues present
with it which would cause it to fail on use.
* Set SeraiBlockNumber in cosign.rs as it's used by the cosigning protocol
* Remove unnecessary Clone from lambdas in coordinator
* Remove the EventDb from Tributary scanner
We used per-Transaction DB TXNs so on error, we don't have to rescan the entire
block yet only the rest of it. We prevented scanning multiple transactions by
tracking which we already had.
This is over-engineered and not worth it.
* Implement borsh for HasEvents, removing the manual encoding
* Merge DkgConfirmer and DkgRemoval into signing_protocol.rs
Fixes a bug in DkgConfirmer which would cause it to improperly handle indexes
if any validator had multiple key shares.
* Strictly type DataSpecification's Label
* Correct threshold_i_map_to_keys_and_musig_i_map
It didn't include the participant's own index and accordingly was offset.
* Create TributaryBlockHandler
This struct contains all variables prior passed to handle_block and stops them
from being passed around again and again.
This also ensures fatal_slash is only called while handling a block, as needed
as it expects to operate under perfect consensus.
* Inline accumulate, store confirmation nonces with shares
Inlining accumulate makes sense due to the amount of data accumulate needed to
be passed.
Storing confirmation nonces with shares ensures that both are available or
neither. Prior, one could be yet the other may not have been (requiring an
assert in runtime to ensure we didn't bungle it somehow).
* Create helper functions for handling DkgRemoval/SubstrateSign/Sign Tributary TXs
* Move Label into SignData
All of our transactions which use SignData end up with the same common usage
pattern for Label, justifying this.
Removes 3 transactions, explicitly de-duplicating their handlers.
* Remove CurrentlyCompletingKeyPair for the non-contextual DkgKeyPair
* Remove the manual read/write for TributarySpec for borsh
This struct doesn't have any optimizations booned by the manual impl. Using
borsh reduces our scope.
* Use temporary variables to further minimize LoC in tributary handler
* Remove usage of tuples for non-trivial Tributary transactions
* Remove serde from dkg
serde could be used to deserialize intenrally inconsistent objects which could
lead to panics or faults.
The BorshDeserialize derives have been replaced with a manual implementation
which won't produce inconsistent objects.
* Abstract Future generics using new trait definitions in coordinator
* Move published_signed_transaction to tributary/mod.rs to reduce the size of main.rs
* Split coordinator/src/tributary/mod.rs into spec.rs and transaction.rs
2023-12-10 20:21:44 -05:00
|
|
|
label: Label,
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
) -> SignData<Id> {
|
2023-04-11 19:04:53 -04:00
|
|
|
SignData {
|
|
|
|
|
plan,
|
|
|
|
|
attempt: random_u32(&mut OsRng),
|
Coordinator Cleanup (#481)
* Move logic for evaluating if a cosign should occur to its own file
Cleans it up and makes it more robust.
* Have expected_next_batch return an error instead of retrying
While convenient to offer an error-free implementation, it potentially caused
very long lived lock acquisitions in handle_processor_message.
* Unify and clean DkgConfirmer and DkgRemoval
Does so via adding a new file for the common code, SigningProtocol.
Modifies from_cache to return the preprocess with the machine, as there's no
reason not to. Also removes an unused Result around the type.
Clarifies the security around deterministic nonces, removing them for
saved-to-disk cached preprocesses. The cached preprocesses are encrypted as the
DB is not a proper secret store.
Moves arguments always present in the protocol from function arguments into the
struct itself.
Removes the horribly ugly code in DkgRemoval, fixing multiple issues present
with it which would cause it to fail on use.
* Set SeraiBlockNumber in cosign.rs as it's used by the cosigning protocol
* Remove unnecessary Clone from lambdas in coordinator
* Remove the EventDb from Tributary scanner
We used per-Transaction DB TXNs so on error, we don't have to rescan the entire
block yet only the rest of it. We prevented scanning multiple transactions by
tracking which we already had.
This is over-engineered and not worth it.
* Implement borsh for HasEvents, removing the manual encoding
* Merge DkgConfirmer and DkgRemoval into signing_protocol.rs
Fixes a bug in DkgConfirmer which would cause it to improperly handle indexes
if any validator had multiple key shares.
* Strictly type DataSpecification's Label
* Correct threshold_i_map_to_keys_and_musig_i_map
It didn't include the participant's own index and accordingly was offset.
* Create TributaryBlockHandler
This struct contains all variables prior passed to handle_block and stops them
from being passed around again and again.
This also ensures fatal_slash is only called while handling a block, as needed
as it expects to operate under perfect consensus.
* Inline accumulate, store confirmation nonces with shares
Inlining accumulate makes sense due to the amount of data accumulate needed to
be passed.
Storing confirmation nonces with shares ensures that both are available or
neither. Prior, one could be yet the other may not have been (requiring an
assert in runtime to ensure we didn't bungle it somehow).
* Create helper functions for handling DkgRemoval/SubstrateSign/Sign Tributary TXs
* Move Label into SignData
All of our transactions which use SignData end up with the same common usage
pattern for Label, justifying this.
Removes 3 transactions, explicitly de-duplicating their handlers.
* Remove CurrentlyCompletingKeyPair for the non-contextual DkgKeyPair
* Remove the manual read/write for TributarySpec for borsh
This struct doesn't have any optimizations booned by the manual impl. Using
borsh reduces our scope.
* Use temporary variables to further minimize LoC in tributary handler
* Remove usage of tuples for non-trivial Tributary transactions
* Remove serde from dkg
serde could be used to deserialize intenrally inconsistent objects which could
lead to panics or faults.
The BorshDeserialize derives have been replaced with a manual implementation
which won't produce inconsistent objects.
* Abstract Future generics using new trait definitions in coordinator
* Move published_signed_transaction to tributary/mod.rs to reduce the size of main.rs
* Split coordinator/src/tributary/mod.rs into spec.rs and transaction.rs
2023-12-10 20:21:44 -05:00
|
|
|
label,
|
2023-04-11 19:04:53 -04:00
|
|
|
|
Support multiple key shares per validator (#416)
* Update the coordinator to give key shares based on weight, not based on existence
Participants are now identified by their starting index. While this compiles,
the following is unimplemented:
1) A conversion for DKG `i` values. It assumes the threshold `i` values used
will be identical for the MuSig signature used to confirm the DKG.
2) Expansion from compressed values to full values before forwarding to the
processor.
* Add a fn to the DkgConfirmer to convert `i` values as needed
Also removes TODOs regarding Serai ensuring validator key uniqueness +
validity. The current infra achieves both.
* Have the Tributary DB track participation by shares, not by count
* Prevent a node from obtaining 34% of the maximum amount of key shares
This is actually mainly intended to set a bound on message sizes in the
coordinator. Message sizes are amplified by the amount of key shares held, so
setting an upper bound on said amount lets it determine constants. While that
upper bound could be 150, that'd be unreasonable and increase the potential for
DoS attacks.
* Correct the mechanism to detect if sufficient accumulation has occured
It used to check if the latest accumulation hit the required threshold. Now,
accumulations may jump past the required threshold. The required mechanism is
to check the threshold wasn't prior met and is now met.
* Finish updating the coordinator to handle a multiple key share per validator environment
* Adjust stategy re: preventing noce reuse in DKG Confirmer
* Add TODOs regarding dropped transactions, add possible TODO fix
* Update tests/coordinator
This doesn't add new multi-key-share tests, it solely updates the existing
single key-share tests to compile and run, with the necessary fixes to the
coordinator.
* Update processor key_gen to handle generating multiple key shares at once
* Update SubstrateSigner
* Update signer, clippy
* Update processor tests
* Update processor docker tests
2023-11-04 19:26:13 -04:00
|
|
|
data: {
|
|
|
|
|
let mut res = vec![];
|
2023-12-30 19:08:27 -05:00
|
|
|
for _ in 0 .. (rng.next_u64() % 256) {
|
Support multiple key shares per validator (#416)
* Update the coordinator to give key shares based on weight, not based on existence
Participants are now identified by their starting index. While this compiles,
the following is unimplemented:
1) A conversion for DKG `i` values. It assumes the threshold `i` values used
will be identical for the MuSig signature used to confirm the DKG.
2) Expansion from compressed values to full values before forwarding to the
processor.
* Add a fn to the DkgConfirmer to convert `i` values as needed
Also removes TODOs regarding Serai ensuring validator key uniqueness +
validity. The current infra achieves both.
* Have the Tributary DB track participation by shares, not by count
* Prevent a node from obtaining 34% of the maximum amount of key shares
This is actually mainly intended to set a bound on message sizes in the
coordinator. Message sizes are amplified by the amount of key shares held, so
setting an upper bound on said amount lets it determine constants. While that
upper bound could be 150, that'd be unreasonable and increase the potential for
DoS attacks.
* Correct the mechanism to detect if sufficient accumulation has occured
It used to check if the latest accumulation hit the required threshold. Now,
accumulations may jump past the required threshold. The required mechanism is
to check the threshold wasn't prior met and is now met.
* Finish updating the coordinator to handle a multiple key share per validator environment
* Adjust stategy re: preventing noce reuse in DKG Confirmer
* Add TODOs regarding dropped transactions, add possible TODO fix
* Update tests/coordinator
This doesn't add new multi-key-share tests, it solely updates the existing
single key-share tests to compile and run, with the necessary fixes to the
coordinator.
* Update processor key_gen to handle generating multiple key shares at once
* Update SubstrateSigner
* Update signer, clippy
* Update processor tests
* Update processor docker tests
2023-11-04 19:26:13 -04:00
|
|
|
res.push(random_vec(&mut OsRng, 512));
|
|
|
|
|
}
|
|
|
|
|
res
|
|
|
|
|
},
|
2023-04-11 19:04:53 -04:00
|
|
|
|
Coordinator Cleanup (#481)
* Move logic for evaluating if a cosign should occur to its own file
Cleans it up and makes it more robust.
* Have expected_next_batch return an error instead of retrying
While convenient to offer an error-free implementation, it potentially caused
very long lived lock acquisitions in handle_processor_message.
* Unify and clean DkgConfirmer and DkgRemoval
Does so via adding a new file for the common code, SigningProtocol.
Modifies from_cache to return the preprocess with the machine, as there's no
reason not to. Also removes an unused Result around the type.
Clarifies the security around deterministic nonces, removing them for
saved-to-disk cached preprocesses. The cached preprocesses are encrypted as the
DB is not a proper secret store.
Moves arguments always present in the protocol from function arguments into the
struct itself.
Removes the horribly ugly code in DkgRemoval, fixing multiple issues present
with it which would cause it to fail on use.
* Set SeraiBlockNumber in cosign.rs as it's used by the cosigning protocol
* Remove unnecessary Clone from lambdas in coordinator
* Remove the EventDb from Tributary scanner
We used per-Transaction DB TXNs so on error, we don't have to rescan the entire
block yet only the rest of it. We prevented scanning multiple transactions by
tracking which we already had.
This is over-engineered and not worth it.
* Implement borsh for HasEvents, removing the manual encoding
* Merge DkgConfirmer and DkgRemoval into signing_protocol.rs
Fixes a bug in DkgConfirmer which would cause it to improperly handle indexes
if any validator had multiple key shares.
* Strictly type DataSpecification's Label
* Correct threshold_i_map_to_keys_and_musig_i_map
It didn't include the participant's own index and accordingly was offset.
* Create TributaryBlockHandler
This struct contains all variables prior passed to handle_block and stops them
from being passed around again and again.
This also ensures fatal_slash is only called while handling a block, as needed
as it expects to operate under perfect consensus.
* Inline accumulate, store confirmation nonces with shares
Inlining accumulate makes sense due to the amount of data accumulate needed to
be passed.
Storing confirmation nonces with shares ensures that both are available or
neither. Prior, one could be yet the other may not have been (requiring an
assert in runtime to ensure we didn't bungle it somehow).
* Create helper functions for handling DkgRemoval/SubstrateSign/Sign Tributary TXs
* Move Label into SignData
All of our transactions which use SignData end up with the same common usage
pattern for Label, justifying this.
Removes 3 transactions, explicitly de-duplicating their handlers.
* Remove CurrentlyCompletingKeyPair for the non-contextual DkgKeyPair
* Remove the manual read/write for TributarySpec for borsh
This struct doesn't have any optimizations booned by the manual impl. Using
borsh reduces our scope.
* Use temporary variables to further minimize LoC in tributary handler
* Remove usage of tuples for non-trivial Tributary transactions
* Remove serde from dkg
serde could be used to deserialize intenrally inconsistent objects which could
lead to panics or faults.
The BorshDeserialize derives have been replaced with a manual implementation
which won't produce inconsistent objects.
* Abstract Future generics using new trait definitions in coordinator
* Move published_signed_transaction to tributary/mod.rs to reduce the size of main.rs
* Split coordinator/src/tributary/mod.rs into spec.rs and transaction.rs
2023-12-10 20:21:44 -05:00
|
|
|
signed: random_signed_with_nonce(&mut OsRng, label.nonce()),
|
2023-04-11 19:04:53 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-12-17 00:01:41 -05:00
|
|
|
fn test_read_write<RW: Eq + Debug + ReadWrite>(value: &RW) {
|
|
|
|
|
assert_eq!(value, &RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap());
|
2023-04-11 19:04:53 -04:00
|
|
|
}
|
|
|
|
|
|
Support multiple key shares per validator (#416)
* Update the coordinator to give key shares based on weight, not based on existence
Participants are now identified by their starting index. While this compiles,
the following is unimplemented:
1) A conversion for DKG `i` values. It assumes the threshold `i` values used
will be identical for the MuSig signature used to confirm the DKG.
2) Expansion from compressed values to full values before forwarding to the
processor.
* Add a fn to the DkgConfirmer to convert `i` values as needed
Also removes TODOs regarding Serai ensuring validator key uniqueness +
validity. The current infra achieves both.
* Have the Tributary DB track participation by shares, not by count
* Prevent a node from obtaining 34% of the maximum amount of key shares
This is actually mainly intended to set a bound on message sizes in the
coordinator. Message sizes are amplified by the amount of key shares held, so
setting an upper bound on said amount lets it determine constants. While that
upper bound could be 150, that'd be unreasonable and increase the potential for
DoS attacks.
* Correct the mechanism to detect if sufficient accumulation has occured
It used to check if the latest accumulation hit the required threshold. Now,
accumulations may jump past the required threshold. The required mechanism is
to check the threshold wasn't prior met and is now met.
* Finish updating the coordinator to handle a multiple key share per validator environment
* Adjust stategy re: preventing noce reuse in DKG Confirmer
* Add TODOs regarding dropped transactions, add possible TODO fix
* Update tests/coordinator
This doesn't add new multi-key-share tests, it solely updates the existing
single key-share tests to compile and run, with the necessary fixes to the
coordinator.
* Update processor key_gen to handle generating multiple key shares at once
* Update SubstrateSigner
* Update signer, clippy
* Update processor tests
* Update processor docker tests
2023-11-04 19:26:13 -04:00
|
|
|
#[test]
|
|
|
|
|
fn tx_size_limit() {
|
2024-01-29 03:48:53 -05:00
|
|
|
use serai_client::validator_sets::primitives::MAX_KEY_LEN;
|
Support multiple key shares per validator (#416)
* Update the coordinator to give key shares based on weight, not based on existence
Participants are now identified by their starting index. While this compiles,
the following is unimplemented:
1) A conversion for DKG `i` values. It assumes the threshold `i` values used
will be identical for the MuSig signature used to confirm the DKG.
2) Expansion from compressed values to full values before forwarding to the
processor.
* Add a fn to the DkgConfirmer to convert `i` values as needed
Also removes TODOs regarding Serai ensuring validator key uniqueness +
validity. The current infra achieves both.
* Have the Tributary DB track participation by shares, not by count
* Prevent a node from obtaining 34% of the maximum amount of key shares
This is actually mainly intended to set a bound on message sizes in the
coordinator. Message sizes are amplified by the amount of key shares held, so
setting an upper bound on said amount lets it determine constants. While that
upper bound could be 150, that'd be unreasonable and increase the potential for
DoS attacks.
* Correct the mechanism to detect if sufficient accumulation has occured
It used to check if the latest accumulation hit the required threshold. Now,
accumulations may jump past the required threshold. The required mechanism is
to check the threshold wasn't prior met and is now met.
* Finish updating the coordinator to handle a multiple key share per validator environment
* Adjust stategy re: preventing noce reuse in DKG Confirmer
* Add TODOs regarding dropped transactions, add possible TODO fix
* Update tests/coordinator
This doesn't add new multi-key-share tests, it solely updates the existing
single key-share tests to compile and run, with the necessary fixes to the
coordinator.
* Update processor key_gen to handle generating multiple key shares at once
* Update SubstrateSigner
* Update signer, clippy
* Update processor tests
* Update processor docker tests
2023-11-04 19:26:13 -04:00
|
|
|
|
|
|
|
|
use tributary::TRANSACTION_SIZE_LIMIT;
|
|
|
|
|
|
|
|
|
|
let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;
|
|
|
|
|
let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients;
|
|
|
|
|
// Handwave the DKG Commitments size as the size of the commitments to the coefficients and
|
|
|
|
|
// 1024 bytes for all overhead
|
|
|
|
|
let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024;
|
|
|
|
|
assert!(
|
|
|
|
|
u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
|
|
|
|
|
(handwaved_dkg_commitments_size * max_key_shares_per_individual)
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
// Encryption key, PoP (2 elements), message
|
|
|
|
|
let elements_per_share = 4;
|
|
|
|
|
let handwaved_dkg_shares_size =
|
|
|
|
|
(elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024;
|
|
|
|
|
assert!(
|
|
|
|
|
u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
|
|
|
|
|
(handwaved_dkg_shares_size * max_key_shares_per_individual)
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-11 19:04:53 -04:00
|
|
|
#[test]
|
|
|
|
|
fn serialize_sign_data() {
|
2023-12-17 00:01:41 -05:00
|
|
|
fn test_read_write<Id: Clone + PartialEq + Eq + Debug + Encode + Decode>(value: &SignData<Id>) {
|
2023-12-01 23:31:18 -05:00
|
|
|
let mut buf = vec![];
|
|
|
|
|
value.write(&mut buf).unwrap();
|
2023-12-17 00:01:41 -05:00
|
|
|
assert_eq!(value, &SignData::read(&mut buf.as_slice()).unwrap())
|
2023-12-01 23:31:18 -05:00
|
|
|
}
|
|
|
|
|
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
let mut plan = [0; 3];
|
|
|
|
|
OsRng.fill_bytes(&mut plan);
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&random_sign_data::<_, _>(
|
2023-12-01 23:31:18 -05:00
|
|
|
&mut OsRng,
|
|
|
|
|
plan,
|
Coordinator Cleanup (#481)
* Move logic for evaluating if a cosign should occur to its own file
Cleans it up and makes it more robust.
* Have expected_next_batch return an error instead of retrying
While convenient to offer an error-free implementation, it potentially caused
very long lived lock acquisitions in handle_processor_message.
* Unify and clean DkgConfirmer and DkgRemoval
Does so via adding a new file for the common code, SigningProtocol.
Modifies from_cache to return the preprocess with the machine, as there's no
reason not to. Also removes an unused Result around the type.
Clarifies the security around deterministic nonces, removing them for
saved-to-disk cached preprocesses. The cached preprocesses are encrypted as the
DB is not a proper secret store.
Moves arguments always present in the protocol from function arguments into the
struct itself.
Removes the horribly ugly code in DkgRemoval, fixing multiple issues present
with it which would cause it to fail on use.
* Set SeraiBlockNumber in cosign.rs as it's used by the cosigning protocol
* Remove unnecessary Clone from lambdas in coordinator
* Remove the EventDb from Tributary scanner
We used per-Transaction DB TXNs so on error, we don't have to rescan the entire
block yet only the rest of it. We prevented scanning multiple transactions by
tracking which we already had.
This is over-engineered and not worth it.
* Implement borsh for HasEvents, removing the manual encoding
* Merge DkgConfirmer and DkgRemoval into signing_protocol.rs
Fixes a bug in DkgConfirmer which would cause it to improperly handle indexes
if any validator had multiple key shares.
* Strictly type DataSpecification's Label
* Correct threshold_i_map_to_keys_and_musig_i_map
It didn't include the participant's own index and accordingly was offset.
* Create TributaryBlockHandler
This struct contains all variables prior passed to handle_block and stops them
from being passed around again and again.
This also ensures fatal_slash is only called while handling a block, as needed
as it expects to operate under perfect consensus.
* Inline accumulate, store confirmation nonces with shares
Inlining accumulate makes sense due to the amount of data accumulate needed to
be passed.
Storing confirmation nonces with shares ensures that both are available or
neither. Prior, one could be yet the other may not have been (requiring an
assert in runtime to ensure we didn't bungle it somehow).
* Create helper functions for handling DkgRemoval/SubstrateSign/Sign Tributary TXs
* Move Label into SignData
All of our transactions which use SignData end up with the same common usage
pattern for Label, justifying this.
Removes 3 transactions, explicitly de-duplicating their handlers.
* Remove CurrentlyCompletingKeyPair for the non-contextual DkgKeyPair
* Remove the manual read/write for TributarySpec for borsh
This struct doesn't have any optimizations booned by the manual impl. Using
borsh reduces our scope.
* Use temporary variables to further minimize LoC in tributary handler
* Remove usage of tuples for non-trivial Tributary transactions
* Remove serde from dkg
serde could be used to deserialize intenrally inconsistent objects which could
lead to panics or faults.
The BorshDeserialize derives have been replaced with a manual implementation
which won't produce inconsistent objects.
* Abstract Future generics using new trait definitions in coordinator
* Move published_signed_transaction to tributary/mod.rs to reduce the size of main.rs
* Split coordinator/src/tributary/mod.rs into spec.rs and transaction.rs
2023-12-10 20:21:44 -05:00
|
|
|
if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
|
2023-12-01 23:31:18 -05:00
|
|
|
));
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
let mut plan = [0; 5];
|
|
|
|
|
OsRng.fill_bytes(&mut plan);
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&random_sign_data::<_, _>(
|
2023-12-01 23:31:18 -05:00
|
|
|
&mut OsRng,
|
|
|
|
|
plan,
|
Coordinator Cleanup (#481)
* Move logic for evaluating if a cosign should occur to its own file
Cleans it up and makes it more robust.
* Have expected_next_batch return an error instead of retrying
While convenient to offer an error-free implementation, it potentially caused
very long lived lock acquisitions in handle_processor_message.
* Unify and clean DkgConfirmer and DkgRemoval
Does so via adding a new file for the common code, SigningProtocol.
Modifies from_cache to return the preprocess with the machine, as there's no
reason not to. Also removes an unused Result around the type.
Clarifies the security around deterministic nonces, removing them for
saved-to-disk cached preprocesses. The cached preprocesses are encrypted as the
DB is not a proper secret store.
Moves arguments always present in the protocol from function arguments into the
struct itself.
Removes the horribly ugly code in DkgRemoval, fixing multiple issues present
with it which would cause it to fail on use.
* Set SeraiBlockNumber in cosign.rs as it's used by the cosigning protocol
* Remove unnecessary Clone from lambdas in coordinator
* Remove the EventDb from Tributary scanner
We used per-Transaction DB TXNs so on error, we don't have to rescan the entire
block yet only the rest of it. We prevented scanning multiple transactions by
tracking which we already had.
This is over-engineered and not worth it.
* Implement borsh for HasEvents, removing the manual encoding
* Merge DkgConfirmer and DkgRemoval into signing_protocol.rs
Fixes a bug in DkgConfirmer which would cause it to improperly handle indexes
if any validator had multiple key shares.
* Strictly type DataSpecification's Label
* Correct threshold_i_map_to_keys_and_musig_i_map
It didn't include the participant's own index and accordingly was offset.
* Create TributaryBlockHandler
This struct contains all variables prior passed to handle_block and stops them
from being passed around again and again.
This also ensures fatal_slash is only called while handling a block, as needed
as it expects to operate under perfect consensus.
* Inline accumulate, store confirmation nonces with shares
Inlining accumulate makes sense due to the amount of data accumulate needed to
be passed.
Storing confirmation nonces with shares ensures that both are available or
neither. Prior, one could be yet the other may not have been (requiring an
assert in runtime to ensure we didn't bungle it somehow).
* Create helper functions for handling DkgRemoval/SubstrateSign/Sign Tributary TXs
* Move Label into SignData
All of our transactions which use SignData end up with the same common usage
pattern for Label, justifying this.
Removes 3 transactions, explicitly de-duplicating their handlers.
* Remove CurrentlyCompletingKeyPair for the non-contextual DkgKeyPair
* Remove the manual read/write for TributarySpec for borsh
This struct doesn't have any optimizations booned by the manual impl. Using
borsh reduces our scope.
* Use temporary variables to further minimize LoC in tributary handler
* Remove usage of tuples for non-trivial Tributary transactions
* Remove serde from dkg
serde could be used to deserialize intenrally inconsistent objects which could
lead to panics or faults.
The BorshDeserialize derives have been replaced with a manual implementation
which won't produce inconsistent objects.
* Abstract Future generics using new trait definitions in coordinator
* Move published_signed_transaction to tributary/mod.rs to reduce the size of main.rs
* Split coordinator/src/tributary/mod.rs into spec.rs and transaction.rs
2023-12-10 20:21:44 -05:00
|
|
|
if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
|
2023-12-01 23:31:18 -05:00
|
|
|
));
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
let mut plan = [0; 8];
|
|
|
|
|
OsRng.fill_bytes(&mut plan);
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&random_sign_data::<_, _>(
|
2023-12-01 23:31:18 -05:00
|
|
|
&mut OsRng,
|
|
|
|
|
plan,
|
Coordinator Cleanup (#481)
* Move logic for evaluating if a cosign should occur to its own file
Cleans it up and makes it more robust.
* Have expected_next_batch return an error instead of retrying
While convenient to offer an error-free implementation, it potentially caused
very long lived lock acquisitions in handle_processor_message.
* Unify and clean DkgConfirmer and DkgRemoval
Does so via adding a new file for the common code, SigningProtocol.
Modifies from_cache to return the preprocess with the machine, as there's no
reason not to. Also removes an unused Result around the type.
Clarifies the security around deterministic nonces, removing them for
saved-to-disk cached preprocesses. The cached preprocesses are encrypted as the
DB is not a proper secret store.
Moves arguments always present in the protocol from function arguments into the
struct itself.
Removes the horribly ugly code in DkgRemoval, fixing multiple issues present
with it which would cause it to fail on use.
* Set SeraiBlockNumber in cosign.rs as it's used by the cosigning protocol
* Remove unnecessary Clone from lambdas in coordinator
* Remove the EventDb from Tributary scanner
We used per-Transaction DB TXNs so on error, we don't have to rescan the entire
block yet only the rest of it. We prevented scanning multiple transactions by
tracking which we already had.
This is over-engineered and not worth it.
* Implement borsh for HasEvents, removing the manual encoding
* Merge DkgConfirmer and DkgRemoval into signing_protocol.rs
Fixes a bug in DkgConfirmer which would cause it to improperly handle indexes
if any validator had multiple key shares.
* Strictly type DataSpecification's Label
* Correct threshold_i_map_to_keys_and_musig_i_map
It didn't include the participant's own index and accordingly was offset.
* Create TributaryBlockHandler
This struct contains all variables prior passed to handle_block and stops them
from being passed around again and again.
This also ensures fatal_slash is only called while handling a block, as needed
as it expects to operate under perfect consensus.
* Inline accumulate, store confirmation nonces with shares
Inlining accumulate makes sense due to the amount of data accumulate needed to
be passed.
Storing confirmation nonces with shares ensures that both are available or
neither. Prior, one could be yet the other may not have been (requiring an
assert in runtime to ensure we didn't bungle it somehow).
* Create helper functions for handling DkgRemoval/SubstrateSign/Sign Tributary TXs
* Move Label into SignData
All of our transactions which use SignData end up with the same common usage
pattern for Label, justifying this.
Removes 3 transactions, explicitly de-duplicating their handlers.
* Remove CurrentlyCompletingKeyPair for the non-contextual DkgKeyPair
* Remove the manual read/write for TributarySpec for borsh
This struct doesn't have any optimizations booned by the manual impl. Using
borsh reduces our scope.
* Use temporary variables to further minimize LoC in tributary handler
* Remove usage of tuples for non-trivial Tributary transactions
* Remove serde from dkg
serde could be used to deserialize intenrally inconsistent objects which could
lead to panics or faults.
The BorshDeserialize derives have been replaced with a manual implementation
which won't produce inconsistent objects.
* Abstract Future generics using new trait definitions in coordinator
* Move published_signed_transaction to tributary/mod.rs to reduce the size of main.rs
* Split coordinator/src/tributary/mod.rs into spec.rs and transaction.rs
2023-12-10 20:21:44 -05:00
|
|
|
if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
|
2023-12-01 23:31:18 -05:00
|
|
|
));
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
let mut plan = [0; 24];
|
|
|
|
|
OsRng.fill_bytes(&mut plan);
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&random_sign_data::<_, _>(
|
2023-12-01 23:31:18 -05:00
|
|
|
&mut OsRng,
|
|
|
|
|
plan,
|
Coordinator Cleanup (#481)
* Move logic for evaluating if a cosign should occur to its own file
Cleans it up and makes it more robust.
* Have expected_next_batch return an error instead of retrying
While convenient to offer an error-free implementation, it potentially caused
very long lived lock acquisitions in handle_processor_message.
* Unify and clean DkgConfirmer and DkgRemoval
Does so via adding a new file for the common code, SigningProtocol.
Modifies from_cache to return the preprocess with the machine, as there's no
reason not to. Also removes an unused Result around the type.
Clarifies the security around deterministic nonces, removing them for
saved-to-disk cached preprocesses. The cached preprocesses are encrypted as the
DB is not a proper secret store.
Moves arguments always present in the protocol from function arguments into the
struct itself.
Removes the horribly ugly code in DkgRemoval, fixing multiple issues present
with it which would cause it to fail on use.
* Set SeraiBlockNumber in cosign.rs as it's used by the cosigning protocol
* Remove unnecessary Clone from lambdas in coordinator
* Remove the EventDb from Tributary scanner
We used per-Transaction DB TXNs so on error, we don't have to rescan the entire
block yet only the rest of it. We prevented scanning multiple transactions by
tracking which we already had.
This is over-engineered and not worth it.
* Implement borsh for HasEvents, removing the manual encoding
* Merge DkgConfirmer and DkgRemoval into signing_protocol.rs
Fixes a bug in DkgConfirmer which would cause it to improperly handle indexes
if any validator had multiple key shares.
* Strictly type DataSpecification's Label
* Correct threshold_i_map_to_keys_and_musig_i_map
It didn't include the participant's own index and accordingly was offset.
* Create TributaryBlockHandler
This struct contains all variables prior passed to handle_block and stops them
from being passed around again and again.
This also ensures fatal_slash is only called while handling a block, as needed
as it expects to operate under perfect consensus.
* Inline accumulate, store confirmation nonces with shares
Inlining accumulate makes sense due to the amount of data accumulate needed to
be passed.
Storing confirmation nonces with shares ensures that both are available or
neither. Prior, one could be yet the other may not have been (requiring an
assert in runtime to ensure we didn't bungle it somehow).
* Create helper functions for handling DkgRemoval/SubstrateSign/Sign Tributary TXs
* Move Label into SignData
All of our transactions which use SignData end up with the same common usage
pattern for Label, justifying this.
Removes 3 transactions, explicitly de-duplicating their handlers.
* Remove CurrentlyCompletingKeyPair for the non-contextual DkgKeyPair
* Remove the manual read/write for TributarySpec for borsh
This struct doesn't have any optimizations booned by the manual impl. Using
borsh reduces our scope.
* Use temporary variables to further minimize LoC in tributary handler
* Remove usage of tuples for non-trivial Tributary transactions
* Remove serde from dkg
serde could be used to deserialize intenrally inconsistent objects which could
lead to panics or faults.
The BorshDeserialize derives have been replaced with a manual implementation
which won't produce inconsistent objects.
* Abstract Future generics using new trait definitions in coordinator
* Move published_signed_transaction to tributary/mod.rs to reduce the size of main.rs
* Split coordinator/src/tributary/mod.rs into spec.rs and transaction.rs
2023-12-10 20:21:44 -05:00
|
|
|
if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
|
2023-12-01 23:31:18 -05:00
|
|
|
));
|
2023-04-11 19:04:53 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn serialize_transaction() {
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&Transaction::RemoveParticipantDueToDkg {
|
2023-12-18 09:13:09 -05:00
|
|
|
participant: <Ristretto as Ciphersuite>::G::random(&mut OsRng),
|
|
|
|
|
signed: random_signed_with_nonce(&mut OsRng, 0),
|
2023-12-13 14:03:07 -05:00
|
|
|
});
|
2023-11-12 07:24:41 -05:00
|
|
|
|
Support multiple key shares per validator (#416)
* Update the coordinator to give key shares based on weight, not based on existence
Participants are now identified by their starting index. While this compiles,
the following is unimplemented:
1) A conversion for DKG `i` values. It assumes the threshold `i` values used
will be identical for the MuSig signature used to confirm the DKG.
2) Expansion from compressed values to full values before forwarding to the
processor.
* Add a fn to the DkgConfirmer to convert `i` values as needed
Also removes TODOs regarding Serai ensuring validator key uniqueness +
validity. The current infra achieves both.
* Have the Tributary DB track participation by shares, not by count
* Prevent a node from obtaining 34% of the maximum amount of key shares
This is actually mainly intended to set a bound on message sizes in the
coordinator. Message sizes are amplified by the amount of key shares held, so
setting an upper bound on said amount lets it determine constants. While that
upper bound could be 150, that'd be unreasonable and increase the potential for
DoS attacks.
* Correct the mechanism to detect if sufficient accumulation has occured
It used to check if the latest accumulation hit the required threshold. Now,
accumulations may jump past the required threshold. The required mechanism is
to check the threshold wasn't prior met and is now met.
* Finish updating the coordinator to handle a multiple key share per validator environment
* Adjust stategy re: preventing noce reuse in DKG Confirmer
* Add TODOs regarding dropped transactions, add possible TODO fix
* Update tests/coordinator
This doesn't add new multi-key-share tests, it solely updates the existing
single key-share tests to compile and run, with the necessary fixes to the
coordinator.
* Update processor key_gen to handle generating multiple key shares at once
* Update SubstrateSigner
* Update signer, clippy
* Update processor tests
* Update processor docker tests
2023-11-04 19:26:13 -04:00
|
|
|
{
|
|
|
|
|
let mut commitments = vec![random_vec(&mut OsRng, 512)];
|
|
|
|
|
for _ in 0 .. (OsRng.next_u64() % 100) {
|
|
|
|
|
let mut temp = commitments[0].clone();
|
|
|
|
|
OsRng.fill_bytes(&mut temp);
|
|
|
|
|
commitments.push(temp);
|
|
|
|
|
}
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&Transaction::DkgCommitments {
|
Coordinator Cleanup (#481)
* Move logic for evaluating if a cosign should occur to its own file
Cleans it up and makes it more robust.
* Have expected_next_batch return an error instead of retrying
While convenient to offer an error-free implementation, it potentially caused
very long lived lock acquisitions in handle_processor_message.
* Unify and clean DkgConfirmer and DkgRemoval
Does so via adding a new file for the common code, SigningProtocol.
Modifies from_cache to return the preprocess with the machine, as there's no
reason not to. Also removes an unused Result around the type.
Clarifies the security around deterministic nonces, removing them for
saved-to-disk cached preprocesses. The cached preprocesses are encrypted as the
DB is not a proper secret store.
Moves arguments always present in the protocol from function arguments into the
struct itself.
Removes the horribly ugly code in DkgRemoval, fixing multiple issues present
with it which would cause it to fail on use.
* Set SeraiBlockNumber in cosign.rs as it's used by the cosigning protocol
* Remove unnecessary Clone from lambdas in coordinator
* Remove the EventDb from Tributary scanner
We used per-Transaction DB TXNs so on error, we don't have to rescan the entire
block yet only the rest of it. We prevented scanning multiple transactions by
tracking which we already had.
This is over-engineered and not worth it.
* Implement borsh for HasEvents, removing the manual encoding
* Merge DkgConfirmer and DkgRemoval into signing_protocol.rs
Fixes a bug in DkgConfirmer which would cause it to improperly handle indexes
if any validator had multiple key shares.
* Strictly type DataSpecification's Label
* Correct threshold_i_map_to_keys_and_musig_i_map
It didn't include the participant's own index and accordingly was offset.
* Create TributaryBlockHandler
This struct contains all variables prior passed to handle_block and stops them
from being passed around again and again.
This also ensures fatal_slash is only called while handling a block, as needed
as it expects to operate under perfect consensus.
* Inline accumulate, store confirmation nonces with shares
Inlining accumulate makes sense due to the amount of data accumulate needed to
be passed.
Storing confirmation nonces with shares ensures that both are available or
neither. Prior, one could be yet the other may not have been (requiring an
assert in runtime to ensure we didn't bungle it somehow).
* Create helper functions for handling DkgRemoval/SubstrateSign/Sign Tributary TXs
* Move Label into SignData
All of our transactions which use SignData end up with the same common usage
pattern for Label, justifying this.
Removes 3 transactions, explicitly de-duplicating their handlers.
* Remove CurrentlyCompletingKeyPair for the non-contextual DkgKeyPair
* Remove the manual read/write for TributarySpec for borsh
This struct doesn't have any optimizations booned by the manual impl. Using
borsh reduces our scope.
* Use temporary variables to further minimize LoC in tributary handler
* Remove usage of tuples for non-trivial Tributary transactions
* Remove serde from dkg
serde could be used to deserialize intenrally inconsistent objects which could
lead to panics or faults.
The BorshDeserialize derives have been replaced with a manual implementation
which won't produce inconsistent objects.
* Abstract Future generics using new trait definitions in coordinator
* Move published_signed_transaction to tributary/mod.rs to reduce the size of main.rs
* Split coordinator/src/tributary/mod.rs into spec.rs and transaction.rs
2023-12-10 20:21:44 -05:00
|
|
|
attempt: random_u32(&mut OsRng),
|
Support multiple key shares per validator (#416)
* Update the coordinator to give key shares based on weight, not based on existence
Participants are now identified by their starting index. While this compiles,
the following is unimplemented:
1) A conversion for DKG `i` values. It assumes the threshold `i` values used
will be identical for the MuSig signature used to confirm the DKG.
2) Expansion from compressed values to full values before forwarding to the
processor.
* Add a fn to the DkgConfirmer to convert `i` values as needed
Also removes TODOs regarding Serai ensuring validator key uniqueness +
validity. The current infra achieves both.
* Have the Tributary DB track participation by shares, not by count
* Prevent a node from obtaining 34% of the maximum amount of key shares
This is actually mainly intended to set a bound on message sizes in the
coordinator. Message sizes are amplified by the amount of key shares held, so
setting an upper bound on said amount lets it determine constants. While that
upper bound could be 150, that'd be unreasonable and increase the potential for
DoS attacks.
* Correct the mechanism to detect if sufficient accumulation has occured
It used to check if the latest accumulation hit the required threshold. Now,
accumulations may jump past the required threshold. The required mechanism is
to check the threshold wasn't prior met and is now met.
* Finish updating the coordinator to handle a multiple key share per validator environment
* Adjust stategy re: preventing noce reuse in DKG Confirmer
* Add TODOs regarding dropped transactions, add possible TODO fix
* Update tests/coordinator
This doesn't add new multi-key-share tests, it solely updates the existing
single key-share tests to compile and run, with the necessary fixes to the
coordinator.
* Update processor key_gen to handle generating multiple key shares at once
* Update SubstrateSigner
* Update signer, clippy
* Update processor tests
* Update processor docker tests
2023-11-04 19:26:13 -04:00
|
|
|
commitments,
|
Coordinator Cleanup (#481)
* Move logic for evaluating if a cosign should occur to its own file
Cleans it up and makes it more robust.
* Have expected_next_batch return an error instead of retrying
While convenient to offer an error-free implementation, it potentially caused
very long lived lock acquisitions in handle_processor_message.
* Unify and clean DkgConfirmer and DkgRemoval
Does so via adding a new file for the common code, SigningProtocol.
Modifies from_cache to return the preprocess with the machine, as there's no
reason not to. Also removes an unused Result around the type.
Clarifies the security around deterministic nonces, removing them for
saved-to-disk cached preprocesses. The cached preprocesses are encrypted as the
DB is not a proper secret store.
Moves arguments always present in the protocol from function arguments into the
struct itself.
Removes the horribly ugly code in DkgRemoval, fixing multiple issues present
with it which would cause it to fail on use.
* Set SeraiBlockNumber in cosign.rs as it's used by the cosigning protocol
* Remove unnecessary Clone from lambdas in coordinator
* Remove the EventDb from Tributary scanner
We used per-Transaction DB TXNs so on error, we don't have to rescan the entire
block yet only the rest of it. We prevented scanning multiple transactions by
tracking which we already had.
This is over-engineered and not worth it.
* Implement borsh for HasEvents, removing the manual encoding
* Merge DkgConfirmer and DkgRemoval into signing_protocol.rs
Fixes a bug in DkgConfirmer which would cause it to improperly handle indexes
if any validator had multiple key shares.
* Strictly type DataSpecification's Label
* Correct threshold_i_map_to_keys_and_musig_i_map
It didn't include the participant's own index and accordingly was offset.
* Create TributaryBlockHandler
This struct contains all variables prior passed to handle_block and stops them
from being passed around again and again.
This also ensures fatal_slash is only called while handling a block, as needed
as it expects to operate under perfect consensus.
* Inline accumulate, store confirmation nonces with shares
Inlining accumulate makes sense due to the amount of data accumulate needed to
be passed.
Storing confirmation nonces with shares ensures that both are available or
neither. Prior, one could be yet the other may not have been (requiring an
assert in runtime to ensure we didn't bungle it somehow).
* Create helper functions for handling DkgRemoval/SubstrateSign/Sign Tributary TXs
* Move Label into SignData
All of our transactions which use SignData end up with the same common usage
pattern for Label, justifying this.
Removes 3 transactions, explicitly de-duplicating their handlers.
* Remove CurrentlyCompletingKeyPair for the non-contextual DkgKeyPair
* Remove the manual read/write for TributarySpec for borsh
This struct doesn't have any optimizations booned by the manual impl. Using
borsh reduces our scope.
* Use temporary variables to further minimize LoC in tributary handler
* Remove usage of tuples for non-trivial Tributary transactions
* Remove serde from dkg
serde could be used to deserialize intenrally inconsistent objects which could
lead to panics or faults.
The BorshDeserialize derives have been replaced with a manual implementation
which won't produce inconsistent objects.
* Abstract Future generics using new trait definitions in coordinator
* Move published_signed_transaction to tributary/mod.rs to reduce the size of main.rs
* Split coordinator/src/tributary/mod.rs into spec.rs and transaction.rs
2023-12-10 20:21:44 -05:00
|
|
|
signed: random_signed_with_nonce(&mut OsRng, 0),
|
|
|
|
|
});
|
Support multiple key shares per validator (#416)
* Update the coordinator to give key shares based on weight, not based on existence
Participants are now identified by their starting index. While this compiles,
the following is unimplemented:
1) A conversion for DKG `i` values. It assumes the threshold `i` values used
will be identical for the MuSig signature used to confirm the DKG.
2) Expansion from compressed values to full values before forwarding to the
processor.
* Add a fn to the DkgConfirmer to convert `i` values as needed
Also removes TODOs regarding Serai ensuring validator key uniqueness +
validity. The current infra achieves both.
* Have the Tributary DB track participation by shares, not by count
* Prevent a node from obtaining 34% of the maximum amount of key shares
This is actually mainly intended to set a bound on message sizes in the
coordinator. Message sizes are amplified by the amount of key shares held, so
setting an upper bound on said amount lets it determine constants. While that
upper bound could be 150, that'd be unreasonable and increase the potential for
DoS attacks.
* Correct the mechanism to detect if sufficient accumulation has occured
It used to check if the latest accumulation hit the required threshold. Now,
accumulations may jump past the required threshold. The required mechanism is
to check the threshold wasn't prior met and is now met.
* Finish updating the coordinator to handle a multiple key share per validator environment
* Adjust stategy re: preventing noce reuse in DKG Confirmer
* Add TODOs regarding dropped transactions, add possible TODO fix
* Update tests/coordinator
This doesn't add new multi-key-share tests, it solely updates the existing
single key-share tests to compile and run, with the necessary fixes to the
coordinator.
* Update processor key_gen to handle generating multiple key shares at once
* Update SubstrateSigner
* Update signer, clippy
* Update processor tests
* Update processor docker tests
2023-11-04 19:26:13 -04:00
|
|
|
}
|
2023-04-11 19:04:53 -04:00
|
|
|
|
|
|
|
|
{
|
Support multiple key shares per validator (#416)
* Update the coordinator to give key shares based on weight, not based on existence
Participants are now identified by their starting index. While this compiles,
the following is unimplemented:
1) A conversion for DKG `i` values. It assumes the threshold `i` values used
will be identical for the MuSig signature used to confirm the DKG.
2) Expansion from compressed values to full values before forwarding to the
processor.
* Add a fn to the DkgConfirmer to convert `i` values as needed
Also removes TODOs regarding Serai ensuring validator key uniqueness +
validity. The current infra achieves both.
* Have the Tributary DB track participation by shares, not by count
* Prevent a node from obtaining 34% of the maximum amount of key shares
This is actually mainly intended to set a bound on message sizes in the
coordinator. Message sizes are amplified by the amount of key shares held, so
setting an upper bound on said amount lets it determine constants. While that
upper bound could be 150, that'd be unreasonable and increase the potential for
DoS attacks.
* Correct the mechanism to detect if sufficient accumulation has occured
It used to check if the latest accumulation hit the required threshold. Now,
accumulations may jump past the required threshold. The required mechanism is
to check the threshold wasn't prior met and is now met.
* Finish updating the coordinator to handle a multiple key share per validator environment
* Adjust stategy re: preventing noce reuse in DKG Confirmer
* Add TODOs regarding dropped transactions, add possible TODO fix
* Update tests/coordinator
This doesn't add new multi-key-share tests, it solely updates the existing
single key-share tests to compile and run, with the necessary fixes to the
coordinator.
* Update processor key_gen to handle generating multiple key shares at once
* Update SubstrateSigner
* Update signer, clippy
* Update processor tests
* Update processor docker tests
2023-11-04 19:26:13 -04:00
|
|
|
// This supports a variable share length, and variable amount of sent shares, yet share length
|
|
|
|
|
// and sent shares is expected to be constant among recipients
|
|
|
|
|
let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap();
|
|
|
|
|
let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap();
|
2023-09-01 00:03:53 -04:00
|
|
|
// Create a valid vec of shares
|
|
|
|
|
let mut shares = vec![];
|
Support multiple key shares per validator (#416)
* Update the coordinator to give key shares based on weight, not based on existence
Participants are now identified by their starting index. While this compiles,
the following is unimplemented:
1) A conversion for DKG `i` values. It assumes the threshold `i` values used
will be identical for the MuSig signature used to confirm the DKG.
2) Expansion from compressed values to full values before forwarding to the
processor.
* Add a fn to the DkgConfirmer to convert `i` values as needed
Also removes TODOs regarding Serai ensuring validator key uniqueness +
validity. The current infra achieves both.
* Have the Tributary DB track participation by shares, not by count
* Prevent a node from obtaining 34% of the maximum amount of key shares
This is actually mainly intended to set a bound on message sizes in the
coordinator. Message sizes are amplified by the amount of key shares held, so
setting an upper bound on said amount lets it determine constants. While that
upper bound could be 150, that'd be unreasonable and increase the potential for
DoS attacks.
* Correct the mechanism to detect if sufficient accumulation has occured
It used to check if the latest accumulation hit the required threshold. Now,
accumulations may jump past the required threshold. The required mechanism is
to check the threshold wasn't prior met and is now met.
* Finish updating the coordinator to handle a multiple key share per validator environment
* Adjust stategy re: preventing noce reuse in DKG Confirmer
* Add TODOs regarding dropped transactions, add possible TODO fix
* Update tests/coordinator
This doesn't add new multi-key-share tests, it solely updates the existing
single key-share tests to compile and run, with the necessary fixes to the
coordinator.
* Update processor key_gen to handle generating multiple key shares at once
* Update SubstrateSigner
* Update signer, clippy
* Update processor tests
* Update processor docker tests
2023-11-04 19:26:13 -04:00
|
|
|
// Create up to 150 participants
|
2023-12-17 00:01:41 -05:00
|
|
|
for _ in 0 ..= (OsRng.next_u64() % 150) {
|
Support multiple key shares per validator (#416)
* Update the coordinator to give key shares based on weight, not based on existence
Participants are now identified by their starting index. While this compiles,
the following is unimplemented:
1) A conversion for DKG `i` values. It assumes the threshold `i` values used
will be identical for the MuSig signature used to confirm the DKG.
2) Expansion from compressed values to full values before forwarding to the
processor.
* Add a fn to the DkgConfirmer to convert `i` values as needed
Also removes TODOs regarding Serai ensuring validator key uniqueness +
validity. The current infra achieves both.
* Have the Tributary DB track participation by shares, not by count
* Prevent a node from obtaining 34% of the maximum amount of key shares
This is actually mainly intended to set a bound on message sizes in the
coordinator. Message sizes are amplified by the amount of key shares held, so
setting an upper bound on said amount lets it determine constants. While that
upper bound could be 150, that'd be unreasonable and increase the potential for
DoS attacks.
* Correct the mechanism to detect if sufficient accumulation has occured
It used to check if the latest accumulation hit the required threshold. Now,
accumulations may jump past the required threshold. The required mechanism is
to check the threshold wasn't prior met and is now met.
* Finish updating the coordinator to handle a multiple key share per validator environment
* Adjust stategy re: preventing noce reuse in DKG Confirmer
* Add TODOs regarding dropped transactions, add possible TODO fix
* Update tests/coordinator
This doesn't add new multi-key-share tests, it solely updates the existing
single key-share tests to compile and run, with the necessary fixes to the
coordinator.
* Update processor key_gen to handle generating multiple key shares at once
* Update SubstrateSigner
* Update signer, clippy
* Update processor tests
* Update processor docker tests
2023-11-04 19:26:13 -04:00
|
|
|
// Give each sender multiple shares
|
|
|
|
|
let mut sender_shares = vec![];
|
|
|
|
|
for _ in 0 .. amount_of_shares {
|
|
|
|
|
let mut share = vec![0; share_len];
|
|
|
|
|
OsRng.fill_bytes(&mut share);
|
|
|
|
|
sender_shares.push(share);
|
|
|
|
|
}
|
|
|
|
|
shares.push(sender_shares);
|
2023-04-11 19:04:53 -04:00
|
|
|
}
|
|
|
|
|
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&Transaction::DkgShares {
|
2023-08-14 06:08:55 -04:00
|
|
|
attempt: random_u32(&mut OsRng),
|
2023-04-11 19:04:53 -04:00
|
|
|
shares,
|
2023-08-14 06:08:55 -04:00
|
|
|
confirmation_nonces: {
|
|
|
|
|
let mut nonces = [0; 64];
|
|
|
|
|
OsRng.fill_bytes(&mut nonces);
|
|
|
|
|
nonces
|
|
|
|
|
},
|
2023-12-01 23:31:18 -05:00
|
|
|
signed: random_signed_with_nonce(&mut OsRng, 1),
|
2023-08-14 06:08:55 -04:00
|
|
|
});
|
2023-04-11 19:04:53 -04:00
|
|
|
}
|
|
|
|
|
|
2023-11-12 07:24:41 -05:00
|
|
|
for i in 0 .. 2 {
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&Transaction::InvalidDkgShare {
|
2023-11-12 07:24:41 -05:00
|
|
|
attempt: random_u32(&mut OsRng),
|
|
|
|
|
accuser: frost::Participant::new(
|
|
|
|
|
u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
|
|
|
|
|
)
|
|
|
|
|
.unwrap(),
|
|
|
|
|
faulty: frost::Participant::new(
|
|
|
|
|
u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
|
|
|
|
|
)
|
|
|
|
|
.unwrap(),
|
|
|
|
|
blame: if i == 0 {
|
|
|
|
|
None
|
|
|
|
|
} else {
|
|
|
|
|
Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty())
|
|
|
|
|
},
|
2023-12-01 23:31:18 -05:00
|
|
|
signed: random_signed_with_nonce(&mut OsRng, 2),
|
2023-11-12 07:24:41 -05:00
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&Transaction::DkgConfirmed {
|
Coordinator Cleanup (#481)
* Move logic for evaluating if a cosign should occur to its own file
Cleans it up and makes it more robust.
* Have expected_next_batch return an error instead of retrying
While convenient to offer an error-free implementation, it potentially caused
very long lived lock acquisitions in handle_processor_message.
* Unify and clean DkgConfirmer and DkgRemoval
Does so via adding a new file for the common code, SigningProtocol.
Modifies from_cache to return the preprocess with the machine, as there's no
reason not to. Also removes an unused Result around the type.
Clarifies the security around deterministic nonces, removing them for
saved-to-disk cached preprocesses. The cached preprocesses are encrypted as the
DB is not a proper secret store.
Moves arguments always present in the protocol from function arguments into the
struct itself.
Removes the horribly ugly code in DkgRemoval, fixing multiple issues present
with it which would cause it to fail on use.
* Set SeraiBlockNumber in cosign.rs as it's used by the cosigning protocol
* Remove unnecessary Clone from lambdas in coordinator
* Remove the EventDb from Tributary scanner
We used per-Transaction DB TXNs so on error, we don't have to rescan the entire
block yet only the rest of it. We prevented scanning multiple transactions by
tracking which we already had.
This is over-engineered and not worth it.
* Implement borsh for HasEvents, removing the manual encoding
* Merge DkgConfirmer and DkgRemoval into signing_protocol.rs
Fixes a bug in DkgConfirmer which would cause it to improperly handle indexes
if any validator had multiple key shares.
* Strictly type DataSpecification's Label
* Correct threshold_i_map_to_keys_and_musig_i_map
It didn't include the participant's own index and accordingly was offset.
* Create TributaryBlockHandler
This struct contains all variables prior passed to handle_block and stops them
from being passed around again and again.
This also ensures fatal_slash is only called while handling a block, as needed
as it expects to operate under perfect consensus.
* Inline accumulate, store confirmation nonces with shares
Inlining accumulate makes sense due to the amount of data accumulate needed to
be passed.
Storing confirmation nonces with shares ensures that both are available or
neither. Prior, one could be yet the other may not have been (requiring an
assert in runtime to ensure we didn't bungle it somehow).
* Create helper functions for handling DkgRemoval/SubstrateSign/Sign Tributary TXs
* Move Label into SignData
All of our transactions which use SignData end up with the same common usage
pattern for Label, justifying this.
Removes 3 transactions, explicitly de-duplicating their handlers.
* Remove CurrentlyCompletingKeyPair for the non-contextual DkgKeyPair
* Remove the manual read/write for TributarySpec for borsh
This struct doesn't have any optimizations booned by the manual impl. Using
borsh reduces our scope.
* Use temporary variables to further minimize LoC in tributary handler
* Remove usage of tuples for non-trivial Tributary transactions
* Remove serde from dkg
serde could be used to deserialize intenrally inconsistent objects which could
lead to panics or faults.
The BorshDeserialize derives have been replaced with a manual implementation
which won't produce inconsistent objects.
* Abstract Future generics using new trait definitions in coordinator
* Move published_signed_transaction to tributary/mod.rs to reduce the size of main.rs
* Split coordinator/src/tributary/mod.rs into spec.rs and transaction.rs
2023-12-10 20:21:44 -05:00
|
|
|
attempt: random_u32(&mut OsRng),
|
|
|
|
|
confirmation_share: {
|
2023-08-14 06:08:55 -04:00
|
|
|
let mut share = [0; 32];
|
|
|
|
|
OsRng.fill_bytes(&mut share);
|
|
|
|
|
share
|
|
|
|
|
},
|
Coordinator Cleanup (#481)
* Move logic for evaluating if a cosign should occur to its own file
Cleans it up and makes it more robust.
* Have expected_next_batch return an error instead of retrying
While convenient to offer an error-free implementation, it potentially caused
very long lived lock acquisitions in handle_processor_message.
* Unify and clean DkgConfirmer and DkgRemoval
Does so via adding a new file for the common code, SigningProtocol.
Modifies from_cache to return the preprocess with the machine, as there's no
reason not to. Also removes an unused Result around the type.
Clarifies the security around deterministic nonces, removing them for
saved-to-disk cached preprocesses. The cached preprocesses are encrypted as the
DB is not a proper secret store.
Moves arguments always present in the protocol from function arguments into the
struct itself.
Removes the horribly ugly code in DkgRemoval, fixing multiple issues present
with it which would cause it to fail on use.
* Set SeraiBlockNumber in cosign.rs as it's used by the cosigning protocol
* Remove unnecessary Clone from lambdas in coordinator
* Remove the EventDb from Tributary scanner
We used per-Transaction DB TXNs so on error, we don't have to rescan the entire
block yet only the rest of it. We prevented scanning multiple transactions by
tracking which we already had.
This is over-engineered and not worth it.
* Implement borsh for HasEvents, removing the manual encoding
* Merge DkgConfirmer and DkgRemoval into signing_protocol.rs
Fixes a bug in DkgConfirmer which would cause it to improperly handle indexes
if any validator had multiple key shares.
* Strictly type DataSpecification's Label
* Correct threshold_i_map_to_keys_and_musig_i_map
It didn't include the participant's own index and accordingly was offset.
* Create TributaryBlockHandler
This struct contains all variables prior passed to handle_block and stops them
from being passed around again and again.
This also ensures fatal_slash is only called while handling a block, as needed
as it expects to operate under perfect consensus.
* Inline accumulate, store confirmation nonces with shares
Inlining accumulate makes sense due to the amount of data accumulate needed to
be passed.
Storing confirmation nonces with shares ensures that both are available or
neither. Prior, one could be yet the other may not have been (requiring an
assert in runtime to ensure we didn't bungle it somehow).
* Create helper functions for handling DkgRemoval/SubstrateSign/Sign Tributary TXs
* Move Label into SignData
All of our transactions which use SignData end up with the same common usage
pattern for Label, justifying this.
Removes 3 transactions, explicitly de-duplicating their handlers.
* Remove CurrentlyCompletingKeyPair for the non-contextual DkgKeyPair
* Remove the manual read/write for TributarySpec for borsh
This struct doesn't have any optimizations booned by the manual impl. Using
borsh reduces our scope.
* Use temporary variables to further minimize LoC in tributary handler
* Remove usage of tuples for non-trivial Tributary transactions
* Remove serde from dkg
serde could be used to deserialize intenrally inconsistent objects which could
lead to panics or faults.
The BorshDeserialize derives have been replaced with a manual implementation
which won't produce inconsistent objects.
* Abstract Future generics using new trait definitions in coordinator
* Move published_signed_transaction to tributary/mod.rs to reduce the size of main.rs
* Split coordinator/src/tributary/mod.rs into spec.rs and transaction.rs
2023-12-10 20:21:44 -05:00
|
|
|
signed: random_signed_with_nonce(&mut OsRng, 2),
|
|
|
|
|
});
|
2023-08-14 06:08:55 -04:00
|
|
|
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
{
|
|
|
|
|
let mut block = [0; 32];
|
|
|
|
|
OsRng.fill_bytes(&mut block);
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&Transaction::CosignSubstrateBlock(block));
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
}
|
|
|
|
|
|
2023-04-20 14:24:49 -04:00
|
|
|
{
|
2023-08-31 23:04:37 -04:00
|
|
|
let mut block = [0; 32];
|
|
|
|
|
OsRng.fill_bytes(&mut block);
|
2023-12-12 12:28:53 -05:00
|
|
|
let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&Transaction::Batch { block, batch });
|
2023-04-20 14:24:49 -04:00
|
|
|
}
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&Transaction::SubstrateBlock(OsRng.next_u64()));
|
2023-04-11 19:04:53 -04:00
|
|
|
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
{
|
2023-12-12 12:28:53 -05:00
|
|
|
let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&Transaction::SubstrateSign(random_sign_data(
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
&mut OsRng,
|
2023-12-12 12:28:53 -05:00
|
|
|
SubstrateSignableId::Batch(batch),
|
Coordinator Cleanup (#481)
* Move logic for evaluating if a cosign should occur to its own file
Cleans it up and makes it more robust.
* Have expected_next_batch return an error instead of retrying
While convenient to offer an error-free implementation, it potentially caused
very long lived lock acquisitions in handle_processor_message.
* Unify and clean DkgConfirmer and DkgRemoval
Does so via adding a new file for the common code, SigningProtocol.
Modifies from_cache to return the preprocess with the machine, as there's no
reason not to. Also removes an unused Result around the type.
Clarifies the security around deterministic nonces, removing them for
saved-to-disk cached preprocesses. The cached preprocesses are encrypted as the
DB is not a proper secret store.
Moves arguments always present in the protocol from function arguments into the
struct itself.
Removes the horribly ugly code in DkgRemoval, fixing multiple issues present
with it which would cause it to fail on use.
* Set SeraiBlockNumber in cosign.rs as it's used by the cosigning protocol
* Remove unnecessary Clone from lambdas in coordinator
* Remove the EventDb from Tributary scanner
We used per-Transaction DB TXNs so on error, we don't have to rescan the entire
block yet only the rest of it. We prevented scanning multiple transactions by
tracking which we already had.
This is over-engineered and not worth it.
* Implement borsh for HasEvents, removing the manual encoding
* Merge DkgConfirmer and DkgRemoval into signing_protocol.rs
Fixes a bug in DkgConfirmer which would cause it to improperly handle indexes
if any validator had multiple key shares.
* Strictly type DataSpecification's Label
* Correct threshold_i_map_to_keys_and_musig_i_map
It didn't include the participant's own index and accordingly was offset.
* Create TributaryBlockHandler
This struct contains all variables prior passed to handle_block and stops them
from being passed around again and again.
This also ensures fatal_slash is only called while handling a block, as needed
as it expects to operate under perfect consensus.
* Inline accumulate, store confirmation nonces with shares
Inlining accumulate makes sense due to the amount of data accumulate needed to
be passed.
Storing confirmation nonces with shares ensures that both are available or
neither. Prior, one could be yet the other may not have been (requiring an
assert in runtime to ensure we didn't bungle it somehow).
* Create helper functions for handling DkgRemoval/SubstrateSign/Sign Tributary TXs
* Move Label into SignData
All of our transactions which use SignData end up with the same common usage
pattern for Label, justifying this.
Removes 3 transactions, explicitly de-duplicating their handlers.
* Remove CurrentlyCompletingKeyPair for the non-contextual DkgKeyPair
* Remove the manual read/write for TributarySpec for borsh
This struct doesn't have any optimizations booned by the manual impl. Using
borsh reduces our scope.
* Use temporary variables to further minimize LoC in tributary handler
* Remove usage of tuples for non-trivial Tributary transactions
* Remove serde from dkg
serde could be used to deserialize intenrally inconsistent objects which could
lead to panics or faults.
The BorshDeserialize derives have been replaced with a manual implementation
which won't produce inconsistent objects.
* Abstract Future generics using new trait definitions in coordinator
* Move published_signed_transaction to tributary/mod.rs to reduce the size of main.rs
* Split coordinator/src/tributary/mod.rs into spec.rs and transaction.rs
2023-12-10 20:21:44 -05:00
|
|
|
Label::Preprocess,
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
)));
|
|
|
|
|
}
|
|
|
|
|
{
|
2023-12-12 12:28:53 -05:00
|
|
|
let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&Transaction::SubstrateSign(random_sign_data(
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
&mut OsRng,
|
2023-12-12 12:28:53 -05:00
|
|
|
SubstrateSignableId::Batch(batch),
|
Coordinator Cleanup (#481)
* Move logic for evaluating if a cosign should occur to its own file
Cleans it up and makes it more robust.
* Have expected_next_batch return an error instead of retrying
While convenient to offer an error-free implementation, it potentially caused
very long lived lock acquisitions in handle_processor_message.
* Unify and clean DkgConfirmer and DkgRemoval
Does so via adding a new file for the common code, SigningProtocol.
Modifies from_cache to return the preprocess with the machine, as there's no
reason not to. Also removes an unused Result around the type.
Clarifies the security around deterministic nonces, removing them for
saved-to-disk cached preprocesses. The cached preprocesses are encrypted as the
DB is not a proper secret store.
Moves arguments always present in the protocol from function arguments into the
struct itself.
Removes the horribly ugly code in DkgRemoval, fixing multiple issues present
with it which would cause it to fail on use.
* Set SeraiBlockNumber in cosign.rs as it's used by the cosigning protocol
* Remove unnecessary Clone from lambdas in coordinator
* Remove the EventDb from Tributary scanner
We used per-Transaction DB TXNs so on error, we don't have to rescan the entire
block yet only the rest of it. We prevented scanning multiple transactions by
tracking which we already had.
This is over-engineered and not worth it.
* Implement borsh for HasEvents, removing the manual encoding
* Merge DkgConfirmer and DkgRemoval into signing_protocol.rs
Fixes a bug in DkgConfirmer which would cause it to improperly handle indexes
if any validator had multiple key shares.
* Strictly type DataSpecification's Label
* Correct threshold_i_map_to_keys_and_musig_i_map
It didn't include the participant's own index and accordingly was offset.
* Create TributaryBlockHandler
This struct contains all variables prior passed to handle_block and stops them
from being passed around again and again.
This also ensures fatal_slash is only called while handling a block, as needed
as it expects to operate under perfect consensus.
* Inline accumulate, store confirmation nonces with shares
Inlining accumulate makes sense due to the amount of data accumulate needed to
be passed.
Storing confirmation nonces with shares ensures that both are available or
neither. Prior, one could be yet the other may not have been (requiring an
assert in runtime to ensure we didn't bungle it somehow).
* Create helper functions for handling DkgRemoval/SubstrateSign/Sign Tributary TXs
* Move Label into SignData
All of our transactions which use SignData end up with the same common usage
pattern for Label, justifying this.
Removes 3 transactions, explicitly de-duplicating their handlers.
* Remove CurrentlyCompletingKeyPair for the non-contextual DkgKeyPair
* Remove the manual read/write for TributarySpec for borsh
This struct doesn't have any optimizations booned by the manual impl. Using
borsh reduces our scope.
* Use temporary variables to further minimize LoC in tributary handler
* Remove usage of tuples for non-trivial Tributary transactions
* Remove serde from dkg
serde could be used to deserialize intenrally inconsistent objects which could
lead to panics or faults.
The BorshDeserialize derives have been replaced with a manual implementation
which won't produce inconsistent objects.
* Abstract Future generics using new trait definitions in coordinator
* Move published_signed_transaction to tributary/mod.rs to reduce the size of main.rs
* Split coordinator/src/tributary/mod.rs into spec.rs and transaction.rs
2023-12-10 20:21:44 -05:00
|
|
|
Label::Share,
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
)));
|
|
|
|
|
}
|
2023-04-20 06:59:42 -04:00
|
|
|
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
{
|
|
|
|
|
let mut plan = [0; 32];
|
|
|
|
|
OsRng.fill_bytes(&mut plan);
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Preprocess)));
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
}
|
|
|
|
|
{
|
|
|
|
|
let mut plan = [0; 32];
|
|
|
|
|
OsRng.fill_bytes(&mut plan);
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Share)));
|
Add a cosigning protocol to ensure finalizations are unique (#433)
* Add a function to deterministically decide which Serai blocks should be co-signed
Has a 5 minute latency between co-signs, also used as the maximal latency
before a co-sign is started.
* Get all active tributaries we're in at a specific block
* Add and route CosignSubstrateBlock, a new provided TX
* Split queued cosigns per network
* Rename BatchSignId to SubstrateSignId
* Add SubstrateSignableId, a meta-type for either Batch or Block, and modularize around it
* Handle the CosignSubstrateBlock provided TX
* Revert substrate_signer.rs to develop (and patch to still work)
Due to SubstrateSigner moving when the prior multisig closes, yet cosigning
occurring with the most recent key, a single SubstrateSigner can be reused.
We could manage multiple SubstrateSigners, yet considering the much lower
specifications for cosigning, I'd rather treat it distinctly.
* Route cosigning through the processor
* Add note to rename SubstrateSigner post-PR
I don't want to do so now in order to preserve the diff's clarity.
* Implement cosign evaluation into the coordinator
* Get tests to compile
* Bug fixes, mark blocks without cosigners available as cosigned
* Correct the ID Batch preprocesses are saved under, add log statements
* Create a dedicated function to handle cosigns
* Correct the flow around Batch verification/queueing
Verifying `Batch`s could stall when a `Batch` was signed before its
predecessors/before the block it's contained in was cosigned (the latter being
inevitable as we can't sign a block containing a signed batch before signing
the batch).
Now, Batch verification happens on a distinct async task in order to not block
the handling of processor messages. This task is the sole caller of verify in
order to ensure last_verified_batch isn't unexpectedly mutated.
When the processor message handler needs to access it, or needs to queue a
Batch, it associates the DB TXN with a lock preventing the other task from
doing so.
This lock, as currently implemented, is a poor and inefficient design. It
should be modified to the pattern used for cosign management. Additionally, a
new primitive of a DB-backed channel may be immensely valuable.
Fixes a standing potential deadlock and a deadlock introduced with the
cosigning protocol.
* Working full-stack tests
After the last commit, this only required extending a timeout.
* Replace "co-sign" with "cosign" to make finding text easier
* Update the coordinator tests to support cosigning
* Inline prior_batch calculation to prevent panic on rotation
Noticed when doing a final review of the branch.
2023-11-15 16:57:21 -05:00
|
|
|
}
|
2023-08-31 23:39:36 -04:00
|
|
|
|
|
|
|
|
{
|
|
|
|
|
let mut plan = [0; 32];
|
|
|
|
|
OsRng.fill_bytes(&mut plan);
|
|
|
|
|
let mut tx_hash = vec![0; (OsRng.next_u64() % 64).try_into().unwrap()];
|
|
|
|
|
OsRng.fill_bytes(&mut tx_hash);
|
2023-12-17 00:01:41 -05:00
|
|
|
test_read_write(&Transaction::SignCompleted {
|
2023-08-31 23:39:36 -04:00
|
|
|
plan,
|
|
|
|
|
tx_hash,
|
2023-12-01 23:31:18 -05:00
|
|
|
first_signer: random_signed_with_nonce(&mut OsRng, 2).signer,
|
|
|
|
|
signature: random_signed_with_nonce(&mut OsRng, 2).signature,
|
2023-08-31 23:39:36 -04:00
|
|
|
});
|
|
|
|
|
}
|
2024-01-29 03:48:53 -05:00
|
|
|
|
|
|
|
|
test_read_write(&Transaction::SlashReport(
|
|
|
|
|
{
|
|
|
|
|
let amount =
|
|
|
|
|
usize::try_from(OsRng.next_u64() % u64::from(MAX_KEY_SHARES_PER_SET - 1)).unwrap();
|
|
|
|
|
let mut points = vec![];
|
|
|
|
|
for _ in 0 .. amount {
|
|
|
|
|
points.push((OsRng.next_u64() >> 32).try_into().unwrap());
|
|
|
|
|
}
|
|
|
|
|
points
|
|
|
|
|
},
|
|
|
|
|
random_signed_with_nonce(&mut OsRng, 0),
|
|
|
|
|
));
|
2023-04-11 19:04:53 -04:00
|
|
|
}
|