// serai/substrate/validator-sets/src/lib.rs

#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
mod embedded_elliptic_curve_keys;
use embedded_elliptic_curve_keys::*;
mod allocations;
use allocations::*;
mod sessions;
use sessions::{*, GenesisValidators as GenesisValidatorsContainer};
/*
use core::marker::PhantomData;
use scale::{Encode, Decode};
use scale_info::TypeInfo;
use sp_std::{vec, vec::Vec};
use sp_core::sr25519::{Public, Signature};
use sp_application_crypto::RuntimePublic;
use sp_session::{ShouldEndSession, GetSessionNumber, GetValidatorCount};
use sp_runtime::{KeyTypeId, ConsensusEngineId, traits::IsMember};
use sp_staking::offence::{ReportOffence, Offence, OffenceError};
use frame_system::{pallet_prelude::*, RawOrigin};
use frame_support::{
pallet_prelude::*,
sp_runtime::SaturatedConversion,
traits::{DisabledValidators, KeyOwnerProofSystem, FindAuthor},
BoundedVec, WeakBoundedVec, StoragePrefixedMap,
};
use serai_primitives::*;
pub use validator_sets_primitives as primitives;
use primitives::*;
use coins_pallet::{Pallet as Coins, AllowMint};
use dex_pallet::Pallet as Dex;
use pallet_babe::{
Pallet as Babe, AuthorityId as BabeAuthorityId, EquivocationOffence as BabeEquivocationOffence,
};
use pallet_grandpa::{
Pallet as Grandpa, AuthorityId as GrandpaAuthorityId,
EquivocationOffence as GrandpaEquivocationOffence,
};
#[derive(Debug, Encode, Decode, TypeInfo, PartialEq, Eq, Clone)]
pub struct MembershipProof<T: pallet::Config>(pub Public, pub PhantomData<T>);
impl<T: pallet::Config> GetSessionNumber for MembershipProof<T> {
fn session(&self) -> u32 {
let current = Pallet::<T>::session(NetworkId::Serai).unwrap().0;
if Babe::<T>::is_member(&BabeAuthorityId::from(self.0)) {
current
} else {
// if it isn't in the current session, it should have been in the previous one.
current - 1
}
}
}
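// Illustrative example (not from the source): if the current Serai session is `Session(5)` and
// the reported Babe authority key is no longer in the live authority set, the offence is
// attributed to `Session(4)`, the most recent session the key could have belonged to.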
impl<T: pallet::Config> GetValidatorCount for MembershipProof<T> {
// We only implement this interface to satisfy trait requirements.
// Although this may return the wrong count if the offender was in the previous set, we don't
// rely on it, and Substrate only uses it for economic calculations we also don't rely on.
fn validator_count(&self) -> u32 {
u32::try_from(Babe::<T>::authorities().len()).unwrap()
}
}
*/
#[expect(clippy::ignored_unit_patterns, clippy::cast_possible_truncation)]
#[frame_support::pallet]
mod pallet {
use sp_core::sr25519::Public;
use frame_system::pallet_prelude::*;
use frame_support::pallet_prelude::*;
use serai_primitives::{
crypto::KeyPair, network_id::*, coin::*, balance::*, validator_sets::*, address::SeraiAddress,
};
use coins_pallet::Pallet as Coins;
use super::*;
#[pallet::config]
pub trait Config: frame_system::Config + coins_pallet::Config {
type RuntimeEvent: IsType<<Self as frame_system::Config>::RuntimeEvent> + From<Event<Self>>;
// type ShouldEndSession: ShouldEndSession<BlockNumberFor<Self>>;
}
/* TODO
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, serde::Serialize, serde::Deserialize)]
pub struct AllEmbeddedEllipticCurveKeysAtGenesis {
pub embedwards25519: BoundedVec<u8, ConstU32<{ MAX_KEY_LEN }>>,
pub secq256k1: BoundedVec<u8, ConstU32<{ MAX_KEY_LEN }>>,
}
#[pallet::genesis_config]
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
pub struct GenesisConfig<T: Config> {
/// Networks to spawn Serai with, and the stake requirement per key share.
///
/// Every participant at genesis will automatically be assumed to have this much stake.
/// This stake cannot be withdrawn however as there's no actual stake behind it.
pub networks: Vec<(NetworkId, Amount)>,
/// List of participants to place in the initial validator sets.
pub participants: Vec<(T::AccountId, AllEmbeddedEllipticCurveKeysAtGenesis)>,
}
impl<T: Config> Default for GenesisConfig<T> {
fn default() -> Self {
GenesisConfig { networks: Default::default(), participants: Default::default() }
}
}
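// A chain spec would populate this roughly as follows (sketch only; `Runtime`, the account, and
// the amounts are placeholders, not values from this repository):
//
//   GenesisConfig::<Runtime> {
//     networks: vec![(NetworkId::Serai, Amount(1_000_000))],
//     participants: vec![(
//       alice,
//       AllEmbeddedEllipticCurveKeysAtGenesis {
//         embedwards25519: alice_embedwards25519_key,
//         secq256k1: alice_secq256k1_key,
//       },
//     )],
//   }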
*/
#[pallet::pallet]
pub struct Pallet<T>(PhantomData<T>);
/*
/// The allocation required per key share.
// Uses Identity for the lookup to avoid a hash of a severely limited fixed key-space.
#[pallet::storage]
#[pallet::getter(fn allocation_per_key_share)]
pub type AllocationPerKeyShare<T: Config> =
StorageMap<_, Identity, NetworkId, Amount, OptionQuery>;
/// The validators selected to be in-set (and their key shares), regardless of whether they've
/// since been removed.
///
/// This storage item allows iterating over all validators and their key shares.
#[pallet::storage]
#[pallet::getter(fn participants_for_latest_decided_set)]
pub(crate) type Participants<T: Config> = StorageMap<
_,
Identity,
NetworkId,
BoundedVec<(Public, u64), ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 }>>,
OptionQuery,
>;
/// The validators selected to be in-set, regardless of whether they've since been removed.
///
/// This storage item allows quickly checking for presence in-set and looking up a validator's
/// key shares.
// Uses Identity for NetworkId to avoid a hash of a severely limited fixed key-space.
#[pallet::storage]
pub(crate) type InSet<T: Config> =
StorageDoubleMap<_, Identity, NetworkId, Blake2_128Concat, Public, u64, OptionQuery>;
}
*/
struct Abstractions<T: Config>(PhantomData<T>);
// Satisfy the `EmbeddedEllipticCurveKeys` abstraction
#[pallet::storage]
type EmbeddedEllipticCurveKeys<T: Config> = StorageDoubleMap<
_,
Identity,
ExternalNetworkId,
Blake2_128Concat,
Public,
serai_primitives::crypto::EmbeddedEllipticCurveKeys,
OptionQuery,
>;
impl<T: Config> EmbeddedEllipticCurveKeysStorage for Abstractions<T> {
type EmbeddedEllipticCurveKeys = EmbeddedEllipticCurveKeys<T>;
}
// Satisfy the `Allocations` abstraction
#[pallet::storage]
type Allocations<T: Config> =
StorageMap<_, Blake2_128Concat, AllocationsKey, Amount, OptionQuery>;
// This has to use `Identity` per the documentation of `AllocationsStorage`
#[pallet::storage]
type SortedAllocations<T: Config> =
StorageMap<_, Identity, SortedAllocationsKey, (), OptionQuery>;
impl<T: Config> AllocationsStorage for Abstractions<T> {
type Allocations = Allocations<T>;
type SortedAllocations = SortedAllocations<T>;
}
// Satisfy the `Sessions` abstraction
// We use `Identity` as the hasher for `NetworkId` due to how constrained it is
#[pallet::storage]
type GenesisValidators<T: Config> = StorageValue<_, GenesisValidatorsContainer, OptionQuery>;
#[pallet::storage]
type AllocationPerKeyShare<T: Config> = StorageMap<_, Identity, NetworkId, Amount, OptionQuery>;
#[pallet::storage]
type CurrentSession<T: Config> = StorageMap<_, Identity, NetworkId, Session, OptionQuery>;
#[pallet::storage]
type LatestDecidedSession<T: Config> = StorageMap<_, Identity, NetworkId, Session, OptionQuery>;
// This has to use `Identity` per the documentation of `SessionsStorage`
#[pallet::storage]
type SelectedValidators<T: Config> =
StorageMap<_, Identity, SelectedValidatorsKey, u64, OptionQuery>;
#[pallet::storage]
type TotalAllocatedStake<T: Config> = StorageMap<_, Identity, NetworkId, Amount, OptionQuery>;
#[pallet::storage]
type DelayedDeallocations<T: Config> =
StorageDoubleMap<_, Blake2_128Concat, Public, Identity, Session, Amount, OptionQuery>;
impl<T: Config> SessionsStorage for Abstractions<T> {
type GenesisValidators = GenesisValidators<T>;
type AllocationPerKeyShare = AllocationPerKeyShare<T>;
type CurrentSession = CurrentSession<T>;
type LatestDecidedSession = LatestDecidedSession<T>;
type SelectedValidators = SelectedValidators<T>;
type TotalAllocatedStake = TotalAllocatedStake<T>;
type DelayedDeallocations = DelayedDeallocations<T>;
}
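// With the storage items above wired into `Abstractions`, the rest of the pallet drives the
// allocation and session logic through that type rather than touching storage directly, e.g.
// `Abstractions::<T>::increase_allocation(network, account, amount, block_reward)` or
// `Abstractions::<T>::attempt_new_session(network, include_genesis_validators)`.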
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event<T: Config> {}
/*
/// The generated key pair for a given validator set instance.
#[pallet::storage]
#[pallet::getter(fn keys)]
pub type Keys<T: Config> =
StorageMap<_, Twox64Concat, ExternalValidatorSet, KeyPair, OptionQuery>;
/// The key for validator sets which can (and still need to) publish their slash reports.
#[pallet::storage]
pub type PendingSlashReport<T: Config> =
StorageMap<_, Identity, ExternalNetworkId, Public, OptionQuery>;
/// Disabled validators.
#[pallet::storage]
pub type SeraiDisabledIndices<T: Config> = StorageMap<_, Identity, u32, Public, OptionQuery>;
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event<T: Config> {
NewSet {
set: ValidatorSet,
},
ParticipantRemoved {
set: ValidatorSet,
removed: T::AccountId,
},
KeyGen {
set: ExternalValidatorSet,
key_pair: KeyPair,
},
AcceptedHandover {
set: ValidatorSet,
},
SetRetired {
set: ValidatorSet,
},
AllocationIncreased {
validator: T::AccountId,
network: NetworkId,
amount: Amount,
},
AllocationDecreased {
validator: T::AccountId,
network: NetworkId,
amount: Amount,
delayed_until: Option<Session>,
},
DeallocationClaimed {
validator: T::AccountId,
network: NetworkId,
session: Session,
},
}
impl<T: Config> Pallet<T> {
fn new_set(network: NetworkId) {
// TODO
let include_genesis_validators = true;
// TODO: prevent new set if it doesn't have enough stake for economic security.
Abstractions::<T>::attempt_new_session(network, include_genesis_validators)
/* TODO
let set = ValidatorSet { network, session };
Pallet::<T>::deposit_event(Event::NewSet { set });
*/
}
}
*/
#[pallet::error]
pub enum Error<T> {
/// The provided embedded elliptic curve keys were invalid.
InvalidEmbeddedEllipticCurveKeys,
/// Allocation was erroneous.
AllocationError(AllocationError),
/// Deallocation was erroneous.
DeallocationError(DeallocationError),
}
/* TODO
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn on_initialize(n: BlockNumberFor<T>) -> Weight {
if T::ShouldEndSession::should_end_session(n) {
Self::rotate_session();
// TODO: set the proper weights
T::BlockWeights::get().max_block
} else {
Weight::zero()
}
}
}
#[pallet::genesis_build]
impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
fn build(&self) {
for (id, stake) in self.networks.clone() {
AllocationPerKeyShare::<T>::set(id, Some(stake));
for participant in &self.participants {
if Abstractions::<T>::set_allocation(id, participant.0, stake) {
panic!("participants contained duplicates");
}
EmbeddedEllipticCurveKeys::<T>::set(
participant.0,
EmbeddedEllipticCurve::Embedwards25519,
Some(participant.1.embedwards25519.clone()),
);
EmbeddedEllipticCurveKeys::<T>::set(
participant.0,
EmbeddedEllipticCurve::Secq256k1,
Some(participant.1.secq256k1.clone()),
);
}
Pallet::<T>::new_set(id);
}
}
}
*/
impl<T: Config> Pallet<T> {
fn account() -> T::AccountId {
SeraiAddress::system(b"ValidatorSets").into()
}
/*
// is_bft returns whether the network is able to survive any single node becoming byzantine.
fn is_bft(network: NetworkId) -> bool {
let allocation_per_key_share = AllocationPerKeyShare::<T>::get(network).unwrap().0;
let mut validators_len = 0;
let mut top = None;
let mut key_shares = 0;
for (_, amount) in Abstractions::<T>::iter_allocations(network, allocation_per_key_share) {
validators_len += 1;
key_shares += amount.0 / allocation_per_key_share;
if top.is_none() {
top = Some(key_shares);
}
if key_shares > u64::from(MAX_KEY_SHARES_PER_SET_U32) {
break;
}
}
let Some(top) = top else { return false };
// key_shares may be over MAX_KEY_SHARES_PER_SET, which will cause a round-robin reduction of
// each validator's key shares until their sum is MAX_KEY_SHARES_PER_SET.
// post_amortization_key_shares_for_top_validator yields what the top validator's key shares
// would be after such a reduction, letting us evaluate this correctly.
let top = post_amortization_key_shares_for_top_validator(validators_len, top, key_shares);
(top * 3) < key_shares.min(MAX_KEY_SHARES_PER_SET_U32.into())
}
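// Worked example: with four validators each holding exactly one key share, top = 1 and
// key_shares = 4, so (1 * 3) < 4 and the set is BFT. With only three single-share validators,
// (1 * 3) < 3 is false, so a single byzantine validator could break the 2/3-honest assumption.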
fn increase_allocation(
network: NetworkId,
account: T::AccountId,
amount: Amount,
block_reward: bool,
) -> DispatchResult {
/* TODO
// The above is_bft calls are only used to check a BFT net doesn't become non-BFT
// Check here if this call would prevent a non-BFT net from *ever* becoming BFT
if (new_allocation / allocation_per_key_share) >= (MAX_KEY_SHARES_PER_SET_U32 / 3).into() {
Err(Error::<T>::AllocationWouldPreventFaultTolerance)?;
}
*/
Abstractions::<T>::increase_allocation(network, account, amount, block_reward)
}
fn session_to_unlock_on_for_current_set(network: NetworkId) -> Option<Session> {
let mut to_unlock_on = Self::session(network)?;
// Move to the next session, as deallocating currently in-use stake is obviously invalid
to_unlock_on.0 += 1;
if network == NetworkId::Serai {
// Since the next Serai set will already have been decided, we can only deallocate one
// session later
to_unlock_on.0 += 1;
}
// Increase the session by one, creating a cooldown period
to_unlock_on.0 += 1;
Some(to_unlock_on)
}
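// A minimal sketch of the resulting timeline, assuming the current session is Session(5)
// (an illustrative number, not chain state):
//   external network: Session(5 + 1 + 1) = Session(7) (next session, plus one of cooldown)
//   Serai:            Session(5 + 1 + 1 + 1) = Session(8) (the next set is already decided)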
/// Decreases a validator's allocation to a set.
///
/// Errors if the capacity provided by this allocation is in use.
///
/// Errors if a partial decrease of allocation would put the remaining allocation below the
/// minimum requirement.
///
/// The capacity previously provided by the allocation is immediately removed, in order to
/// ensure it doesn't become used (which would prevent deallocation).
///
/// Returns whether the amount is immediately eligible for deallocation.
fn decrease_allocation(
network: NetworkId,
account: T::AccountId,
amount: Amount,
) -> Result<bool, DispatchError> {
/* TODO
// Check it's safe to decrease this set's stake by this amount
if let NetworkId::External(n) = network {
let new_total_staked = Self::total_allocated_stake(NetworkId::from(n))
.unwrap()
.0
.checked_sub(amount.0)
.ok_or(Error::<T>::NotEnoughAllocated)?;
let required_stake = Self::required_stake_for_network(n);
if new_total_staked < required_stake {
Err(Error::<T>::DeallocationWouldRemoveEconomicSecurity)?;
}
}
let decreased_key_shares =
(old_allocation / allocation_per_key_share) > (new_allocation / allocation_per_key_share);
// If this decreases the validator's key shares, error if the new set is unable to handle
// byzantine faults
let mut was_bft = None;
if decreased_key_shares {
was_bft = Some(Self::is_bft(network));
}
if let Some(was_bft) = was_bft {
if was_bft && (!Self::is_bft(network)) {
Err(Error::<T>::DeallocationWouldRemoveFaultTolerance)?;
}
}
*/
Sessions::<T>::decrease_allocation(network, account, amount)
}
// Checks if this session has completed the handover from the prior session.
fn handover_completed(network: NetworkId, session: Session) -> bool {
let Some(current_session) = Self::session(network) else { return false };
// If the session we've been queried about is old, it must have completed its handover
if current_session.0 > session.0 {
return true;
}
// If the session we've been queried about has yet to start, it can't have completed its
// handover
if current_session.0 < session.0 {
return false;
}
let NetworkId::External(n) = network else {
// Handover is automatically complete for Serai as it doesn't have a handover protocol
return true;
};
// The current session must have set keys for its handover to be completed
if !Keys::<T>::contains_key(ExternalValidatorSet { network: n, session }) {
return false;
}
// This must be the first session (which has set keys) OR the prior session must have been
// retired (signified by its keys no longer being present)
(session.0 == 0) ||
(!Keys::<T>::contains_key(ExternalValidatorSet {
network: n,
session: Session(session.0 - 1),
}))
}
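// A hedged summary of the branches above, using hypothetical session numbers: with the
// current session at Session(4), querying Session(3) returns true (older sessions completed
// their handover by definition) and Session(5) returns false (it hasn't started). Querying
// Session(4) itself on an external network returns true only once Session(4) has set keys
// and Session(3)'s keys have been removed by retire_set (or Session(4) is session 0).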
fn new_session() {
for network in serai_primitives::NETWORKS {
// If this network hasn't started sessions yet, don't start one now
let Some(current_session) = Self::session(network) else { continue };
// Only spawn a new set if:
// - This is Serai, as we need to rotate Serai upon a new session (per Babe)
// - The current set was actually established with a completed handover protocol
if (network == NetworkId::Serai) || Self::handover_completed(network, current_session) {
Pallet::<T>::new_set(network);
// Let the Dex know the session has rotated.
Dex::<T>::on_new_session(network);
}
}
}
// TODO: This is called retire_set, yet just starts retiring the set
// Update the nomenclature within this function
pub fn retire_set(set: ValidatorSet) {
// Serai doesn't set keys and network slashes are handled by BABE/GRANDPA
if let NetworkId::External(n) = set.network {
// If the set prior to the one now being retired never reported its slashes, emit its
// retirement now
if PendingSlashReport::<T>::get(n).is_some() {
Self::deposit_event(Event::SetRetired {
set: ValidatorSet { network: set.network, session: Session(set.session.0 - 1) },
});
}
// This overwrites the prior value as the prior to-report set's stake presumably just
// unlocked, making their report unenforceable
let keys =
Keys::<T>::take(ExternalValidatorSet { network: n, session: set.session }).unwrap();
PendingSlashReport::<T>::set(n, Some(keys.0));
} else {
// Emit the event for the Serai network
Self::deposit_event(Event::SetRetired { set });
}
// We're retiring this set because the set after it accepted the handover
Self::deposit_event(Event::AcceptedHandover {
set: ValidatorSet { network: set.network, session: Session(set.session.0 + 1) },
});
}
/// Take the deallocatable amount.
///
/// `session` refers to the Session the stake becomes deallocatable on.
fn take_deallocatable_amount(
network: NetworkId,
session: Session,
key: Public,
) -> Option<Amount> {
// Check this Session has properly started, completing the handover from the prior session.
if !Self::handover_completed(network, session) {
return None;
}
PendingDeallocations::<T>::take((network, key), session)
}
pub(crate) fn rotate_session() {
// The next Serai validators that are in the queue.
let now_validators = Participants::<T>::get(NetworkId::Serai)
.expect("no Serai participants upon rotate_session");
let prior_serai_session = Self::session(NetworkId::Serai).unwrap();
// TODO: T::SessionHandler::on_before_session_ending() was here.
// end the current serai session.
Self::retire_set(ValidatorSet { network: NetworkId::Serai, session: prior_serai_session });
// make a new session and get the next validator set.
Self::new_session();
// Update Babe and Grandpa
let session = prior_serai_session.0 + 1;
let next_validators = Participants::<T>::get(NetworkId::Serai).unwrap();
Babe::<T>::enact_epoch_change(
WeakBoundedVec::force_from(
now_validators.iter().copied().map(|(id, w)| (BabeAuthorityId::from(id), w)).collect(),
None,
),
WeakBoundedVec::force_from(
next_validators.iter().copied().map(|(id, w)| (BabeAuthorityId::from(id), w)).collect(),
None,
),
Some(session),
);
Grandpa::<T>::new_session(
true,
session,
now_validators.into_iter().map(|(id, w)| (GrandpaAuthorityId::from(id), w)).collect(),
);
// Clear SeraiDisabledIndices, only preserving keys still present in the new session
// First drain so we don't mutate as we iterate
let mut disabled = vec![];
for (_, validator) in SeraiDisabledIndices::<T>::drain() {
disabled.push(validator);
}
for disabled in disabled {
Self::disable_serai_validator(disabled);
}
}
/// Returns the required stake in terms of SRI for a given `Balance`.
pub fn required_stake(balance: &ExternalBalance) -> SubstrateAmount {
use dex_pallet::HigherPrecisionBalance;
// This is inclusive of an increase in accuracy
let sri_per_coin = Dex::<T>::security_oracle_value(balance.coin).unwrap_or(Amount(0));
// See dex-pallet for the reasoning on these
let coin_decimals = balance.coin.decimals().max(5);
let accuracy_increase = HigherPrecisionBalance::from(SubstrateAmount::pow(10, coin_decimals));
let total_coin_value = u64::try_from(
HigherPrecisionBalance::from(balance.amount.0) *
HigherPrecisionBalance::from(sri_per_coin.0) /
accuracy_increase,
)
.unwrap_or(u64::MAX);
// Required stake formula: (COIN_VALUE * 1.5) + a 20% margin
let required_stake = total_coin_value.saturating_mul(3).saturating_div(2);
required_stake.saturating_add(total_coin_value.saturating_div(5))
}
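// Worked example of the formula above, in code form (a sketch; the value is an assumption,
// not a chain parameter):
//   let total_coin_value: u64 = 1_000;
//   let base = total_coin_value.saturating_mul(3).saturating_div(2); // 1_500 (150%)
//   let required = base.saturating_add(total_coin_value.saturating_div(5)); // 1_700 (+20% margin)
// i.e. the required stake is 1.7x the value being secured.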
/// Returns the current total required stake for a given `network`.
pub fn required_stake_for_network(network: ExternalNetworkId) -> SubstrateAmount {
let mut total_required = 0;
for coin in network.coins() {
let supply = Coins::<T>::supply(Coin::from(coin));
total_required += Self::required_stake(&ExternalBalance { coin, amount: Amount(supply) });
}
total_required
}
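// Sketch with assumed figures (the coins and their oracle valuations are hypothetical): for a
// network with two coins whose supplies are valued at 400 and 600 SRI, the total required
// stake is required_stake(400) + required_stake(600) = 680 + 1_020 = 1_700 SRI.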
pub fn distribute_block_rewards(
network: NetworkId,
account: T::AccountId,
amount: Amount,
) -> DispatchResult {
// TODO: Should this transfer be part of `increase_allocation`, since it must precede each
// call to it?
Coins::<T>::transfer_fn(
account,
Self::account(),
Balance { coin: Coin::Serai, amount },
)?;
Self::increase_allocation(network, account, amount, true)
}
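// Usage sketch (the amount is illustrative): distributing a 5 SRI block reward first moves
// 5 SRI from the recipient's account into this pallet's stake account, then bumps their
// allocation with the block-reward flag set; the exact handling of that flag lives in the
// allocations module.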
fn can_slash_serai_validator(validator: Public) -> bool {
// Checks if they're active or actively deallocating (letting us still slash them)
// We could check if they're upcoming/still allocating, yet that'd mean the equivocation is
// invalid (as they aren't actively signing anything) or severely dated
// It's not an edge case worth handling comprehensively, given the complexity of doing so
Babe::<T>::is_member(&BabeAuthorityId::from(validator)) ||
PendingDeallocations::<T>::iter_prefix((NetworkId::Serai, validator)).next().is_some()
}
fn slash_serai_validator(validator: Public) {
let network = NetworkId::Serai;
let mut allocation = Abstractions::<T>::get_allocation((network, validator))
.unwrap_or(Amount(0));
// reduce the current allocation to 0.
Abstractions::<T>::set_allocation(network, validator, Amount(0));
// Take the pending deallocation from the current session
allocation.0 += PendingDeallocations::<T>::take(
(network, validator),
Self::session_to_unlock_on_for_current_set(network).unwrap(),
)
.unwrap_or(Amount(0))
.0;
// Reduce the TotalAllocatedStake for the network, if in set
// TotalAllocatedStake is the sum of allocations and pending deallocations from the current
// session, since pending deallocations can still be slashed and therefore still contribute
// to economic security. That's why the current set's pending deallocation is taken above
// (before this reduction) while the remaining ones are only drained below (after it)
if InSet::<T>::contains_key(NetworkId::Serai, validator) {
let current_staked = Self::total_allocated_stake(network).unwrap();
TotalAllocatedStake::<T>::set(network, Some(current_staked - allocation));
}
// Clear any other pending deallocations.
for (_, pending) in PendingDeallocations::<T>::drain_prefix((network, validator)) {
allocation.0 += pending.0;
}
// burn the allocation from the stake account
Coins::<T>::burn(
RawOrigin::Signed(Self::account()).into(),
Balance { coin: Coin::Serai, amount: allocation },
)
.unwrap();
}
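// Accounting sketch (all figures are assumptions): a validator with a 100 SRI allocation, a
// 20 SRI deallocation pending for the current set's unlock session, and a further 10 SRI
// pending for a later session has all 130 SRI burned. TotalAllocatedStake, if they're in set,
// is only reduced by the 120 SRI counted toward it (the allocation plus the current set's
// pending deallocation).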
/// Disable a Serai validator, preventing them from further authoring blocks.
///
/// Returns true if the validator-to-disable was actually a validator.
/// Returns false if they weren't.
fn disable_serai_validator(validator: Public) -> bool {
if let Some(index) =
Babe::<T>::authorities().into_iter().position(|(id, _)| id.into_inner() == validator)
{
SeraiDisabledIndices::<T>::set(u32::try_from(index).unwrap(), Some(validator));
let session = Self::session(NetworkId::Serai).unwrap();
Self::deposit_event(Event::ParticipantRemoved {
set: ValidatorSet { network: NetworkId::Serai, session },
removed: validator,
});
true
} else {
false
}
}
/// Returns the external network key for a given external network.
pub fn external_network_key(network: ExternalNetworkId) -> Option<Vec<u8>> {
let current_session = Self::session(NetworkId::from(network))?;
let keys = Keys::<T>::get(ExternalValidatorSet { network, session: current_session })?;
Some(keys.1.into_inner())
}
*/
}
#[pallet::call]
impl<T: Config> Pallet<T> {
/*
#[pallet::call_index(0)]
#[pallet::weight(0)] // TODO
pub fn set_keys(
origin: OriginFor<T>,
network: ExternalNetworkId,
key_pair: KeyPair,
signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
signature: Signature,
) -> DispatchResult {
ensure_none(origin)?;
// signature isn't checked as this is an unsigned transaction, and validate_unsigned
// (called by pre_dispatch) checks it
let _ = signature_participants;
let _ = signature;
let session = Self::session(NetworkId::from(network)).unwrap();
let set = ExternalValidatorSet { network, session };
Keys::<T>::set(set, Some(key_pair.clone()));
// If this is the first ever set for this network, set TotalAllocatedStake now
// We generally set TotalAllocatedStake when the prior set retires, and the new set is fully
// active and liable. Since this is the first set, there is no prior set to wait to retire
if session == Session(0) {
Self::set_total_allocated_stake(NetworkId::from(network));
}
Self::deposit_event(Event::KeyGen { set, key_pair });
Ok(())
}
#[pallet::call_index(1)]
#[pallet::weight(0)] // TODO
pub fn report_slashes(
origin: OriginFor<T>,
network: ExternalNetworkId,
slashes: SlashReport,
signature: Signature,
) -> DispatchResult {
ensure_none(origin)?;
// signature isn't checked as this is an unsigned transaction, and validate_unsigned
// (called by pre_dispatch) checks it
let _ = signature;
// TODO: Handle slashes
let _ = slashes;
// Emit the SetRetired event
Pallet::<T>::deposit_event(Event::SetRetired {
set: ValidatorSet {
network: network.into(),
session: Session(Self::session(NetworkId::from(network)).unwrap().0 - 1),
},
});
Ok(())
}
*/
#[pallet::call_index(2)]
#[pallet::weight(0)] // TODO
pub fn set_embedded_elliptic_curve_keys(
origin: OriginFor<T>,
keys: serai_primitives::crypto::SignedEmbeddedEllipticCurveKeys,
) -> DispatchResult {
let signer = ensure_signed(origin)?;
<Abstractions<T> as crate::EmbeddedEllipticCurveKeys>::set_embedded_elliptic_curve_keys(
signer, keys,
)
.map_err(|()| Error::<T>::InvalidEmbeddedEllipticCurveKeys)?;
Ok(())
}
#[pallet::call_index(3)]
#[pallet::weight(0)] // TODO
pub fn allocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult {
let validator = ensure_signed(origin)?;
Coins::<T>::transfer_fn(validator, Self::account(), Balance { coin: Coin::Serai, amount })?;
Abstractions::<T>::increase_allocation(network, validator, amount, false)
.map_err(Error::<T>::AllocationError)?;
Ok(())
}
#[pallet::call_index(4)]
#[pallet::weight(0)] // TODO
pub fn deallocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult {
let account = ensure_signed(origin)?;
let deallocation_timeline = Abstractions::<T>::decrease_allocation(network, account, amount)
.map_err(Error::<T>::DeallocationError)?;
if matches!(deallocation_timeline, DeallocationTimeline::Immediate) {
Coins::<T>::transfer_fn(Self::account(), account, Balance { coin: Coin::Serai, amount })?;
}
Ok(())
}
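// Flow sketch: when the allocations logic reports DeallocationTimeline::Immediate, the stake
// is returned by the transfer above; otherwise it presumably sits as a pending deallocation
// until claimed via the claim_deallocation extrinsic below (currently commented out), gated
// on take_deallocatable_amount.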
/*
#[pallet::call_index(5)]
#[pallet::weight((0, DispatchClass::Operational))] // TODO
pub fn claim_deallocation(
origin: OriginFor<T>,
network: NetworkId,
session: Session,
) -> DispatchResult {
let account = ensure_signed(origin)?;
let Some(amount) = Self::take_deallocatable_amount(network, session, account) else {
Err(Error::<T>::NonExistentDeallocation)?
};
Coins::<T>::transfer_fn(
Self::account(),
account,
Balance { coin: Coin::Serai, amount },
)?;
Self::deposit_event(Event::DeallocationClaimed { validator: account, network, session });
Ok(())
}
*/
}
/*
#[pallet::validate_unsigned]
impl<T: Config> ValidateUnsigned for Pallet<T> {
type Call = Call<T>;
fn validate_unsigned(_: TransactionSource, call: &Self::Call) -> TransactionValidity {
// Match to be exhaustive
match call {
Call::set_keys { network, ref key_pair, ref signature_participants, ref signature } => {
let network = *network;
// Confirm this set has a session
let Some(current_session) = Self::session(NetworkId::from(network)) else {
Err(InvalidTransaction::Custom(1))?
};
let set = ExternalValidatorSet { network, session: current_session };
// Confirm it has yet to set keys
if Keys::<T>::get(set).is_some() {
Err(InvalidTransaction::Stale)?;
}
// This is a necessary precondition, as the following code reads storage variables for the
// latest decided session under the assumption it's the current session
assert_eq!(Pallet::<T>::latest_decided_session(network.into()), Some(current_session));
let participants = Participants::<T>::get(NetworkId::from(network))
.expect("session existed without participants");
// Check the bitvec is of the proper length
if participants.len() != signature_participants.len() {
Err(InvalidTransaction::Custom(2))?;
}
let mut all_key_shares = 0;
let mut signers = vec![];
let mut signing_key_shares = 0;
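// Tally every participant's key shares, tracking which participants (and how many key
// shares) are included in this signature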
for (participant, in_use) in participants.into_iter().zip(signature_participants) {
let participant = participant.0;
let shares = InSet::<T>::get(NetworkId::from(network), participant)
.expect("participant from Participants wasn't InSet");
all_key_shares += shares;
if !in_use {
continue;
}
signers.push(participant);
signing_key_shares += shares;
}
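// Require the signers to represent a BFT supermajority of key shares: with f being the
// amount of key shares not signing, signing_key_shares >= (2 * f) + 1 means more than
// two-thirds of all key shares signed (e.g. at least 7 of 9)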
{
let f = all_key_shares - signing_key_shares;
if signing_key_shares < ((2 * f) + 1) {
Err(InvalidTransaction::Custom(3))?;
}
}
// Verify the signature with the MuSig key of the signers
// We theoretically don't need set_keys_message to bind to the participating signers, as the
// key we're signing with effectively already does so, yet there's no reason not to
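// musig_key aggregates the public keys of exactly these signers, so a valid signature shows
// the supermajority of key shares checked above approved this key pair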
if !musig_key(set.into(), &signers).verify(&set_keys_message(&set, key_pair), signature) {
Err(InvalidTransaction::BadProof)?;
}
ValidTransaction::with_tag_prefix("ValidatorSets")
.and_provides((0, set))
.longevity(u64::MAX)
.propagate(true)
.build()
}
Call::report_slashes { network, ref slashes, ref signature } => {
let network = *network;
let Some(key) = PendingSlashReport::<T>::take(network) else {
// Assumed already published
Err(InvalidTransaction::Stale)?
};
// There must have been a previous session if PendingSlashReport is populated
let set = ExternalValidatorSet {
network,
session: Session(Self::session(NetworkId::from(network)).unwrap().0 - 1),
};
if !key.verify(&slashes.report_slashes_message(), signature) {
Err(InvalidTransaction::BadProof)?;
}
ValidTransaction::with_tag_prefix("ValidatorSets")
.and_provides((1, set))
.longevity(MAX_KEY_SHARES_PER_SET_U32.into())
.propagate(true)
.build()
}
Call::set_embedded_elliptic_curve_key { .. } |
Call::allocate { .. } |
Call::deallocate { .. } |
Call::claim_deallocation { .. } => Err(InvalidTransaction::Call)?,
Call::__Ignore(_, _) => unreachable!(),
}
}
// Explicitly provide a pre-dispatch which calls validate_unsigned
fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> {
Self::validate_unsigned(TransactionSource::InBlock, call).map(|_| ())
}
}
impl<T: Config> AllowMint for Pallet<T> {
fn is_allowed(balance: &ExternalBalance) -> bool {
// the stake already required for this network, plus the stake additionally required for this mint
let current_required = Self::required_stake_for_network(balance.coin.network());
let new_required = current_required + Self::required_stake(balance);
// only allow the mint if the network's total allocated stake covers the new requirement
let staked =
Self::total_allocated_stake(NetworkId::from(balance.coin.network())).unwrap_or(Amount(0));
staked.0 >= new_required
}
}
impl<T: Config, V: Into<Public> + From<Public>> KeyOwnerProofSystem<(KeyTypeId, V)> for
Pallet<T> {
type Proof = MembershipProof<T>;
type IdentificationTuple = Public;
fn prove(key: (KeyTypeId, V)) -> Option<Self::Proof> {
Some(MembershipProof(key.1.into(), PhantomData))
}
fn check_proof(key: (KeyTypeId, V), proof: Self::Proof) -> Option<Self::IdentificationTuple> {
let validator = key.1.into();
// check the offender and the proof offender are the same.
if validator != proof.0 {
return None;
}
// check the validator is actually one we can slash
if !Self::can_slash_serai_validator(validator) {
return None;
}
Some(validator)
}
}
impl<T: Config> ReportOffence<Public, Public, BabeEquivocationOffence<Public>> for Pallet<T> {
/// Report an `offence`, slashing and disabling the offender (reporters are not rewarded).
fn report_offence(
_: Vec<Public>,
offence: BabeEquivocationOffence<Public>,
) -> Result<(), OffenceError> {
// slash the offender
let offender = offence.offender;
Self::slash_serai_validator(offender);
// disable it
Self::disable_serai_validator(offender);
Ok(())
}
fn is_known_offence(
offenders: &[Public],
_: &<BabeEquivocationOffence<Public> as Offence<Public>>::TimeSlot,
) -> bool {
for offender in offenders {
// It's not a known offence if we can still slash them
if Self::can_slash_serai_validator(*offender) {
return false;
}
}
true
}
}
impl<T: Config> ReportOffence<Public, Public, GrandpaEquivocationOffence<Public>> for Pallet<T> {
/// Report an `offence`, slashing and disabling the offender (reporters are not rewarded).
fn report_offence(
_: Vec<Public>,
offence: GrandpaEquivocationOffence<Public>,
) -> Result<(), OffenceError> {
// slash the offender
let offender = offence.offender;
Self::slash_serai_validator(offender);
// disable it
Self::disable_serai_validator(offender);
Ok(())
}
fn is_known_offence(
offenders: &[Public],
_slot: &<GrandpaEquivocationOffence<Public> as Offence<Public>>::TimeSlot,
) -> bool {
for offender in offenders {
if Self::can_slash_serai_validator(*offender) {
return false;
}
}
true
}
}
impl<T: Config> FindAuthor<Public> for Pallet<T> {
fn find_author<'a, I>(digests: I) -> Option<Public>
where
I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>,
{
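// Babe reports the author as an index into its authority list, so map that index back to
// the authority's public key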
let i = Babe::<T>::find_author(digests)?;
Some(Babe::<T>::authorities()[i as usize].0.clone().into())
}
}
impl<T: Config> DisabledValidators for Pallet<T> {
fn is_disabled(index: u32) -> bool {
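// A validator is disabled if its index has been recorded in SeraiDisabledIndices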
SeraiDisabledIndices::<T>::get(index).is_some()
}
}
*/
}
sp_api::decl_runtime_apis! {
#[api_version(1)]
pub trait ValidatorSetsApi {
/// Returns the validator set for a given network.
fn validators(
network_id: serai_primitives::network_id::NetworkId,
) -> Vec<serai_primitives::crypto::Public>;
/// Returns the external network key for a given external network.
fn external_network_key(
network: serai_primitives::network_id::ExternalNetworkId,
) -> Option<serai_primitives::crypto::ExternalKey>;
}
}
pub use pallet::*;