Introduce KeyShares struct to represent the amount of key shares

Includes associated improvements and bug fixes.
This commit is contained in:
Luke Parker
2025-09-16 01:33:31 -04:00
parent ddb8e1398e
commit 3722df7326
8 changed files with 269 additions and 78 deletions

View File

@@ -24,7 +24,10 @@ const HUMAN_READABLE_PART: bech32::Hrp = bech32::Hrp::parse_unchecked("sri");
/// The address for an account on Serai.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(feature = "non_canonical_scale_derivations", derive(scale::Encode, scale::Decode))]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen)
)]
pub struct SeraiAddress(pub [u8; 32]);
// These share encodings as 32-byte arrays

View File

@@ -6,8 +6,3 @@ pub const TARGET_BLOCK_TIME: Duration = Duration::from_secs(6);
/// The intended duration for a session.
// 1 week
pub const SESSION_LENGTH: Duration = Duration::from_secs(7 * 24 * 60 * 60);
/// The maximum amount of key shares per set.
pub const MAX_KEY_SHARES_PER_SET: u16 = 150;
/// The maximum amount of key shares per set, as an u32.
pub const MAX_KEY_SHARES_PER_SET_U32: u32 = MAX_KEY_SHARES_PER_SET as u32;

View File

@@ -1,16 +1,46 @@
use zeroize::Zeroize;
use borsh::{BorshSerialize, BorshDeserialize};
use crate::network_id::ExternalNetworkId;
use crate::{network_id::ExternalNetworkId, address::SeraiAddress};
/// The ID of a protocol.
pub type ProtocolId = [u8; 32];
/// A signal.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
allow(clippy::cast_possible_truncation),
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen)
)]
pub enum Signal {
/// A signal to retire the current protocol.
Retire {
/// The protocol to retire in favor of.
in_favor_of: [u8; 32],
in_favor_of: ProtocolId,
},
/// A signal to halt an external network.
Halt(ExternalNetworkId),
}
/// A retirement signal, registered on chain.
///
/// The signal's on-chain ID is derived by hashing its borsh encoding (see `id`), so the field
/// order here is part of the encoding and must not be reordered.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen)
)]
pub struct RegisteredRetirementSignal {
/// The protocol to retire in favor of.
pub in_favor_of: ProtocolId,
/// The registrant of this signal.
pub registrant: SeraiAddress,
/// The block number this was registered at.
pub registered_at: u64,
}
impl RegisteredRetirementSignal {
  /// The ID of this signal.
  ///
  /// This is the `blake2_256` hash of the signal's borsh encoding, so signals with identical
  /// fields share an ID.
  pub fn id(&self) -> ProtocolId {
    // Serializing with borsh into a Vec only fails on writer errors, which a Vec doesn't produce
    sp_core::blake2_256(
      &borsh::to_vec(self).expect("borsh serialization of a RegisteredRetirementSignal failed"),
    )
  }
}

View File

@@ -7,9 +7,9 @@ use ciphersuite::{group::GroupEncoding, GroupIo};
use dalek_ff_group::Ristretto;
use crate::{
constants::MAX_KEY_SHARES_PER_SET,
crypto::{Public, KeyPair},
network_id::{ExternalNetworkId, NetworkId},
balance::Amount,
};
mod slashes;
@@ -103,19 +103,84 @@ impl ExternalValidatorSet {
}
}
/// For a set of validators whose key shares may exceed the maximum, reduce until they are less
/// than or equal to the maximum.
///
/// This runs in time linear to the excess key shares and assumes the excess fits within a usize,
/// panicking otherwise.
///
/// Reduction occurs by reducing each validator in a reverse round-robin. This means the worst
/// validators lose their key shares first.
pub fn amortize_excess_key_shares(validators: &mut [(sp_core::sr25519::Public, u64)]) {
let total_key_shares = validators.iter().map(|(_key, shares)| shares).sum::<u64>();
for i in 0 .. usize::try_from(total_key_shares.saturating_sub(u64::from(MAX_KEY_SHARES_PER_SET)))
.unwrap()
{
validators[validators.len() - ((i % validators.len()) + 1)].1 -= 1;
/// The representation for an amount of key shares.
///
/// NOTE(review): the inner `u16` is `pub`, so `KeyShares(n)` can be constructed with a value
/// exceeding `MAX_PER_SET`, bypassing the bound enforced by `saturating_from` and `TryFrom`.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen)
)]
pub struct KeyShares(pub u16);
impl KeyShares {
  /// One key share.
  pub const ONE: KeyShares = KeyShares(1);
  /// The maximum amount of key shares per set.
  pub const MAX_PER_SET: u16 = 150;
  /// The maximum amount of key shares per set, represented as a `u32`.
  // Derived from `MAX_PER_SET` so the two constants can't silently diverge
  pub const MAX_PER_SET_U32: u32 = Self::MAX_PER_SET as u32;

  /// Create key shares from a `u16`.
  ///
  /// This will saturate the value if the `u16` exceeds the maximum amount of key shares.
  pub fn saturating_from(key_shares: u16) -> KeyShares {
    KeyShares(key_shares.min(Self::MAX_PER_SET))
  }

  /// Create key shares from an allocation.
  ///
  /// The amount of key shares is `allocation / allocation_per_key_share`, saturated to the
  /// maximum amount of key shares per set.
  ///
  /// Panics if `allocation_per_key_share` is zero (integer division by zero).
  pub fn from_allocation(allocation: Amount, allocation_per_key_share: Amount) -> Self {
    // Any quotient which doesn't fit in a u16 already exceeds `MAX_PER_SET`, so substituting
    // `u16::MAX` before the saturating conversion is lossless
    Self::saturating_from(
      u16::try_from(allocation.0 / allocation_per_key_share.0).unwrap_or(u16::MAX),
    )
  }

  /// For a set of validators whose key shares may exceed the maximum, reduce until they are less
  /// than or equal to the maximum.
  ///
  /// Returns the new amount of validators with a non-zero amount of key shares.
  ///
  /// This runs in time linear to the exceeded key shares and may panic if:
  /// - The list of validators is absurdly long.
  /// - The list of validators includes validators without key shares.
  ///
  /// Reduction occurs by reducing each validator in a reverse round-robin. This means the
  /// validators with the least key shares are evicted first.
  #[must_use]
  pub fn amortize_excess(validators: &mut [(sp_core::sr25519::Public, KeyShares)]) -> usize {
    // Sum in u64 so the total can't wrap in release builds, regardless of validator count
    let total_key_shares =
      validators.iter().map(|(_key, shares)| u64::from(shares.0)).sum::<u64>();
    let excess = usize::try_from(total_key_shares.saturating_sub(u64::from(Self::MAX_PER_SET)))
      .expect("excess key shares didn't fit in a usize");

    let mut actual_len = validators.len();
    // The offset, from the end of the list, of the next validator to take a key share from
    let mut offset = 1;
    for _ in 0 .. excess {
      // If the offset exceeds the new length, reset it
      if offset > actual_len {
        offset = 1;
      }
      // Take one key share from this validator
      let index = actual_len - offset;
      validators[index].1 .0 -= 1;
      // If they now have zero key shares, shrink the length and continue
      if validators[index].1 .0 == 0 {
        actual_len -= 1;
        continue;
      }
      // Increment the offset to take from the next validator on the next iteration
      offset += 1;
    }
    actual_len
  }
}
impl TryFrom<u16> for KeyShares {
  type Error = ();

  /// Fallibly create key shares from a `u16`, erroring if the value exceeds the maximum amount
  /// of key shares per set.
  fn try_from(value: u16) -> Result<Self, ()> {
    (value <= Self::MAX_PER_SET).then_some(Self(value)).ok_or(())
  }
}

View File

@@ -8,8 +8,9 @@ use borsh::{BorshSerialize, BorshDeserialize};
use sp_core::{ConstU32, bounded::BoundedVec};
use crate::{
constants::{TARGET_BLOCK_TIME, SESSION_LENGTH, MAX_KEY_SHARES_PER_SET_U32},
constants::{TARGET_BLOCK_TIME, SESSION_LENGTH},
balance::Amount,
validator_sets::KeyShares,
};
/// Each slash point is equivalent to the downtime implied by missing a block proposal.
@@ -212,7 +213,7 @@ pub struct SlashReport(
serialize_with = "crate::borsh_serialize_bounded_vec",
deserialize_with = "crate::borsh_deserialize_bounded_vec"
)]
pub BoundedVec<Slash, ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 }>>,
pub BoundedVec<Slash, ConstU32<{ KeyShares::MAX_PER_SET_U32 }>>,
);
/// An error when converting from a `Vec`.
@@ -251,7 +252,7 @@ impl SlashReport {
#[test]
fn test_penalty() {
for validators in [1, 50, 100, crate::constants::MAX_KEY_SHARES_PER_SET] {
for validators in [1, 50, 100, KeyShares::MAX_PER_SET_U32] {
let validators = NonZero::new(validators).unwrap();
// 12 hours of slash points should only decrease the rewards proportionately
let twelve_hours_of_slash_points =