2 Commits

Author SHA1 Message Date
Luke Parker
02afed13b4 Add a Sessions abstraction for validator-sets storage 2025-03-07 04:02:11 -05:00
Luke Parker
3fc00830de Add a dedicated Allocations struct for managing validator set allocations
Part of the DB abstraction necessary for this spaghetti.
2025-03-06 09:14:20 -05:00
7 changed files with 690 additions and 52 deletions

16
Cargo.lock generated
View File

@@ -9169,31 +9169,17 @@ name = "serai-validator-sets-pallet"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"bitvec", "bitvec",
"ciphersuite", "borsh",
"frame-support", "frame-support",
"frame-system", "frame-system",
"frost-schnorrkel",
"modular-frost",
"pallet-babe",
"pallet-grandpa",
"pallet-timestamp",
"parity-scale-codec", "parity-scale-codec",
"rand_core", "rand_core",
"scale-info", "scale-info",
"serai-coins-pallet", "serai-coins-pallet",
"serai-dex-pallet",
"serai-primitives", "serai-primitives",
"serde",
"sp-api",
"sp-application-crypto",
"sp-consensus-babe",
"sp-core", "sp-core",
"sp-io", "sp-io",
"sp-runtime", "sp-runtime",
"sp-session",
"sp-staking",
"sp-std",
"zeroize",
] ]
[[package]] [[package]]

View File

@@ -10,6 +10,14 @@ use serai_primitives::{
validator_sets::*, validator_sets::*,
}; };
/// Key(s) on embedded elliptic curve(s).
///
/// This may be a single key if the external network uses the same embedded elliptic curve as
/// used for the key to oraclize onto Serai. Else, it'll be a key on the embedded elliptic curve
/// used for the key to oraclize onto Serai concatenated with the key on the embedded elliptic
/// curve used for the external network.
pub type EmbeddedEllipticCurveKeys = BoundedVec<u8, ConstU32<{ 2 * ExternalKey::MAX_LEN }>>;
/// A call to the validator sets. /// A call to the validator sets.
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub enum Call { pub enum Call {
@@ -43,14 +51,11 @@ pub enum Call {
/// The network the origin is setting their embedded elliptic curve keys for. /// The network the origin is setting their embedded elliptic curve keys for.
network: ExternalNetworkId, network: ExternalNetworkId,
/// The keys on the embedded elliptic curves. /// The keys on the embedded elliptic curves.
///
/// This may be a single key if the external network uses the same embedded elliptic curve as
/// used for the key to oraclize onto Serai.
#[borsh( #[borsh(
serialize_with = "serai_primitives::sp_borsh::borsh_serialize_bounded_vec", serialize_with = "serai_primitives::sp_borsh::borsh_serialize_bounded_vec",
deserialize_with = "serai_primitives::sp_borsh::borsh_deserialize_bounded_vec" deserialize_with = "serai_primitives::sp_borsh::borsh_deserialize_bounded_vec"
)] )]
keys: BoundedVec<u8, ConstU32<{ 2 * ExternalKey::MAX_LEN }>>, keys: EmbeddedEllipticCurveKeys,
}, },
/// Allocate stake to a network. /// Allocate stake to a network.
allocate { allocate {

View File

@@ -18,6 +18,10 @@ pub enum EmbeddedEllipticCurve {
/// This type serializes to a subset of `NetworkId`. /// This type serializes to a subset of `NetworkId`.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[borsh(use_discriminant = true)] #[borsh(use_discriminant = true)]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen)
)]
#[non_exhaustive] #[non_exhaustive]
pub enum ExternalNetworkId { pub enum ExternalNetworkId {
/// The Bitcoin network. /// The Bitcoin network.
@@ -63,6 +67,10 @@ impl ExternalNetworkId {
/// The type used to identify networks. /// The type used to identify networks.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize)]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen)
)]
pub enum NetworkId { pub enum NetworkId {
/// The Serai network. /// The Serai network.
Serai, Serai,

View File

@@ -6,6 +6,7 @@ use borsh::{BorshSerialize, BorshDeserialize};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use crate::{ use crate::{
constants::MAX_KEY_SHARES_PER_SET,
crypto::{Public, KeyPair}, crypto::{Public, KeyPair},
network_id::{ExternalNetworkId, NetworkId}, network_id::{ExternalNetworkId, NetworkId},
}; };
@@ -15,10 +16,18 @@ pub use slashes::*;
/// The type used to identify a specific session of validators. /// The type used to identify a specific session of validators.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen)
)]
pub struct Session(pub u32); pub struct Session(pub u32);
/// The type used to identify a specific set of validators for an external network. /// The type used to identify a specific set of validators for an external network.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen)
)]
pub struct ExternalValidatorSet { pub struct ExternalValidatorSet {
/// The network this set of validators are for. /// The network this set of validators are for.
pub network: ExternalNetworkId, pub network: ExternalNetworkId,
@@ -28,6 +37,10 @@ pub struct ExternalValidatorSet {
/// The type used to identify a specific set of validators. /// The type used to identify a specific set of validators.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)] #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen)
)]
pub struct ValidatorSet { pub struct ValidatorSet {
/// The network this set of validators are for. /// The network this set of validators are for.
pub network: NetworkId, pub network: NetworkId,
@@ -74,3 +87,20 @@ impl ExternalValidatorSet {
borsh::to_vec(&(b"ValidatorSets-set_keys", self, key_pair)).unwrap() borsh::to_vec(&(b"ValidatorSets-set_keys", self, key_pair)).unwrap()
} }
} }
/// For a set of validators whose key shares may exceed the maximum, reduce until they are less
/// than or equal to the maximum.
///
/// This runs in time linear to the excess key shares and assumes the excess fits within a usize,
/// panicking otherwise.
///
/// Reduction occurs by reducing each validator in a reverse round-robin. This means the worst
/// validators lose their key shares first.
pub fn amortize_excess_key_shares(validators: &mut [(sp_core::sr25519::Public, u64)]) {
  // Sum the key shares currently assigned and determine how many must be removed
  let assigned = validators.iter().map(|(_key, shares)| shares).sum::<u64>();
  let excess = assigned.saturating_sub(u64::from(MAX_KEY_SHARES_PER_SET));
  let validators_len = validators.len();
  // Walk backwards from the end of the list (the worst validators), removing one key share at a
  // time, wrapping around if the excess exceeds the amount of validators
  for i in 0 .. usize::try_from(excess).unwrap() {
    let victim = validators_len - ((i % validators_len) + 1);
    validators[victim].1 -= 1;
  }
}

View File

@@ -21,39 +21,20 @@ bitvec = { version = "1", default-features = false, features = ["alloc", "serde"
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive", "bit-vec"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive", "bit-vec"] }
scale-info = { version = "2", default-features = false, features = ["derive", "bit-vec"] } scale-info = { version = "2", default-features = false, features = ["derive", "bit-vec"] }
serde = { version = "1", default-features = false, features = ["derive", "alloc"] }
sp-core = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false } sp-core = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
sp-io = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false } sp-io = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
sp-std = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
sp-api = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
sp-application-crypto = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
sp-runtime = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false } sp-runtime = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
sp-session = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
sp-staking = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
frame-system = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false } frame-system = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
frame-support = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false } frame-support = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
pallet-babe = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false } serai-primitives = { path = "../primitives", default-features = false, features = ["non_canonical_scale_derivations"] }
pallet-grandpa = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
serai-primitives = { path = "../primitives", default-features = false }
coins-pallet = { package = "serai-coins-pallet", path = "../coins", default-features = false } coins-pallet = { package = "serai-coins-pallet", path = "../coins", default-features = false }
dex-pallet = { package = "serai-dex-pallet", path = "../dex", default-features = false }
[dev-dependencies] [dev-dependencies]
pallet-timestamp = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
sp-consensus-babe = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
ciphersuite = { path = "../../crypto/ciphersuite", features = ["ristretto"] }
frost = { package = "modular-frost", path = "../../crypto/frost", features = ["tests"] }
schnorrkel = { path = "../../crypto/schnorrkel", package = "frost-schnorrkel" }
zeroize = "^1.5"
rand_core = "0.6" rand_core = "0.6"
borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"] }
[features] [features]
std = [ std = [
@@ -64,26 +45,14 @@ std = [
"sp-core/std", "sp-core/std",
"sp-io/std", "sp-io/std",
"sp-std/std",
"sp-api/std",
"sp-application-crypto/std",
"sp-runtime/std", "sp-runtime/std",
"sp-session/std",
"sp-staking/std",
"sp-consensus-babe/std",
"frame-system/std", "frame-system/std",
"frame-support/std", "frame-support/std",
"pallet-babe/std",
"pallet-grandpa/std",
"pallet-timestamp/std",
"serai-primitives/std", "serai-primitives/std",
"coins-pallet/std", "coins-pallet/std",
"dex-pallet/std",
] ]
try-runtime = [ try-runtime = [

View File

@@ -0,0 +1,327 @@
use sp_core::{Encode, sr25519::Public};
use serai_primitives::{constants::MAX_KEY_SHARES_PER_SET, network_id::NetworkId, balance::Amount};
use frame_support::storage::{StorageMap, StoragePrefixedMap};
/// The key to use for the allocations map.
type AllocationsKey = (NetworkId, Public);
/// The key to use for the sorted allocations map.
type SortedAllocationsKey = (NetworkId, [u8; 8], [u8; 16], Public);
/// The storage underlying `Allocations`.
///
/// This storage is expected to be owned by the `Allocations` interface and not directly read/write
/// to.
pub(crate) trait AllocationsStorage {
  /// An opaque map storing allocations.
  type Allocations: StorageMap<AllocationsKey, Amount, Query = Option<Amount>>;
  /// An opaque map storing allocations in a sorted manner.
  ///
  /// This MUST be instantiated with a map using `Identity` for its hasher.
  /*
    This is premised on the underlying trie iterating from keys with low-bytes to keys with
    high-bytes.

    We use Identity so we don't have a hasher add pseudorandom bytes to the start of the keys. This
    does remove the protection using a hash algorithm here offers against spam attacks (by flooding
    the DB with layers, increasing lookup time and Merkle proof sizes, not that we use Merkle
    proofs as Polkadot does).

    Since amounts are represented with just 8 bytes, only 16 nibbles are present. This caps the
    potential depth caused by spam at 16 layers (as the underlying DB operates on nibbles). While
    there is an entire 32-byte public key after this, a Blake hash of the key is inserted after the
    amount to prevent the key from also being used to cause layer spam. We use a `[u8; 16]` to
    represent this, and not an explicit `Blake2_128Concat` hasher, to ensure all prior keys are
    part of the hash. A Substrate-hasher would only hash the immediately following key.

    There's also a minimum stake requirement, which further reduces the potential for spam.
  */
  type SortedAllocations: StorageMap<SortedAllocationsKey, (), Query = Option<()>>
    + StoragePrefixedMap<()>;
}
/// An interface for managing validators' allocations.
pub(crate) trait Allocations {
  /// Set an allocation.
  ///
  /// Returns the validator's prior allocation.
  fn set_allocation(network: NetworkId, key: Public, amount: Amount) -> Option<Amount>;
  /// Get an allocation.
  fn get_allocation(network: NetworkId, key: Public) -> Option<Amount>;
  /// Iterate over allocations for a network, yielding the highest-valued allocations.
  ///
  /// This will yield all validators present whose allocation is greater than or equal to the
  /// specified minimum.
  ///
  /// If two validators share an allocation, the order is deterministic yet otherwise undefined.
  fn iter_allocations(
    network: NetworkId,
    minimum_allocation: Amount,
  ) -> impl Iterator<Item = (Public, Amount)>;
  /// Calculate the expected key shares for a network, per the current allocations.
  ///
  /// NOTE(review): the provided implementation stops iterating once `MAX_KEY_SHARES_PER_SET` is
  /// reached but may return slightly more than that maximum (the last validator's shares are
  /// added before the check) — confirm callers cap the result where that matters.
  fn expected_key_shares(network: NetworkId, allocation_per_key_share: Amount) -> u64;
}
/// Reverses the lexicographic order of a given byte array.
///
/// This is a bijective mapping. Calling reverse twice is equivalent to the identity function.
fn reverse_lexicographic_order<const N: usize>(bytes: [u8; N]) -> [u8; N] {
  // Complementing every byte flips the comparison of the first differing byte pair, reversing
  // the lexicographic order of the arrays as wholes
  bytes.map(|byte| !byte)
}
/// The storage key to use with the sorted allocations map.
#[inline]
fn sorted_allocation_storage_key(
  network: NetworkId,
  key: Public,
  amount: Amount,
) -> (NetworkId, [u8; 8], [u8; 16], Public) {
  // The DB iterates from low keys to high keys. The amount's big-endian bytes sort lowest
  // allocations first, so reversing their lexicographic order sorts the highest allocations
  // first, as desired
  let ordered_amount = reverse_lexicographic_order(amount.0.to_be_bytes());
  // Hash all of the preceding key components to best defend against layer-spam attacks
  let spam_resistant_hash = sp_io::hashing::blake2_128(&(network, ordered_amount, key).encode());
  (network, ordered_amount, spam_resistant_hash, key)
}
// Recover the user's public key from a storage key.
fn recover_key_from_sorted_allocation_storage_key(key: &[u8]) -> Public {
<Public as From<[u8; 32]>>::from(key[(key.len() - 32) ..].try_into().unwrap())
}
// Recover the amount allocated from a storage key.
fn recover_amount_from_sorted_allocation_storage_key(key: &[u8]) -> Amount {
  // The tail of the key is always `amount (8 bytes) || hash (16 bytes) || public key (32 bytes)`,
  // so the amount starts a fixed distance from the end of the key
  let amount_start = key.len() - (8 + 16 + 32);
  let reversed: [u8; 8] = key[amount_start .. (amount_start + 8)].try_into().unwrap();
  // Reversing the lexicographic order is a bijective mapping, so applying it again recovers the
  // original big-endian bytes
  Amount(u64::from_be_bytes(reverse_lexicographic_order(reversed)))
}
impl<Storage: AllocationsStorage> Allocations for Storage {
  fn set_allocation(network: NetworkId, key: Public, amount: Amount) -> Option<Amount> {
    // Remove their existing allocation, if one exists, from both maps
    let prior = Storage::Allocations::take((network, key));
    if let Some(amount) = prior {
      Storage::SortedAllocations::remove(sorted_allocation_storage_key(network, key, amount));
    }
    // If we're setting a non-zero allocation, add it back to the maps
    if amount.0 != 0 {
      Storage::Allocations::set((network, key), Some(amount));
      Storage::SortedAllocations::set(
        sorted_allocation_storage_key(network, key, amount),
        Some(()),
      );
    }
    prior
  }
  fn get_allocation(network: NetworkId, key: Public) -> Option<Amount> {
    Storage::Allocations::get((network, key))
  }
  fn iter_allocations(
    network: NetworkId,
    minimum_allocation: Amount,
  ) -> impl Iterator<Item = (Public, Amount)> {
    // Iterate over the sorted allocations for this network
    let mut prefix = Storage::SortedAllocations::final_prefix().to_vec();
    prefix.extend(&network.encode());
    // Decode the read keys into (key, amount) tuples
    frame_support::storage::PrefixIterator::<_, ()>::new(prefix.clone(), prefix, |key, _value| {
      Ok((
        recover_key_from_sorted_allocation_storage_key(key),
        recover_amount_from_sorted_allocation_storage_key(key),
      ))
    })
    // Filter by the specified minimum allocation
    .filter(move |(_key, allocation)| *allocation >= minimum_allocation)
  }
  fn expected_key_shares(network: NetworkId, allocation_per_key_share: Amount) -> u64 {
    let mut total_key_shares = 0;
    for (_, amount) in Self::iter_allocations(network, allocation_per_key_share) {
      let key_shares = amount.0 / allocation_per_key_share.0;
      total_key_shares += key_shares;
      // Stop iterating once we've reached the maximum amount of key shares for a set
      // NOTE(review): since the shares are added before this check, the returned total may
      // slightly exceed MAX_KEY_SHARES_PER_SET — confirm callers bound it where that matters
      if total_key_shares >= u64::from(MAX_KEY_SHARES_PER_SET) {
        break;
      }
    }
    total_key_shares
  }
}
#[test]
fn test_reverse_lexicographic_order() {
  use rand_core::{RngCore, OsRng};
  use sp_io::TestExternalities;
  use frame_support::{pallet_prelude::*, Identity, traits::StorageInstance};
  TestExternalities::default().execute_with(|| {
    // A map keyed by the amounts' big-endian bytes directly
    struct Storage;
    impl StorageInstance for Storage {
      fn pallet_prefix() -> &'static str {
        "LexicographicOrder"
      }
      const STORAGE_PREFIX: &'static str = "storage";
    }
    type Map = StorageMap<Storage, Identity, [u8; 8], (), OptionQuery>;
    // A map keyed by the amounts' big-endian bytes with their lexicographic order reversed
    struct StorageReverse;
    impl StorageInstance for StorageReverse {
      fn pallet_prefix() -> &'static str {
        "LexicographicOrder"
      }
      const STORAGE_PREFIX: &'static str = "storagereverse";
    }
    type MapReverse = StorageMap<StorageReverse, Identity, [u8; 8], (), OptionQuery>;
    // populate the maps with random amounts
    let mut amounts = vec![];
    for _ in 0 .. 100 {
      amounts.push(OsRng.next_u64());
    }
    let mut amounts_sorted = amounts.clone();
    amounts_sorted.sort();
    for a in amounts {
      Map::set(a.to_be_bytes(), Some(()));
      MapReverse::set(reverse_lexicographic_order(a.to_be_bytes()), Some(()));
    }
    // retrieve them back and check whether they are sorted as expected
    let total_size = amounts_sorted.len();
    let mut map_iter = Map::iter_keys();
    let mut reverse_map_iter = MapReverse::iter_keys();
    for i in 0 .. amounts_sorted.len() {
      let first = map_iter.next().unwrap();
      let second = reverse_map_iter.next().unwrap();
      // The next value in the in-order map should be the next value in the sorted amounts
      assert_eq!(u64::from_be_bytes(first), amounts_sorted[i]);
      // And then if we again apply the bijective mapping, the next value in the reversed map
      // should be the next value from the end in the sorted amounts
      assert_eq!(
        u64::from_be_bytes(reverse_lexicographic_order(second)),
        amounts_sorted[total_size - (i + 1)]
      );
    }
  });
}
#[test]
fn test_allocations() {
  use rand_core::{RngCore, OsRng};
  use borsh::BorshDeserialize;
  use sp_io::TestExternalities;
  use frame_support::{pallet_prelude::*, Identity, traits::StorageInstance};
  TestExternalities::default().execute_with(|| {
    // Storage instance backing the unsorted allocations map
    struct Storage;
    impl StorageInstance for Storage {
      fn pallet_prefix() -> &'static str {
        "Allocations"
      }
      const STORAGE_PREFIX: &'static str = "Storage::Allocations";
    }
    type AllocationsMap =
      StorageMap<Storage, Blake2_128Concat, AllocationsKey, Amount, OptionQuery>;
    // Storage instance backing the sorted allocations map, which MUST use `Identity`
    struct StorageSorted;
    impl StorageInstance for StorageSorted {
      fn pallet_prefix() -> &'static str {
        "Allocations"
      }
      const STORAGE_PREFIX: &'static str = "Storage::SortedAllocations";
    }
    type SortedAllocationsMap =
      StorageMap<StorageSorted, Identity, SortedAllocationsKey, (), OptionQuery>;
    struct Allocations;
    impl AllocationsStorage for Allocations {
      type Allocations = AllocationsMap;
      type SortedAllocations = SortedAllocationsMap;
    }
    // Borsh-deserialize three adjacent `NetworkId` variants from their discriminant bytes
    let before = NetworkId::deserialize_reader(&mut [0].as_slice()).unwrap();
    let network = NetworkId::deserialize_reader(&mut [1].as_slice()).unwrap();
    let after = NetworkId::deserialize_reader(&mut [2].as_slice()).unwrap();
    // Create allocations
    let rand_allocation = || {
      let mut key = [0; 32];
      OsRng.fill_bytes(&mut key);
      let key = Public::from(key);
      let amount = Amount(OsRng.next_u64());
      (key, amount)
    };
    const ALLOCATIONS: usize = 100;
    let mut allocations = vec![];
    for _ in 0 .. ALLOCATIONS {
      let (key, amount) = rand_allocation();
      allocations.push((key, amount));
      // Each validator is new, so there should never be a prior allocation
      assert_eq!(Allocations::set_allocation(network, key, amount), None);
    }
    // Sort them from highest amount to lowest
    allocations.sort_by_key(|item| item.1);
    allocations.reverse();
    // Set allocations for the previous and next network, by byte, to ensure the map isn't solely
    // these allocations. This ensures we don't read from another network accidentally
    {
      let (key, amount) = rand_allocation();
      assert_eq!(Allocations::set_allocation(before, key, amount), None);
      assert_eq!(Allocations::set_allocation(after, key, amount), None);
    }
    // Check the iterator works
    {
      let mut a = Allocations::iter_allocations(network, Amount(0));
      let mut b = allocations.clone().into_iter();
      for _ in 0 .. ALLOCATIONS {
        assert_eq!(a.next(), b.next());
      }
      // Both iterators should be exhausted after exactly ALLOCATIONS entries
      assert!(a.next().is_none());
      assert!(b.next().is_none());
    }
    // Check the minimum works
    {
      // A minimum equal to the highest allocation should yield solely that allocation
      assert_eq!(
        Allocations::iter_allocations(network, allocations[0].1).next(),
        Some(allocations[0])
      );
      // A minimum above the highest allocation should yield nothing
      assert_eq!(
        Allocations::iter_allocations(
          network,
          // Fails with probability ~1/2**57
          (allocations[0].1 + Amount(1)).unwrap()
        )
        .next(),
        None,
      );
    }
  });
}

View File

@@ -0,0 +1,313 @@
use sp_core::{Encode, Decode, ConstU32, sr25519::Public, bounded::BoundedVec};
use serai_primitives::{
constants::{MAX_KEY_SHARES_PER_SET, MAX_KEY_SHARES_PER_SET_U32},
network_id::NetworkId,
balance::Amount,
validator_sets::{Session, ValidatorSet, amortize_excess_key_shares},
};
use frame_support::storage::{StorageValue, StorageMap, StoragePrefixedMap};
use crate::allocations::*;
/// The list of genesis validators.
type GenesisValidators = BoundedVec<Public, ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 }>>;
/// The key for the SelectedValidators map.
type SelectedValidatorsKey = (ValidatorSet, [u8; 16], Public);
/// The storage underlying `Sessions`.
pub(crate) trait SessionsStorage: AllocationsStorage {
  /// The genesis validators
  ///
  /// The usage of this is shared with the rest of the pallet. `Sessions` only reads it.
  type GenesisValidators: StorageValue<GenesisValidators, Query = GenesisValidators>;
  /// The allocation required for a key share.
  ///
  /// The usage of this is shared with the rest of the pallet. `Sessions` only reads it.
  type AllocationPerKeyShare: StorageMap<NetworkId, Amount, Query = Option<Amount>>;
  /// The current session.
  ///
  /// This is opaque and to be exclusively read/write by `Sessions`.
  type CurrentSession: StorageMap<NetworkId, Session, Query = Option<Session>>;
  /// The latest session which has been decided.
  ///
  /// This is opaque and to be exclusively read/write by `Sessions`.
  type LatestDecidedSession: StorageMap<NetworkId, Session, Query = Option<Session>>;
  /// The selected validators for a set.
  ///
  /// This MUST be instantiated with a map using `Identity` for its hasher.
  ///
  /// This is opaque and to be exclusively read/write by `Sessions`.
  // The value is how many key shares the validator has.
  // NOTE(review): `StoragePrefixedMap<()>` is only used for its storage prefix, while the stored
  // value is a `u64` — confirm the `()` type parameter is intentional
  type SelectedValidators: StorageMap<SelectedValidatorsKey, u64> + StoragePrefixedMap<()>;
  /// The total allocated stake for a network.
  ///
  /// This is opaque and to be exclusively read/write by `Sessions`.
  type TotalAllocatedStake: StorageMap<NetworkId, Amount, Query = Option<Amount>>;
}
/// The storage key for the SelectedValidators map.
fn selected_validators_key(set: ValidatorSet, key: Public) -> SelectedValidatorsKey {
  // Hash the preceding key components to defend against layer-spam attacks
  let spam_resistant_hash = sp_io::hashing::blake2_128(&(set, key).encode());
  (set, spam_resistant_hash, key)
}
/// Iterate over the validators selected for a set, yielding `(validator key, key share count)`.
fn selected_validators<Storage: StorageMap<SelectedValidatorsKey, u64> + StoragePrefixedMap<()>>(
  set: ValidatorSet,
) -> impl Iterator<Item = (Public, u64)> {
  // Iterate solely over the entries prefixed by this set
  let mut prefix = Storage::final_prefix().to_vec();
  prefix.extend(&set.encode());
  frame_support::storage::PrefixIterator::<_, ()>::new(
    prefix.clone(),
    prefix,
    |key, mut key_shares| {
      Ok((
        // Recover the validator's key from the storage key
        <[u8; 32]>::try_from(&key[(key.len() - 32) ..]).unwrap().into(),
        // Decode the key shares from the value
        u64::decode(&mut key_shares).unwrap(),
      ))
    },
  )
}
/// Remove every selected validator for a set from storage.
fn clear_selected_validators<
  Storage: StorageMap<SelectedValidatorsKey, u64> + StoragePrefixedMap<()>,
>(
  set: ValidatorSet,
) {
  // Clear solely the entries prefixed by this set
  let mut prefix = Storage::final_prefix().to_vec();
  prefix.extend(&set.encode());
  // With no limit specified, a single call is expected to remove every entry under this prefix
  assert!(matches!(
    sp_io::storage::clear_prefix(&prefix, None),
    sp_io::KillStorageResult::AllRemoved(_)
  ));
}
/// An error incurred when adjusting a validator's allocation.
pub(crate) enum AllocationError {
  /// `AllocationPerKeyShare` was never set for this network.
  NoAllocationPerKeyShareSet,
  /// The resulting allocation would be less than the allocation required for a key share.
  AllocationLessThanKeyShare,
  /// The resulting allocation would let a single validator exceed the fault tolerance.
  IntroducesSinglePointOfFailure,
}
/// An interface for managing sessions of validators.
pub(crate) trait Sessions {
  /// Attempt to spawn a new session for the specified network.
  ///
  /// Validators will be selected by their allocations if `AllocationPerKeyShare` is set for this
  /// network. `include_genesis_validators` will cause genesis validators to be included *with
  /// greater priority than non-genesis validators*.
  ///
  /// Doesn't spawn the next session if the latest decided session has yet to start. This bounds
  /// the current session to be the latest decided session or the one prior.
  fn attempt_new_session(network: NetworkId, include_genesis_validators: bool);
  /// Have the latest-decided session accept the handover from the current set, if one exists.
  ///
  /// Every decided set must accept the handover to become current.
  ///
  /// May panic if the latest-decided session is already the current session, or if there was no
  /// latest-decided session.
  fn accept_handover(network: NetworkId);
  /// Retire a validator set.
  ///
  /// This MUST be called only for sessions which are no longer current.
  fn retire(set: ValidatorSet);
  /// Increase a validator's allocation.
  ///
  /// This does not perform any transfers of any coins/tokens. It solely performs the book-keeping
  /// of it.
  ///
  /// Errors if no allocation per key share is set for this network, if the resulting allocation
  /// is less than a single key share, or if the increase would introduce a single point of
  /// failure.
  fn increase_allocation(
    network: NetworkId,
    validator: Public,
    amount: Amount,
  ) -> Result<(), AllocationError>;
}
impl<Storage: SessionsStorage> Sessions for Storage {
  fn attempt_new_session(network: NetworkId, include_genesis_validators: bool) {
    // If we haven't rotated to the latest decided session, return
    // This prevents us from deciding session #n+2 when we haven't even started #n+1
    let current_session = Storage::CurrentSession::get(network);
    match (current_session, Storage::LatestDecidedSession::get(network)) {
      (Some(current), Some(latest)) => {
        if current == latest {
          // If the latest decided session is current, we can decide the next session
        } else {
          // If we already have a pending session, don't spawn a new one
          return;
        }
      }
      // NOTE(review): the unused `current`/`latest` bindings in the next two arms will trip
      // unused-variable warnings — consider `_`-prefixing them
      (Some(current), None) => unreachable!("current session but never decided a session"),
      // If we decided our first session, but didn't start it, don't decide another session
      (None, Some(latest)) => return,
      (None, None) => {
        // If we've never started a session, we can decide the first session
      }
    }
    let mut selected_validators = Vec::with_capacity(usize::from(MAX_KEY_SHARES_PER_SET / 2));
    let mut total_key_shares = 0;
    if let Some(allocation_per_key_share) = Storage::AllocationPerKeyShare::get(network) {
      // Select validators from the highest-allocated down, until the key share cap is reached
      for (validator, amount) in Self::iter_allocations(network, allocation_per_key_share) {
        // If this allocation is absurd, causing this to not fit within a u16, bound to the max
        let key_shares = amount.0 / allocation_per_key_share.0;
        selected_validators.push((validator, key_shares));
        // We're tracking key shares as a u64 yet the max allowed is a u16, so this won't overflow
        total_key_shares += key_shares;
        if total_key_shares >= u64::from(MAX_KEY_SHARES_PER_SET) {
          break;
        }
      }
    }
    // Perform amortization if we've exceeded the maximum amount of key shares
    // This is guaranteed not to cause any validators have zero key shares as we'd only be over if
    // the last-added (worst) validator had multiple key shares, meaning everyone has more shares
    // than we'll amortize here
    // NOTE(review): `total_key_shares` is not reduced to reflect this amortization, so if it
    // exceeded the maximum, the genesis-validator loop below may pop more validators than
    // strictly necessary — confirm intended
    amortize_excess_key_shares(selected_validators.as_mut_slice());
    if include_genesis_validators {
      // Genesis validators each receive a single key share
      let mut genesis_validators = Storage::GenesisValidators::get()
        .into_iter()
        .map(|validator| (validator, 1))
        .collect::<Vec<_>>();
      let genesis_validator_key_shares = u64::try_from(genesis_validators.len()).unwrap();
      // Drop the worst-allocated validators until the genesis validators' shares also fit
      while (total_key_shares + genesis_validator_key_shares) > u64::from(MAX_KEY_SHARES_PER_SET) {
        let (_key, key_shares) = selected_validators.pop().unwrap();
        total_key_shares -= key_shares;
      }
      selected_validators.append(&mut genesis_validators);
      total_key_shares += genesis_validator_key_shares;
    }
    // We kept this accurate but don't actually further read from it
    let _ = total_key_shares;
    // Advance the latest decided session, starting from Session(0) if none was ever decided
    let latest_decided_session = Storage::LatestDecidedSession::mutate(network, |session| {
      let next_session = session.map(|session| Session(session.0 + 1)).unwrap_or(Session(0));
      *session = Some(next_session);
      next_session
    });
    let latest_decided_set = ValidatorSet { network, session: latest_decided_session };
    // Persist the newly decided set's validators
    for (key, key_shares) in selected_validators {
      Storage::SelectedValidators::insert(
        selected_validators_key(latest_decided_set, key),
        key_shares,
      );
    }
  }
  fn accept_handover(network: NetworkId) {
    let current = {
      let current = Storage::CurrentSession::get(network);
      let latest_decided = Storage::LatestDecidedSession::get(network)
        .expect("accepting handover but never decided a session");
      // The current session must be exactly one before the latest-decided session (or None when
      // the latest-decided session is Session(0))
      assert_eq!(
        current,
        latest_decided.0.checked_sub(1).map(Session),
        "current session wasn't prior to latest-decided"
      );
      // Set the CurrentSession variable
      Storage::CurrentSession::set(network, Some(latest_decided));
      // Return `latest_decided` as `current` as it is now current
      latest_decided
    };
    // Sum the allocations of the now-current validators
    let mut total_allocated_stake = Amount(0);
    for (key, _key_shares) in
      selected_validators::<Storage::SelectedValidators>(ValidatorSet { network, session: current })
    {
      // Safe so long as the SRI supply fits within a u64
      total_allocated_stake =
        (total_allocated_stake + Self::get_allocation(network, key).unwrap_or(Amount(0))).unwrap();
    }
    // Update the total allocated stake variable to the current session
    Storage::TotalAllocatedStake::set(network, Some(total_allocated_stake));
  }
  fn retire(set: ValidatorSet) {
    // `None < Some(_)`, so this also rejects retiring when there's no current session
    assert!(
      Some(set.session).map(|session| session.0) <
        Storage::CurrentSession::get(set.network).map(|session| session.0),
      "retiring a set which is active/upcoming"
    );
    // Clean-up this set's storage
    clear_selected_validators::<Storage::SelectedValidators>(set);
  }
  fn increase_allocation(
    network: NetworkId,
    validator: Public,
    amount: Amount,
  ) -> Result<(), AllocationError> {
    let Some(allocation_per_key_share) = Storage::AllocationPerKeyShare::get(network) else {
      Err(AllocationError::NoAllocationPerKeyShareSet)?
    };
    let old_allocation = Self::get_allocation(network, validator).unwrap_or(Amount(0));
    // Safe so long as the SRI supply fits within a u64, per assumptions on how this is called
    let new_allocation = (old_allocation + amount).unwrap();
    if new_allocation < allocation_per_key_share {
      Err(AllocationError::AllocationLessThanKeyShare)?
    }
    /*
      If the validator set has a single point of failure, the following does nothing. If the
      validator set has decentralized and doesn't have a single point of failure, the following
      will ensure this allocation doesn't create a single point of failure.
    */
    {
      // Check the validator set's current expected key shares
      let expected_key_shares = Self::expected_key_shares(network, allocation_per_key_share);
      // Check if the top validator in this set may be faulty under this f
      let top_validator_may_be_faulty = if let Some(top_validator) =
        Self::iter_allocations(network, allocation_per_key_share).next()
      {
        let (_key, amount) = top_validator;
        let key_shares = amount.0 / allocation_per_key_share.0;
        // Faulty means at most f of the 3f+1 key shares
        key_shares <= (expected_key_shares / 3)
      } else {
        // If there are no validators, we claim the top validator may not be faulty so the
        // following check doesn't run
        false
      };
      if top_validator_may_be_faulty {
        let old_key_shares = old_allocation.0 / allocation_per_key_share.0;
        let new_key_shares = new_allocation.0 / allocation_per_key_share.0;
        // Update the amount of expected key shares per the key shares added
        let expected_key_shares = (expected_key_shares + (new_key_shares - old_key_shares))
          .min(u64::from(MAX_KEY_SHARES_PER_SET));
        // If the new key shares exceeds the fault tolerance, don't allow the allocation
        if new_key_shares > (expected_key_shares / 3) {
          Err(AllocationError::IntroducesSinglePointOfFailure)?
        }
      }
    }
    // Commit the new allocation
    Self::set_allocation(network, validator, new_allocation);
    // If this validator is active, update `TotalAllocatedStake`
    if let Some(current) = Storage::CurrentSession::get(network) {
      if Storage::SelectedValidators::contains_key(selected_validators_key(
        ValidatorSet { network, session: current },
        validator,
      )) {
        Storage::TotalAllocatedStake::mutate(network, |existing| {
          Some(
            (existing.expect("current session but no total allocated stake set") + amount).unwrap(),
          )
        });
      }
    }
    Ok(())
  }
}