Redo primitives, abi

Consolidates all primitives into a single crate. We didn't benefit from its
fragmentation. I'm hesitant to say the new internal organization is better (it
may be just as clunky), but it's at least in a single crate (not spread out
over micro-crates).

The ABI is the most distinct. We now entirely own it. Block header hashes don't
directly commit to any BABE data (avoiding potentially ~4 KB headers upon
session changes), and are hashed as borsh (a more widely used codec than
SCALE). There are still Substrate variants, using SCALE and with the BABE data,
but they're prunable from a protocol design perspective.

Defines a transaction as a Vec of Calls, allowing atomic operations.
This commit is contained in:
Luke Parker
2025-02-12 03:41:50 -05:00
parent 2f8ce15a92
commit 776e417fd2
49 changed files with 2225 additions and 2092 deletions

View File

@@ -16,29 +16,19 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
zeroize = { version = "^1.5", features = ["derive"], optional = true }
zeroize = { version = "^1.5", features = ["derive"] }
borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"] }
ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, optional = true }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] }
scale-info = { version = "2", default-features = false, features = ["derive"] }
borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }
serde = { version = "1", default-features = false, features = ["derive", "alloc"], optional = true }
sp-application-crypto = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
bitvec = { version = "1", default-features = false, features = ["alloc"] }
sp-core = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
sp-runtime = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
sp-io = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
sp-std = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
frame-support = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "serai-next", default-features = false }
ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["alloc", "ristretto"] }
dkg = { path = "../../crypto/dkg", default-features = false }
[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
bech32 = { version = "0.11", default-features = false }
[features]
std = ["zeroize", "ciphersuite/std", "scale/std", "borsh?/std", "serde?/std", "scale-info/std", "sp-core/std", "sp-runtime/std", "sp-std/std", "frame-support/std"]
borsh = ["dep:borsh"]
serde = ["dep:serde"]
std = ["zeroize/std", "borsh/std", "ciphersuite/std", "dkg/std", "sp-core/std", "bech32/std"]
default = ["std"]
borsh = [] # TODO
serde = [] # TODO

View File

@@ -0,0 +1,4 @@
# Serai Primitives
`serai-primitives` represents foundational data-types used within Serai's
Substrate blockchain.

View File

@@ -1,144 +0,0 @@
#[cfg(feature = "std")]
use zeroize::Zeroize;
#[cfg(feature = "borsh")]
use borsh::{BorshSerialize, BorshDeserialize};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
use scale::{Encode, Decode, MaxEncodedLen};
use scale_info::TypeInfo;
use sp_core::sr25519::Public;
pub use sp_core::sr25519::Signature;
#[cfg(feature = "std")]
use sp_core::{Pair as PairTrait, sr25519::Pair};
use sp_runtime::traits::{LookupError, Lookup, StaticLookup};
pub type PublicKey = Public;
#[cfg(feature = "borsh")]
/// Serialize a `Public` as its raw 32-byte representation, for use with `#[borsh(serialize_with)]`.
pub fn borsh_serialize_public<W: borsh::io::Write>(
  public: &Public,
  writer: &mut W,
) -> Result<(), borsh::io::Error> {
  // A public key is borsh-encoded as nothing more than its underlying byte array
  BorshSerialize::serialize(&public.0, writer)
}
#[cfg(feature = "borsh")]
/// Deserialize a `Public` from its raw 32-byte representation, for use with
/// `#[borsh(deserialize_with)]`.
pub fn borsh_deserialize_public<R: borsh::io::Read>(
  reader: &mut R,
) -> Result<Public, borsh::io::Error> {
  // Read the raw byte array, then wrap it (no validation of the key is performed)
  <[u8; 32] as BorshDeserialize>::deserialize_reader(reader).map(Public)
}
#[cfg(feature = "borsh")]
/// Serialize a `Signature` as its raw 64-byte representation, for use with
/// `#[borsh(serialize_with)]`.
pub fn borsh_serialize_signature<W: borsh::io::Write>(
  signature: &Signature,
  writer: &mut W,
) -> Result<(), borsh::io::Error> {
  // A signature is borsh-encoded as nothing more than its underlying byte array
  BorshSerialize::serialize(&signature.0, writer)
}
#[cfg(feature = "borsh")]
/// Deserialize a `Signature` from its raw 64-byte representation, for use with
/// `#[borsh(deserialize_with)]`.
pub fn borsh_deserialize_signature<R: borsh::io::Read>(
  reader: &mut R,
) -> Result<Signature, borsh::io::Error> {
  // Read the raw byte array, then wrap it (no validation of the signature is performed)
  <[u8; 64] as BorshDeserialize>::deserialize_reader(reader).map(Signature)
}
// TODO: Remove this for solely Public?
/// A Serai address: the raw 32-byte representation of an account's public key.
#[derive(
Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Encode, Decode, MaxEncodedLen, TypeInfo,
)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct SeraiAddress(pub [u8; 32]);
impl SeraiAddress {
/// Construct a `SeraiAddress` from the raw bytes of a public key. No validation is performed.
pub fn new(key: [u8; 32]) -> SeraiAddress {
SeraiAddress(key)
}
}
impl From<[u8; 32]> for SeraiAddress {
fn from(key: [u8; 32]) -> SeraiAddress {
SeraiAddress(key)
}
}
impl From<PublicKey> for SeraiAddress {
fn from(key: PublicKey) -> SeraiAddress {
// The address is the key's raw byte representation
SeraiAddress(key.0)
}
}
impl From<SeraiAddress> for PublicKey {
fn from(address: SeraiAddress) -> PublicKey {
// `from_raw` performs no validation; the bytes are taken as-is
PublicKey::from_raw(address.0)
}
}
#[cfg(feature = "std")]
impl std::fmt::Display for SeraiAddress {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// TODO: Bech32
// Currently prints the raw byte array via `Debug`, not a proper address encoding
write!(f, "{:?}", self.0)
}
}
/// Create a Substrate key pair by a name.
///
/// This should never be considered to have a secure private key. It has effectively no entropy.
// Uses the standard `//name` hard-derivation path from the default dev seed
#[cfg(feature = "std")]
pub fn insecure_pair_from_name(name: &str) -> Pair {
Pair::from_string(&format!("//{name}"), None).unwrap()
}
/// Create a private key for an arbitrary ciphersuite by a name.
///
/// This key should never be considered a secure private key. It has effectively no entropy.
// Hashes the name directly to a scalar via the ciphersuite's hash-to-field
#[cfg(feature = "std")]
pub fn insecure_arbitrary_key_from_name<C: ciphersuite::Ciphersuite>(name: &str) -> C::F {
C::hash_to_F(name.as_bytes())
}
/// A lookup from `SeraiAddress` to `PublicKey`, as required by Substrate's runtime config.
pub struct AccountLookup;
impl Lookup for AccountLookup {
type Source = SeraiAddress;
type Target = PublicKey;
// Infallible in practice: an address is simply the key's raw byte representation
fn lookup(&self, source: SeraiAddress) -> Result<PublicKey, LookupError> {
Ok(PublicKey::from_raw(source.0))
}
}
impl StaticLookup for AccountLookup {
type Source = SeraiAddress;
type Target = PublicKey;
fn lookup(source: SeraiAddress) -> Result<PublicKey, LookupError> {
Ok(source.into())
}
fn unlookup(source: PublicKey) -> SeraiAddress {
source.into()
}
}
/// Derive the address used by a pallet from the pallet's name.
///
/// The address is the name, right-padded with zero bytes to 32 bytes.
///
/// # Panics
/// Panics (at compile time, when evaluated in a const context) if the name exceeds 32 bytes or
/// consists solely of zero bytes.
pub const fn system_address(pallet: &'static [u8]) -> SeraiAddress {
  // An over-long name would index out of bounds below; fail with a clear message instead
  assert!(pallet.len() <= 32, "pallet name exceeds 32 bytes");
  let mut address = [0; 32];
  let mut set = false;
  // Implement a while loop since we can't use a for loop in a const fn
  let mut i = 0;
  while i < pallet.len() {
    address[i] = pallet[i];
    if address[i] != 0 {
      set = true;
    }
    i += 1;
  }
  // Make sure this address isn't the identity point
  // Doesn't do address != [0; 32] since that's not const
  assert!(set, "address is the identity point");
  SeraiAddress(address)
}

View File

@@ -0,0 +1,143 @@
use alloc::vec::Vec;
use zeroize::Zeroize;
use borsh::{BorshSerialize, BorshDeserialize};
use sp_core::{sr25519::Public, ConstU32, bounded::BoundedVec};
/*
We only use a single HRP across all networks. This follows Serai's general practice. Addresses
for external networks are represented in binary, without network information, to minimize
bandwidth and reduce potential for malleability.
This is continued here not solely to be a continuance, yet also with appreciation for the
simplicity. This does make it easier for users to make the mistake of using a testnet address
where they intended to use a mainnet address (and vice-versa). Since public keys are usable on
any network, this should have limited impact and accordingly not be the end of the world.
There's also precedent for this due to Ethereum (though they do have a somewhat-adopted checksum
scheme to encode the network regardless).
*/
const HUMAN_READABLE_PART: bech32::Hrp = bech32::Hrp::parse_unchecked("sri");
/// The address for an account on Serai.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct SeraiAddress(pub [u8; 32]);
impl SeraiAddress {
/// Generate an address for use by the system.
///
/// The returned addresses MAY be valid points. This assumes it's infeasible to find the discrete
/// logarithm for a point whose representation has a known Blake2b-256 preimage.
// The alternative would be to massage this until it's not a valid point, which isn't worth the
// computational expense as this should be a hard problem for outputs which happen to be points.
pub fn system(label: &[u8]) -> Self {
Self(sp_core::blake2_256(label))
}
}
impl From<Public> for SeraiAddress {
fn from(key: Public) -> Self {
// The encoding of a Ristretto point is the encoding of its address
Self(key.0)
}
}
/*
A `SeraiAddress` may not be a valid public key. The `sr25519::Public` is not a checked public
key, solely bytes alleged to be a public key, which any `SeraiAddress` converted into a `Public`
is also alleged to be.
*/
impl From<SeraiAddress> for Public {
fn from(address: SeraiAddress) -> Self {
// No validation occurs here, per the note above
Self::from_raw(address.0)
}
}
// We use Bech32m to encode addresses
impl core::fmt::Display for SeraiAddress {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
// Encode the 32 address bytes under the `sri` HRP, writing directly into the formatter
match bech32::encode_to_fmt::<bech32::Bech32m, _>(f, HUMAN_READABLE_PART, &self.0) {
Ok(()) => Ok(()),
Err(bech32::EncodeError::TooLong(_)) => {
unreachable!("32 bytes exceeded bech32 length limit?")
}
// Surface formatter errors as `fmt::Error`
Err(bech32::EncodeError::Fmt(e)) => Err(e),
// bech32::EncodeError is non-exhaustive
Err(_) => Err(core::fmt::Error),
}
}
}
/// An error from decoding an address.
// Public error types should carry the standard derives, at minimum `Debug`, so callers can log
// and compare them
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum DecodeError {
  /// The Bech32m encoding was invalid.
  InvalidBech32m,
  /// The Bech32m Human-Readable Part was distinct.
  DistinctHrp,
  /// The encoded data's length was wrong.
  InvalidLength,
}
impl core::str::FromStr for SeraiAddress {
  type Err = DecodeError;
  /// Decode an address from its Bech32m encoding.
  fn from_str(s: &str) -> Result<Self, Self::Err> {
    // We drop bech32's error to remain opaque to the implementation
    let checked = bech32::primitives::decode::CheckedHrpstring::new::<bech32::Bech32m>(s)
      .map_err(|_| DecodeError::InvalidBech32m)?;
    if checked.hrp() != HUMAN_READABLE_PART {
      return Err(DecodeError::DistinctHrp);
    }
    // Copy exactly 32 bytes out of the decoded data
    let mut bytes = [0; 32];
    let mut data = checked.byte_iter();
    for slot in &mut bytes {
      *slot = data.next().ok_or(DecodeError::InvalidLength)?;
    }
    // Any trailing bytes mean the encoding wasn't exactly 32 bytes long
    if data.next().is_some() {
      return Err(DecodeError::InvalidLength);
    }
    Ok(Self(bytes))
  }
}
/// An address for an external network.
// Opaque, length-bounded bytes; Serai doesn't interpret external address formats here
#[derive(Clone, PartialEq, Eq, Debug, borsh::BorshSerialize, borsh::BorshDeserialize)]
pub struct ExternalAddress(
#[borsh(
serialize_with = "crate::borsh_serialize_bounded_vec",
deserialize_with = "crate::borsh_deserialize_bounded_vec"
)]
BoundedVec<u8, ConstU32<{ Self::MAX_LEN }>>,
);
impl ExternalAddress {
/// The maximum length for an `ExternalAddress`.
pub const MAX_LEN: u32 = 512;
}
/// An error when converting from a `Vec`.
// Public error types should carry the standard derives, at minimum `Debug`, so callers can log
// and compare them
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum FromVecError {
  /// The source `Vec` was too long to be converted.
  TooLong,
}
impl TryFrom<Vec<u8>> for ExternalAddress {
type Error = FromVecError;
// Fails with `FromVecError::TooLong` if `vec` exceeds `MAX_LEN` bytes
fn try_from(vec: Vec<u8>) -> Result<Self, Self::Error> {
vec.try_into().map(ExternalAddress).map_err(|_| FromVecError::TooLong)
}
}
impl From<ExternalAddress> for Vec<u8> {
fn from(ext: ExternalAddress) -> Vec<u8> {
// Infallible: unwraps the inner `BoundedVec` into its backing `Vec`
ext.0.into_inner()
}
}
impl zeroize::Zeroize for ExternalAddress {
fn zeroize(&mut self) {
// Zero the underlying bytes in place; the vector's length is left unchanged
self.0.as_mut().zeroize();
}
}

View File

@@ -1,54 +0,0 @@
use core::{
ops::{Add, Sub, Mul},
fmt::Debug,
};
#[cfg(feature = "std")]
use zeroize::Zeroize;
#[cfg(feature = "borsh")]
use borsh::{BorshSerialize, BorshDeserialize};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
use scale::{Encode, Decode, MaxEncodedLen};
use scale_info::TypeInfo;
/// The type used for amounts within Substrate.
// Distinct from Amount due to Substrate's requirements on this type.
// While Amount could have all the necessary traits implemented, not only are they many, it'd make
// Amount a large type with a variety of misc functions.
// The current type's minimalism sets clear bounds on usage.
pub type SubstrateAmount = u64;
/// The type used for amounts.
// A newtype over `SubstrateAmount`, in atomic units
#[derive(
Clone, Copy, PartialEq, Eq, PartialOrd, Debug, Encode, Decode, MaxEncodedLen, TypeInfo,
)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Amount(pub SubstrateAmount);
// TODO: these impl shouldn't panic and return error to be dealt with.
// Otherwise we might have a panic that stops the network.
impl Add for Amount {
type Output = Amount;
// Panics on overflow
fn add(self, other: Amount) -> Amount {
// Explicitly use checked_add so even if range checks are disabled, this is still checked
Amount(self.0.checked_add(other.0).unwrap())
}
}
impl Sub for Amount {
type Output = Amount;
// Panics on underflow
fn sub(self, other: Amount) -> Amount {
Amount(self.0.checked_sub(other.0).unwrap())
}
}
impl Mul for Amount {
type Output = Amount;
// Panics on overflow
fn mul(self, other: Amount) -> Amount {
Amount(self.0.checked_mul(other.0).unwrap())
}
}

View File

@@ -1,38 +1,111 @@
use core::ops::{Add, Sub, Mul};
#[cfg(feature = "std")]
use zeroize::Zeroize;
#[cfg(feature = "borsh")]
use borsh::{BorshSerialize, BorshDeserialize};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
use scale::{Encode, Decode, MaxEncodedLen};
use scale_info::TypeInfo;
use crate::coin::{ExternalCoin, Coin};
use crate::{Amount, Coin, ExternalCoin};
/// The type internally used to represent amounts.
// https://github.com/rust-lang/rust/issues/8995
pub type AmountRepr = u64;
/// The type used for balances (a Coin and Balance).
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Balance {
pub coin: Coin,
/// A wrapper used to represent amounts.
// A newtype over `AmountRepr`; arithmetic is only offered via checked operators which return
// `Option`, so overflow must be handled by the caller
#[derive(
Clone,
Copy,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Debug,
Zeroize,
BorshSerialize,
BorshDeserialize,
)]
pub struct Amount(pub AmountRepr);
impl Add for Amount {
type Output = Option<Amount>;
fn add(self, other: Amount) -> Option<Amount> {
self.0.checked_add(other.0).map(Amount)
}
}
impl Sub for Amount {
type Output = Option<Amount>;
fn sub(self, other: Amount) -> Option<Amount> {
self.0.checked_sub(other.0).map(Amount)
}
}
impl Mul for Amount {
type Output = Option<Amount>;
fn mul(self, other: Amount) -> Option<Amount> {
self.0.checked_mul(other.0).map(Amount)
}
}
/// An ExternalCoin and an Amount, forming a balance for an external coin.
// `Copy` as both fields are `Copy`
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct ExternalBalance {
/// The coin this is a balance for.
pub coin: ExternalCoin,
/// The amount of this balance.
pub amount: Amount,
}
/// The type used for balances (a Coin and Balance).
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ExternalBalance {
pub coin: ExternalCoin,
impl Add<Amount> for ExternalBalance {
  type Output = Option<ExternalBalance>;
  /// Checked addition of an `Amount`; `None` on overflow.
  fn add(self, rhs: Amount) -> Option<ExternalBalance> {
    let amount = (self.amount + rhs)?;
    Some(ExternalBalance { coin: self.coin, amount })
  }
}
impl Sub<Amount> for ExternalBalance {
  type Output = Option<ExternalBalance>;
  /// Checked subtraction of an `Amount`; `None` on underflow.
  fn sub(self, rhs: Amount) -> Option<ExternalBalance> {
    let amount = (self.amount - rhs)?;
    Some(ExternalBalance { coin: self.coin, amount })
  }
}
impl Mul<Amount> for ExternalBalance {
  type Output = Option<ExternalBalance>;
  /// Checked multiplication by an `Amount`; `None` on overflow.
  fn mul(self, rhs: Amount) -> Option<ExternalBalance> {
    let amount = (self.amount * rhs)?;
    Some(ExternalBalance { coin: self.coin, amount })
  }
}
/// A Coin and an Amount, forming a balance for a coin.
// The superset of `ExternalBalance`, additionally able to represent SRI balances
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct Balance {
/// The coin this is a balance for.
pub coin: Coin,
/// The amount of this balance.
pub amount: Amount,
}
impl Add<Amount> for Balance {
  type Output = Option<Balance>;
  /// Checked addition of an `Amount`; `None` on overflow.
  fn add(self, rhs: Amount) -> Option<Balance> {
    let amount = (self.amount + rhs)?;
    Some(Balance { coin: self.coin, amount })
  }
}
impl Sub<Amount> for Balance {
  type Output = Option<Balance>;
  /// Checked subtraction of an `Amount`; `None` on underflow.
  fn sub(self, rhs: Amount) -> Option<Balance> {
    let amount = (self.amount - rhs)?;
    Some(Balance { coin: self.coin, amount })
  }
}
impl Mul<Amount> for Balance {
  type Output = Option<Balance>;
  /// Checked multiplication by an `Amount`; `None` on overflow.
  fn mul(self, rhs: Amount) -> Option<Balance> {
    let amount = (self.amount * rhs)?;
    Some(Balance { coin: self.coin, amount })
  }
}
impl From<ExternalBalance> for Balance {
fn from(balance: ExternalBalance) -> Self {
Balance { coin: balance.coin.into(), amount: balance.amount }
@@ -49,25 +122,3 @@ impl TryFrom<Balance> for ExternalBalance {
}
}
}
// TODO: these impl either should be removed or return errors in case of overflows
impl Add<Amount> for Balance {
type Output = Balance;
// Panics on overflow (via Amount's Add)
fn add(self, other: Amount) -> Balance {
Balance { coin: self.coin, amount: self.amount + other }
}
}
impl Sub<Amount> for Balance {
type Output = Balance;
// Panics on underflow (via Amount's Sub)
fn sub(self, other: Amount) -> Balance {
Balance { coin: self.coin, amount: self.amount - other }
}
}
impl Mul<Amount> for Balance {
type Output = Balance;
// Panics on overflow (via Amount's Mul)
fn mul(self, other: Amount) -> Balance {
Balance { coin: self.coin, amount: self.amount * other }
}
}

View File

@@ -1,55 +0,0 @@
#[cfg(feature = "std")]
use zeroize::Zeroize;
#[cfg(feature = "borsh")]
use borsh::{BorshSerialize, BorshDeserialize};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
use scale::{Encode, Decode, MaxEncodedLen};
use scale_info::TypeInfo;
use sp_core::H256;
/// The type used to identify block numbers.
// A newtype over u64 so block numbers aren't confused with other integers
#[derive(
Clone, Copy, Default, PartialEq, Eq, Hash, Debug, Encode, Decode, MaxEncodedLen, TypeInfo,
)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct BlockNumber(pub u64);
impl From<u64> for BlockNumber {
fn from(number: u64) -> BlockNumber {
BlockNumber(number)
}
}
/// The type used to identify block hashes.
// This may not be universally compatible
// If a block exists with a hash which isn't 32-bytes, it can be hashed into a value with 32-bytes
// This would require the processor to maintain a mapping of 32-byte IDs to actual hashes, which
// would be fine
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct BlockHash(pub [u8; 32]);
impl AsRef<[u8]> for BlockHash {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
impl From<[u8; 32]> for BlockHash {
fn from(hash: [u8; 32]) -> BlockHash {
BlockHash(hash)
}
}
impl From<H256> for BlockHash {
// Substrate block hashes are H256, which is byte-compatible
fn from(hash: H256) -> BlockHash {
BlockHash(hash.into())
}
}

View File

@@ -0,0 +1,118 @@
use zeroize::Zeroize;
use borsh::{io, BorshSerialize, BorshDeserialize};
use crate::network_id::{ExternalNetworkId, NetworkId};
/// The type used to identify coins native to external networks.
///
/// This type serializes to a subset of `Coin`.
// Discriminants start at 1; the 0 tag is reserved for `Coin::Serai` in the superset encoding
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[borsh(use_discriminant = true)]
#[non_exhaustive]
pub enum ExternalCoin {
/// Bitcoin, from the Bitcoin network.
Bitcoin = 1,
/// Ether, from the Ethereum network.
Ether = 2,
/// Dai Stablecoin, from the Ethereum network.
Dai = 3,
/// Monero, from the Monero network.
Monero = 4,
}
impl ExternalCoin {
/// All external coins.
pub fn all() -> impl Iterator<Item = Self> {
[ExternalCoin::Bitcoin, ExternalCoin::Ether, ExternalCoin::Dai, ExternalCoin::Monero]
.into_iter()
}
}
/// The type used to identify coins.
// The superset of `ExternalCoin`, additionally including the native SRI coin
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize)]
pub enum Coin {
/// The Serai coin.
Serai,
/// An external coin.
External(ExternalCoin),
}
impl BorshSerialize for Coin {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
// `Serai` uses tag 0, which no `ExternalCoin` discriminant uses
Self::Serai => writer.write_all(&[0]),
// External coins serialize as their own (non-zero) discriminant
Self::External(external) => external.serialize(writer),
}
}
}
impl BorshDeserialize for Coin {
fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
// Read the single tag byte (initialized to a value no variant uses)
let mut kind = [0xff];
reader.read_exact(&mut kind)?;
match kind[0] {
0 => Ok(Self::Serai),
// Re-parse the already-read tag byte as an `ExternalCoin`, which errors on unknown tags
_ => ExternalCoin::deserialize_reader(&mut kind.as_slice()).map(Into::into),
}
}
}
impl Coin {
  /// All coins.
  pub fn all() -> impl Iterator<Item = Self> {
    // `Serai` first, then every external coin in its canonical order
    core::iter::once(Self::Serai).chain(ExternalCoin::all().map(Self::External))
  }
}
impl From<ExternalCoin> for Coin {
fn from(coin: ExternalCoin) -> Self {
Coin::External(coin)
}
}
impl TryFrom<Coin> for ExternalCoin {
type Error = ();
// Fails for `Coin::Serai`, the only coin which isn't external
fn try_from(coin: Coin) -> Result<Self, Self::Error> {
match coin {
Coin::Serai => Err(())?,
Coin::External(ext) => Ok(ext),
}
}
}
impl ExternalCoin {
  /// The external network this coin is native to.
  pub fn network(&self) -> ExternalNetworkId {
    match self {
      Self::Bitcoin => ExternalNetworkId::Bitcoin,
      // Both ERC20s and Ether itself live on Ethereum
      Self::Ether | Self::Dai => ExternalNetworkId::Ethereum,
      Self::Monero => ExternalNetworkId::Monero,
    }
  }
  /// The decimals used for a single human unit of this coin.
  ///
  /// This may be less than the decimals used for a single human unit of this coin *by defined
  /// convention*. If so, that means Serai is *truncating* the decimals. A coin which is defined
  /// as having 8 decimals, while Serai claims it has 4 decimals, will have `0.00019999`
  /// interpreted as `0.0001` (in human units, in atomic units, 19999 will be interpreted as 1).
  pub fn decimals(&self) -> u32 {
    match self {
      Self::Monero => 12,
      // Ether and DAI have 18 decimals, yet we only track 8 in order to fit them within u64s
      Self::Bitcoin | Self::Ether | Self::Dai => 8,
    }
  }
}
impl Coin {
  /// The network this coin is native to.
  pub fn network(&self) -> NetworkId {
    match self {
      Self::Serai => NetworkId::Serai,
      // Delegate to the external coin, lifting its network into the superset type
      Self::External(coin) => NetworkId::from(coin.network()),
    }
  }
}

View File

@@ -1,39 +1,13 @@
use crate::BlockNumber;
use core::time::Duration;
// NOTE(review): this diff hunk interleaves removed and added lines. `TARGET_BLOCK_TIME` appears
// both as a `u64` (old) and as a `Duration` (new), and the block-count constants below divide by
// it — confirm which definitions survive in the post-commit file.
// 1 MB
pub const BLOCK_SIZE: u32 = 1024 * 1024;
// 6 seconds
// TODO: Use Duration
pub const TARGET_BLOCK_TIME: u64 = 6;
/// The target block time.
pub const TARGET_BLOCK_TIME: Duration = Duration::from_secs(6);
/// Measured in blocks.
pub const MINUTES: BlockNumber = 60 / TARGET_BLOCK_TIME;
pub const HOURS: BlockNumber = 60 * MINUTES;
pub const DAYS: BlockNumber = 24 * HOURS;
pub const WEEKS: BlockNumber = 7 * DAYS;
// Defines a month as 30 days, which is slightly inaccurate
pub const MONTHS: BlockNumber = 30 * DAYS;
// Defines a year as 12 inaccurate months, which is 360 days literally (~1.5% off)
pub const YEARS: BlockNumber = 12 * MONTHS;
/// The intended duration for a session.
// 1 week
pub const SESSION_LENGTH: Duration = Duration::from_secs(7 * 24 * 60 * 60);
/// 6 months of blocks
pub const GENESIS_SRI_TRICKLE_FEED: u64 = 6 * MONTHS;
// 100 Million SRI
// SRI uses 8 decimals, hence the 10^8 multiplier
pub const GENESIS_SRI: u64 = 100_000_000 * 10_u64.pow(8);
/// This needs to be long enough for arbitrage to occur and make holding any fake price up
/// sufficiently unrealistic.
#[allow(clippy::cast_possible_truncation)]
pub const ARBITRAGE_TIME: u16 = (2 * HOURS) as u16;
/// Since we use the median price, double the window length.
///
/// We additionally +1 so there is a true median.
pub const MEDIAN_PRICE_WINDOW_LENGTH: u16 = (2 * ARBITRAGE_TIME) + 1;
/// Amount of blocks per epoch in the fast-epoch feature that is used in tests.
pub const FAST_EPOCH_DURATION: u64 = 2 * MINUTES;
/// Amount of blocks for the initial period era of the emissions under the fast-epoch feature.
pub const FAST_EPOCH_INITIAL_PERIOD: u64 = 2 * FAST_EPOCH_DURATION;
/// The maximum amount of key shares per set.
pub const MAX_KEY_SHARES_PER_SET: u16 = 150;
/// The maximum amount of key shares per set, as an u32.
pub const MAX_KEY_SHARES_PER_SET_U32: u32 = MAX_KEY_SHARES_PER_SET as u32;

View File

@@ -0,0 +1,66 @@
use zeroize::Zeroize;
use borsh::{BorshSerialize, BorshDeserialize};
use sp_core::{ConstU32, bounded::BoundedVec};
/// A Ristretto public key.
// A newtype over the raw 32-byte encoding; no validation is performed on construction
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct Public(pub [u8; 32]);
impl From<sp_core::sr25519::Public> for Public {
fn from(public: sp_core::sr25519::Public) -> Self {
Self(public.0)
}
}
impl From<Public> for sp_core::sr25519::Public {
fn from(public: Public) -> Self {
// `from_raw` takes the bytes as-is
Self::from_raw(public.0)
}
}
/// A sr25519 signature.
// A newtype over the raw 64-byte encoding
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct Signature(pub [u8; 64]);
impl From<sp_core::sr25519::Signature> for Signature {
fn from(signature: sp_core::sr25519::Signature) -> Self {
Self(signature.0)
}
}
impl From<Signature> for sp_core::sr25519::Signature {
fn from(signature: Signature) -> Self {
// `from_raw` takes the bytes as-is
Self::from_raw(signature.0)
}
}
/// A key for an external network.
// Opaque, length-bounded bytes; the actual key format depends on the external network's
// cryptosystem
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct ExternalKey(
#[borsh(
serialize_with = "crate::borsh_serialize_bounded_vec",
deserialize_with = "crate::borsh_deserialize_bounded_vec"
)]
pub BoundedVec<u8, ConstU32<{ Self::MAX_LEN }>>,
);
impl Zeroize for ExternalKey {
fn zeroize(&mut self) {
// Zero the underlying bytes in place; the vector's length is left unchanged
self.0.as_mut().zeroize();
}
}
impl ExternalKey {
/// The maximum length for an external key.
/*
This supports keys up to 96 bytes (such as BLS12-381 G2, which is the largest elliptic-curve
group element we might reasonably use as a key). This can always be increased if we need to
adopt a different cryptosystem (one where verification keys are multiple group elements, or
where group elements do exceed 96 bytes, such as RSA).
*/
pub const MAX_LEN: u32 = 96;
}
/// The key pair for a validator set.
///
/// This is their Ristretto key, used for publishing data onto Serai, and their key on the external
/// network.
// Field 0: the Serai-side Ristretto key; field 1: the external network's key
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct KeyPair(pub Public, pub ExternalKey);

View File

@@ -1,19 +0,0 @@
#[cfg(feature = "borsh")]
use borsh::{BorshSerialize, BorshDeserialize};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
use scale::{Encode, Decode, MaxEncodedLen};
use crate::{Coin, SubstrateAmount};
/// Parameters for quoting the price of a swap between two coins.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, MaxEncodedLen)]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct QuotePriceParams {
// presumably the coin being sold — TODO confirm against the RPC caller
pub coin1: Coin,
// presumably the coin being bought — TODO confirm against the RPC caller
pub coin2: Coin,
// The amount to quote for, in atomic units
pub amount: SubstrateAmount,
// Whether the quote should include fees
pub include_fee: bool,
// Whether `amount` is the exact input (true) or the exact output (false)
pub exact_in: bool,
}

View File

@@ -0,0 +1,24 @@
use alloc::vec::Vec;
use zeroize::Zeroize;
use borsh::{BorshSerialize, BorshDeserialize};
use crate::balance::Amount;
/// The value of the non-Bitcoin external coins present at genesis, relative to Bitcoin.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct GenesisValues {
/// The value of Ether, relative to Bitcoin.
pub ether: Amount,
/// The value of DAI, relative to Bitcoin.
pub dai: Amount,
/// The value of Monero, relative to Bitcoin.
pub monero: Amount,
}
impl GenesisValues {
/// The message for the oraclize_values signature.
pub fn oraclize_values_message(&self) -> Vec<u8> {
// Domain-separated by a static prefix, borsh-encoded alongside the values
borsh::to_vec(&(b"GenesisLiquidity-oraclize_values", self)).unwrap()
}
}

View File

@@ -0,0 +1,171 @@
use alloc::{vec, vec::Vec};
use zeroize::Zeroize;
use borsh::{io, BorshSerialize, BorshDeserialize};
use crate::{
BlockHash, crypto::Signature, network_id::ExternalNetworkId,
instructions::InInstructionWithBalance,
};
/*
`Batch`s have a size limit we enforce upon deserialization.
The naive solution would be to deserialize, then serialize, and check the serialized length is
less than the maximum. This performs a redundant allocation and is computationally non-trivial.
The next solution would be to wrap the deserialization with a `Cursor` so one can check the
amount read, yet `Cursor` isn't available under no-std.
We solve this by manually implementing a `Cursor`-equivalent (for our purposes) which lets us
check the total amount read is `<=` the maximum size.
The issue is we need every call to `BorshDeserialize::deserialize_reader` to use our custom
reader, which requires manually implementing it, which means we can't use the derive macro and
can't ensure it follows the borsh specification. We solve this by generating two identical
structs, one internal with a derived `BorshDeserialize::deserialize_reader`, one public with a
manually implemented `BorshDeserialize::deserialize_reader` wrapping the internal struct's. This
lets us ensure the correct reader is used and error if the size limit is hit, while still using
a derived `BorshDeserialize` which will definitively be compliant.
*/
// Generates the `Batch` struct with a caller-chosen visibility and derive list, so the same field
// layout can back both the internal `BatchDeserialize` and the public `Batch` (see the comment
// above for why two structs exist)
macro_rules! batch_struct {
(#[$derive: meta] $pub: vis $name: ident) => {
/// A batch of `InInstruction`s to publish onto Serai.
#[allow(clippy::needless_pub_self)]
#[$derive]
$pub struct $name {
/// The size this will be once encoded.
#[allow(dead_code)] // This is unused for the `BatchDeserialize` instance
#[borsh(skip)]
encoded_size: usize,
/// The network this batch of instructions is coming from.
network: ExternalNetworkId,
/// The ID of this `Batch`.
id: u32,
/// The hash of the external network's block which produced this `Batch`.
external_network_block_hash: BlockHash,
/// The instructions to execute.
instructions: Vec<InInstructionWithBalance>,
}
}
}
// The internal struct: derived (specification-compliant) deserialization
batch_struct!(#[derive(BorshDeserialize)] pub(self) BatchDeserialize);
// The public struct: serialization derived, deserialization manually wrapped below
batch_struct!(#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize)] pub Batch);
impl BorshDeserialize for Batch {
fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
// A custom reader which enforces the `Batch`'s max size limit
struct SizeLimitReader<'a, R: io::Read> {
reader: &'a mut R,
// Running total of bytes read so far
read: usize,
}
impl<R: io::Read> io::Read for SizeLimitReader<'_, R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let read = self.reader.read(buf)?;
// saturating_add so the counter can't overflow and wrap past the limit
self.read = self.read.saturating_add(read);
if self.read > Batch::MAX_SIZE {
Err(io::Error::new(io::ErrorKind::Other, "Batch size exceeded maximum"))?;
}
Ok(read)
}
}
// Deserialize via the derived (spec-compliant) struct, but through the limiting reader
let mut size_limit_reader = SizeLimitReader { reader, read: 0 };
let BatchDeserialize {
encoded_size: _,
network,
id,
external_network_block_hash,
instructions,
} = <_>::deserialize_reader(&mut size_limit_reader)?;
// Record the actual number of bytes read as this Batch's encoded size
Ok(Batch {
encoded_size: size_limit_reader.read,
network,
id,
external_network_block_hash,
instructions,
})
}
}
/// An error incurred while pushing an instruction onto a `Batch`.
// Public error types should carry the standard derives, at minimum `Debug`, so callers can log
// and compare them
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum PushInstructionError {
  /// The Batch's max size was exceeded.
  MaxSizeExceeded,
}
impl Batch {
/// The maximum size of a valid `Batch`'s encoding.
const MAX_SIZE: usize = 32_768;
/// Create a new Batch.
pub fn new(network: ExternalNetworkId, id: u32, external_network_block_hash: BlockHash) -> Self {
let mut batch =
Batch { encoded_size: 0, network, id, external_network_block_hash, instructions: vec![] };
// Seed `encoded_size` with the size of the empty Batch's encoding
batch.encoded_size = borsh::to_vec(&batch).unwrap().len();
batch
}
/// Push an `InInstructionWithBalance` onto this `Batch`.
///
/// Fails with `MaxSizeExceeded` if the resulting encoding would exceed `MAX_SIZE`.
pub fn push_instruction(
&mut self,
instruction: InInstructionWithBalance,
) -> Result<(), PushInstructionError> {
// Check the size before pushing so the Batch never enters an over-sized state
if (self.encoded_size.saturating_add(borsh::to_vec(&instruction).unwrap().len())) >
Self::MAX_SIZE
{
Err(PushInstructionError::MaxSizeExceeded)?;
}
self.instructions.push(instruction);
Ok(())
}
/// The message to sign when publishing this Batch.
pub fn publish_batch_message(&self) -> Vec<u8> {
// Domain-separation tag for this signature context
const DST: &[u8] = b"InInstructions-publish_batch";
// We don't estimate the size of this Batch, we just reserve a small constant capacity
let mut buf = Vec::with_capacity(1024);
(DST, self).serialize(&mut buf).unwrap();
buf
}
/// The network this batch of instructions is coming from.
pub fn network(&self) -> ExternalNetworkId {
self.network
}
/// The ID of this `Batch`.
pub fn id(&self) -> u32 {
self.id
}
/// The hash of the external network's block which produced this `Batch`.
pub fn external_network_block_hash(&self) -> BlockHash {
self.external_network_block_hash
}
/// The instructions within this `Batch`.
pub fn instructions(&self) -> &[InInstructionWithBalance] {
&self.instructions
}
}
/// A signed batch.
///
/// Pairs a `Batch` with the signature authorizing its publication (the message signed being
/// `Batch::publish_batch_message`).
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct SignedBatch {
  /// The signed batch.
  pub batch: Batch,
  /// The signature.
  pub signature: Signature,
}

#[cfg(feature = "std")]
impl Zeroize for SignedBatch {
  fn zeroize(&mut self) {
    self.batch.zeroize();
    // `Signature` doesn't implement `Zeroize` itself, so zeroize its underlying bytes
    self.signature.0.as_mut().zeroize();
  }
}

View File

@@ -0,0 +1,86 @@
use zeroize::Zeroize;
use borsh::{BorshSerialize, BorshDeserialize};
use crate::{
address::{SeraiAddress, ExternalAddress},
balance::{Amount, ExternalBalance, Balance},
instructions::OutInstruction,
};
mod batch;
pub use batch::*;
/// The destination for coins.
// NOTE: Serialized with borsh, where variant declaration order defines the discriminant.
// Reordering or inserting variants is a wire-format break.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub enum Destination {
  /// The Serai address to transfer the coins to.
  Serai(SeraiAddress),
  /// Burn the coins with the included `OutInstruction`.
  Burn(OutInstruction),
}
/// An instruction on how to handle coins in.
// NOTE: Serialized with borsh, where variant declaration order defines the discriminant.
// Reordering or inserting variants is a wire-format break.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub enum InInstruction {
  /// Add the coins as genesis liquidity.
  GenesisLiquidity(SeraiAddress),
  /// Use the coins to swap to staked SRI, pre-economic security.
  SwapToStakedSri {
    /// The validator to allocate the stake to.
    validator: SeraiAddress,
    /// The minimum amount of staked SRI to swap to.
    minimum: Amount,
  },
  /// Transfer the coins to a Serai address, swapping some for SRI.
  TransferWithSwap {
    /// The Serai address to transfer the coins to, after swapping some.
    to: SeraiAddress,
    /// The maximum amount of coins to swap for the intended amount of SRI.
    maximum_swap: Amount,
    /// The SRI amount to swap some of the coins for.
    sri: Amount,
  },
  /// Transfer the coins to a Serai address.
  Transfer {
    /// The Serai address to transfer the coins to.
    to: SeraiAddress,
  },
  /// Swap part of the coins to SRI and add the coins as liquidity.
  SwapAndAddLiquidity {
    /// The owner to-be of the added liquidity.
    owner: SeraiAddress,
    /// The amount of SRI to add within the liquidity position.
    sri: Amount,
    /// The minimum amount of the coin to add as liquidity.
    minimum_coin: Amount,
    /// The amount of SRI to swap to and send to the owner to-be to pay for transactions on Serai.
    sri_for_fees: Amount,
  },
  /// Swap the coins.
  Swap {
    /// The minimum balance to receive.
    minimum_out: Balance,
    /// The destination to transfer the balance to.
    ///
    /// If `Destination::Burn`, the balance out will be burnt with the included `OutInstruction`.
    destination: Destination,
  },
}
/// An instruction on how to handle coins in with the address to return the coins to on error.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct RefundableInInstruction {
  /// The instruction on how to handle coins in.
  pub instruction: InInstruction,
  /// The address to return the coins to on error.
  ///
  /// If `None`, coins which cannot be handled have no refund path.
  pub return_address: Option<ExternalAddress>,
}
/// An instruction on how to handle coins in with the balance to use for the coins in.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct InInstructionWithBalance {
/// The instruction on how to handle coins in.
pub instruction: OutInstruction,
/// The coins in.
pub balance: ExternalBalance,
}

View File

@@ -0,0 +1,5 @@
// Instructions for handling coins sent into Serai (and the `Batch` type aggregating them).
mod r#in;
pub use r#in::{InInstruction, InInstructionWithBalance, PushInstructionError, Batch, SignedBatch};

// Instructions for transferring coins out of Serai.
mod out;
pub use out::{OutInstruction, OutInstructionWithBalance};

View File

@@ -0,0 +1,21 @@
use zeroize::Zeroize;
use borsh::{BorshSerialize, BorshDeserialize};
use crate::{address::ExternalAddress, balance::ExternalBalance};
/// An instruction on how to transfer coins out.
// NOTE: Serialized with borsh; variant declaration order defines the discriminant, so new
// variants must only be appended.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub enum OutInstruction {
  /// Transfer to the specified address.
  Transfer(ExternalAddress),
}

/// An instruction on how to transfer coins out with the balance to use for the transfer out.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct OutInstructionWithBalance {
  /// The instruction on how to transfer coins out.
  pub instruction: OutInstruction,
  /// The balance to use for the transfer out.
  pub balance: ExternalBalance,
}

View File

@@ -1,168 +1,79 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(feature = "std")]
extern crate alloc;
use zeroize::Zeroize;
use ::borsh::{BorshSerialize, BorshDeserialize};
#[cfg(feature = "borsh")]
use borsh::{BorshSerialize, BorshDeserialize};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
/// Wrappers to implement Borsh on non-Borsh-implementing types.
#[doc(hidden)]
pub mod sp_borsh;
pub(crate) use sp_borsh::*;
use scale::{Encode, Decode, MaxEncodedLen};
use scale_info::TypeInfo;
/// Constants within the Serai protocol.
pub mod constants;
#[cfg(test)]
use sp_io::TestExternalities;
/// Cryptographic types.
pub mod crypto;
#[cfg(test)]
use frame_support::{pallet_prelude::*, Identity, traits::StorageInstance};
/// Address types.
pub mod address;
use sp_core::{ConstU32, bounded::BoundedVec};
pub use sp_application_crypto as crypto;
/// Types for identifying coins.
pub mod coin;
mod amount;
pub use amount::*;
/// The `Amount`, `ExternalBalance`, and `Balance` types.
pub mod balance;
mod block;
pub use block::*;
/// Types for genesis.
pub mod genesis;
mod networks;
pub use networks::*;
/// Types for identifying networks and their properties.
pub mod network_id;
mod balance;
pub use balance::*;
/// Types for identifying and working with validator sets.
pub mod validator_sets;
mod account;
pub use account::*;
/// Types for signaling.
pub mod signals;
mod constants;
pub use constants::*;
/// Instruction types.
pub mod instructions;
mod dex;
#[allow(unused_imports)]
pub use dex::*;
pub type BlockNumber = u64;
pub type Header = sp_runtime::generic::Header<BlockNumber, sp_runtime::traits::BlakeTwo256>;
#[cfg(feature = "borsh")]
pub fn borsh_serialize_bounded_vec<W: borsh::io::Write, T: BorshSerialize, const B: u32>(
bounded: &BoundedVec<T, ConstU32<B>>,
writer: &mut W,
) -> Result<(), borsh::io::Error> {
borsh::BorshSerialize::serialize(bounded.as_slice(), writer)
}
#[cfg(feature = "borsh")]
pub fn borsh_deserialize_bounded_vec<R: borsh::io::Read, T: BorshDeserialize, const B: u32>(
reader: &mut R,
) -> Result<BoundedVec<T, ConstU32<B>>, borsh::io::Error> {
let vec: Vec<T> = borsh::BorshDeserialize::deserialize_reader(reader)?;
vec.try_into().map_err(|_| borsh::io::Error::other("bound exceeded"))
}
pub const MAX_ADDRESS_LEN: u32 = 512;
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ExternalAddress(
#[cfg_attr(
feature = "borsh",
borsh(
serialize_with = "borsh_serialize_bounded_vec",
deserialize_with = "borsh_deserialize_bounded_vec"
)
)]
BoundedVec<u8, ConstU32<{ MAX_ADDRESS_LEN }>>,
);
#[cfg(feature = "std")]
impl Zeroize for ExternalAddress {
fn zeroize(&mut self) {
self.0.as_mut().zeroize()
/// The type used to identify block numbers.
///
/// A block's number is its zero-indexed position on the list of blocks which form a blockchain.
/// For non-linear structures, this would presumably be the zero-indexed position within some
/// topological order.
#[derive(
  Clone, Copy, Default, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize,
)]
pub struct BlockNumber(pub u64);
// Ergonomic conversion from the raw integer representation
impl From<u64> for BlockNumber {
  fn from(number: u64) -> BlockNumber {
    BlockNumber(number)
  }
}
impl ExternalAddress {
#[cfg(feature = "std")]
pub fn new(address: Vec<u8>) -> Result<ExternalAddress, &'static str> {
Ok(ExternalAddress(address.try_into().map_err(|_| "address length exceeds {MAX_ADDRESS_LEN}")?))
}
#[cfg(feature = "std")]
pub fn consume(self) -> Vec<u8> {
self.0.into_inner()
/// The type used to identify block hashes.
/*
  Across all networks, block hashes may not be 32 bytes. There may be a network which targets 256
  bits of security and accordingly has a 64-byte block hash. Serai only targets a 128-bit security
  level so this is fine for our use-case. If we do ever see a 64-byte block hash, we can simply
  hash it into a 32-byte hash or truncate it.
*/
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct BlockHash(pub [u8; 32]);
// Ergonomic conversion from the raw byte representation
impl From<[u8; 32]> for BlockHash {
  fn from(hash: [u8; 32]) -> BlockHash {
    BlockHash(hash)
  }
}
impl AsRef<[u8]> for ExternalAddress {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
// Conversion from Substrate's 32-byte hash type, as used by the Substrate block header
impl From<sp_core::H256> for BlockHash {
  fn from(hash: sp_core::H256) -> BlockHash {
    BlockHash(hash.into())
  }
}
/// Lexicographically reverses a given byte array.
///
/// Maps each byte to its bitwise complement, so that comparing two mapped arrays yields the
/// opposite ordering of comparing the originals.
pub fn reverse_lexicographic_order<const N: usize>(bytes: [u8; N]) -> [u8; N] {
  let mut res = bytes;
  for byte in &mut res {
    *byte = !*byte;
  }
  res
}
#[test]
fn test_reverse_lexicographic_order() {
  TestExternalities::default().execute_with(|| {
    use rand_core::{RngCore, OsRng};

    // Two storage maps with `Identity` hashers: one keyed by raw big-endian bytes, one keyed by
    // their `reverse_lexicographic_order` image
    struct Storage;
    impl StorageInstance for Storage {
      fn pallet_prefix() -> &'static str {
        "LexicographicOrder"
      }
      const STORAGE_PREFIX: &'static str = "storage";
    }
    type Map = StorageMap<Storage, Identity, [u8; 8], (), OptionQuery>;

    struct StorageReverse;
    impl StorageInstance for StorageReverse {
      fn pallet_prefix() -> &'static str {
        "LexicographicOrder"
      }
      const STORAGE_PREFIX: &'static str = "storagereverse";
    }
    type MapReverse = StorageMap<StorageReverse, Identity, [u8; 8], (), OptionQuery>;

    // populate the maps with random values
    let mut amounts = vec![];
    for _ in 0 .. 100 {
      amounts.push(OsRng.next_u64());
    }

    let mut amounts_sorted = amounts.clone();
    amounts_sorted.sort();
    for a in amounts {
      Map::set(a.to_be_bytes(), Some(()));
      MapReverse::set(reverse_lexicographic_order(a.to_be_bytes()), Some(()));
    }

    // retrieve them back and check whether they are sorted as expected: the plain map iterates
    // ascending, the reversed map descending
    let total_size = amounts_sorted.len();
    let mut map_iter = Map::iter_keys();
    let mut reverse_map_iter = MapReverse::iter_keys();
    for i in 0 .. amounts_sorted.len() {
      let first = map_iter.next().unwrap();
      let second = reverse_map_iter.next().unwrap();

      assert_eq!(u64::from_be_bytes(first), amounts_sorted[i]);
      assert_eq!(
        u64::from_be_bytes(reverse_lexicographic_order(second)),
        amounts_sorted[total_size - (i + 1)]
      );
    }
  });
}

View File

@@ -0,0 +1,124 @@
use zeroize::Zeroize;
use borsh::{io, BorshSerialize, BorshDeserialize};
use crate::coin::{ExternalCoin, Coin};
/// Identifier for an embedded elliptic curve.
// NOTE: Serialized with borsh; variant declaration order defines the discriminant, so new curves
// must only be appended.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub enum EmbeddedEllipticCurve {
  /// The Embedwards25519 curve, defined over (embedded into) Ed25519's/Ristretto's scalar field.
  Embedwards25519,
  /// The secq256k1 curve, forming a cycle with secp256k1.
  Secq256k1,
}
/// The type used to identify external networks.
///
/// This type serializes to a subset of `NetworkId`.
// The explicit, non-zero discriminants (written to the wire per `use_discriminant`) leave `0`
// free for `NetworkId::Serai`, letting `NetworkId`'s manual borsh impls delegate here.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[borsh(use_discriminant = true)]
#[non_exhaustive]
pub enum ExternalNetworkId {
  /// The Bitcoin network.
  Bitcoin = 1,
  /// The Ethereum network.
  Ethereum = 2,
  /// The Monero network.
  Monero = 3,
}
impl ExternalNetworkId {
/// All external networks.
pub fn all() -> impl Iterator<Item = Self> {
[ExternalNetworkId::Bitcoin, ExternalNetworkId::Ethereum, ExternalNetworkId::Monero].into_iter()
}
}
impl ExternalNetworkId {
  /// The embedded elliptic curves actively used for this network.
  ///
  /// This is guaranteed to return `[Embedwards25519]` or
  /// `[Embedwards25519, *network specific curve*]`.
  pub fn embedded_elliptic_curves(&self) -> &'static [EmbeddedEllipticCurve] {
    match self {
      // Since the oraclizing key curve is the same as the network's curve, we only need it
      Self::Monero => &[EmbeddedEllipticCurve::Embedwards25519],
      // We need to generate a Ristretto key for oraclizing and a Secp256k1 key for the network
      Self::Bitcoin | Self::Ethereum => {
        &[EmbeddedEllipticCurve::Embedwards25519, EmbeddedEllipticCurve::Secq256k1]
      }
    }
  }

  /// The coins native to this network.
  pub fn coins(&self) -> &'static [ExternalCoin] {
    match self {
      Self::Bitcoin => &[ExternalCoin::Bitcoin],
      Self::Monero => &[ExternalCoin::Monero],
      // Ethereum hosts both its native asset and the DAI token
      Self::Ethereum => &[ExternalCoin::Ether, ExternalCoin::Dai],
    }
  }
}
/// The type used to identify networks.
// borsh impls are written manually below so `Serai` is tagged `0` while external networks keep
// their own (non-zero) discriminants, making `ExternalNetworkId` a serialization subset.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize)]
pub enum NetworkId {
  /// The Serai network.
  Serai,
  /// An external network.
  External(ExternalNetworkId),
}
impl BorshSerialize for NetworkId {
  fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    // `Serai` is tagged `0`; external networks write their own (non-zero) discriminants
    if let Self::External(external) = self {
      external.serialize(writer)
    } else {
      writer.write_all(&[0])
    }
  }
}
impl BorshDeserialize for NetworkId {
  fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    // Read the single tag byte (the initial 0xff is irrelevant; `read_exact` overwrites it)
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;
    match kind[0] {
      // `0` is `Serai`; any other byte is delegated to `ExternalNetworkId`'s deserializer, which
      // rejects tags outside its declared discriminants
      0 => Ok(Self::Serai),
      _ => ExternalNetworkId::deserialize_reader(&mut kind.as_slice()).map(Into::into),
    }
  }
}
impl NetworkId {
  /// All networks.
  ///
  /// Yields `Serai` first, then every external network.
  pub fn all() -> impl Iterator<Item = Self> {
    [NetworkId::Serai].into_iter().chain(ExternalNetworkId::all().map(NetworkId::from))
  }

  /// The coins native to this network.
  pub fn coins(self) -> impl Iterator<Item = Coin> {
    // Split into Serai-native coins and external coins, then chain them into one iterator
    let (coins, external_coins): (&[Coin], &[ExternalCoin]) =
      if let NetworkId::External(network) = self {
        (&[], network.coins())
      } else {
        (&[Coin::Serai], &[])
      };
    coins.iter().copied().chain(external_coins.iter().copied().map(Into::into))
  }
}
// Every external network is a network
impl From<ExternalNetworkId> for NetworkId {
  fn from(network: ExternalNetworkId) -> Self {
    Self::External(network)
  }
}

// A network is an external network iff it isn't Serai
impl TryFrom<NetworkId> for ExternalNetworkId {
  type Error = ();

  fn try_from(network: NetworkId) -> Result<Self, Self::Error> {
    if let NetworkId::External(external) = network {
      Ok(external)
    } else {
      Err(())
    }
  }
}

View File

@@ -1,479 +0,0 @@
#[cfg(feature = "std")]
use zeroize::Zeroize;
use scale::{Decode, Encode, EncodeLike, MaxEncodedLen};
use scale_info::TypeInfo;
#[cfg(feature = "borsh")]
use borsh::{BorshSerialize, BorshDeserialize};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
use sp_core::{ConstU32, bounded::BoundedVec};
use sp_std::{vec, vec::Vec};
#[cfg(feature = "borsh")]
use crate::{borsh_serialize_bounded_vec, borsh_deserialize_bounded_vec};
/// Identifier for an embedded elliptic curve.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum EmbeddedEllipticCurve {
Embedwards25519,
Secq256k1,
}
/// The type used to identify external networks.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TypeInfo)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum ExternalNetworkId {
Bitcoin,
Ethereum,
Monero,
}
impl Encode for ExternalNetworkId {
fn encode(&self) -> Vec<u8> {
match self {
ExternalNetworkId::Bitcoin => vec![1],
ExternalNetworkId::Ethereum => vec![2],
ExternalNetworkId::Monero => vec![3],
}
}
}
impl Decode for ExternalNetworkId {
fn decode<I: scale::Input>(input: &mut I) -> Result<Self, scale::Error> {
let kind = input.read_byte()?;
match kind {
1 => Ok(Self::Bitcoin),
2 => Ok(Self::Ethereum),
3 => Ok(Self::Monero),
_ => Err(scale::Error::from("invalid format")),
}
}
}
impl MaxEncodedLen for ExternalNetworkId {
fn max_encoded_len() -> usize {
1
}
}
impl EncodeLike for ExternalNetworkId {}
#[cfg(feature = "borsh")]
impl BorshSerialize for ExternalNetworkId {
fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_all(&self.encode())
}
}
#[cfg(feature = "borsh")]
impl BorshDeserialize for ExternalNetworkId {
fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
let mut kind = [0; 1];
reader.read_exact(&mut kind)?;
ExternalNetworkId::decode(&mut kind.as_slice())
.map_err(|_| std::io::Error::other("invalid format"))
}
}
/// The type used to identify networks.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord, TypeInfo)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum NetworkId {
Serai,
External(ExternalNetworkId),
}
impl Encode for NetworkId {
fn encode(&self) -> Vec<u8> {
match self {
NetworkId::Serai => vec![0],
NetworkId::External(network) => network.encode(),
}
}
}
impl Decode for NetworkId {
fn decode<I: scale::Input>(input: &mut I) -> Result<Self, scale::Error> {
let kind = input.read_byte()?;
match kind {
0 => Ok(Self::Serai),
_ => Ok(ExternalNetworkId::decode(&mut [kind].as_slice())?.into()),
}
}
}
impl MaxEncodedLen for NetworkId {
fn max_encoded_len() -> usize {
1
}
}
impl EncodeLike for NetworkId {}
#[cfg(feature = "borsh")]
impl BorshSerialize for NetworkId {
fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_all(&self.encode())
}
}
#[cfg(feature = "borsh")]
impl BorshDeserialize for NetworkId {
fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
let mut kind = [0; 1];
reader.read_exact(&mut kind)?;
NetworkId::decode(&mut kind.as_slice()).map_err(|_| std::io::Error::other("invalid format"))
}
}
impl ExternalNetworkId {
/// The embedded elliptic curve actively used for this network.
///
/// This is guaranteed to return `[]`, `[Embedwards25519]`, or
/// `[Embedwards25519, *network specific curve*]`.
pub fn embedded_elliptic_curves(&self) -> &'static [EmbeddedEllipticCurve] {
match self {
// We need to generate a Ristretto key for oraclizing and a Secp256k1 key for the network
Self::Bitcoin | Self::Ethereum => {
&[EmbeddedEllipticCurve::Embedwards25519, EmbeddedEllipticCurve::Secq256k1]
}
// Since the oraclizing key curve is the same as the network's curve, we only need it
Self::Monero => &[EmbeddedEllipticCurve::Embedwards25519],
}
}
pub fn coins(&self) -> Vec<ExternalCoin> {
match self {
Self::Bitcoin => vec![ExternalCoin::Bitcoin],
Self::Ethereum => vec![ExternalCoin::Ether, ExternalCoin::Dai],
Self::Monero => vec![ExternalCoin::Monero],
}
}
}
impl NetworkId {
/// The embedded elliptic curve actively used for this network.
///
/// This is guaranteed to return `[]`, `[Embedwards25519]`, or
/// `[Embedwards25519, *network specific curve*]`.
pub fn embedded_elliptic_curves(&self) -> &'static [EmbeddedEllipticCurve] {
match self {
Self::Serai => &[],
Self::External(network) => network.embedded_elliptic_curves(),
}
}
pub fn coins(&self) -> Vec<Coin> {
match self {
Self::Serai => vec![Coin::Serai],
Self::External(network) => {
network.coins().into_iter().map(core::convert::Into::into).collect()
}
}
}
}
impl From<ExternalNetworkId> for NetworkId {
fn from(network: ExternalNetworkId) -> Self {
NetworkId::External(network)
}
}
impl TryFrom<NetworkId> for ExternalNetworkId {
type Error = ();
fn try_from(network: NetworkId) -> Result<Self, Self::Error> {
match network {
NetworkId::Serai => Err(())?,
NetworkId::External(n) => Ok(n),
}
}
}
pub const EXTERNAL_NETWORKS: [ExternalNetworkId; 3] =
[ExternalNetworkId::Bitcoin, ExternalNetworkId::Ethereum, ExternalNetworkId::Monero];
pub const NETWORKS: [NetworkId; 4] = [
NetworkId::Serai,
NetworkId::External(ExternalNetworkId::Bitcoin),
NetworkId::External(ExternalNetworkId::Ethereum),
NetworkId::External(ExternalNetworkId::Monero),
];
pub const EXTERNAL_COINS: [ExternalCoin; 4] =
[ExternalCoin::Bitcoin, ExternalCoin::Ether, ExternalCoin::Dai, ExternalCoin::Monero];
pub const COINS: [Coin; 5] = [
Coin::Serai,
Coin::External(ExternalCoin::Bitcoin),
Coin::External(ExternalCoin::Ether),
Coin::External(ExternalCoin::Dai),
Coin::External(ExternalCoin::Monero),
];
/// The type used to identify external coins.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TypeInfo)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum ExternalCoin {
Bitcoin,
Ether,
Dai,
Monero,
}
impl Encode for ExternalCoin {
fn encode(&self) -> Vec<u8> {
match self {
ExternalCoin::Bitcoin => vec![4],
ExternalCoin::Ether => vec![5],
ExternalCoin::Dai => vec![6],
ExternalCoin::Monero => vec![7],
}
}
}
impl Decode for ExternalCoin {
fn decode<I: scale::Input>(input: &mut I) -> Result<Self, scale::Error> {
let kind = input.read_byte()?;
match kind {
4 => Ok(Self::Bitcoin),
5 => Ok(Self::Ether),
6 => Ok(Self::Dai),
7 => Ok(Self::Monero),
_ => Err(scale::Error::from("invalid format")),
}
}
}
impl MaxEncodedLen for ExternalCoin {
fn max_encoded_len() -> usize {
1
}
}
impl EncodeLike for ExternalCoin {}
#[cfg(feature = "borsh")]
impl BorshSerialize for ExternalCoin {
fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_all(&self.encode())
}
}
#[cfg(feature = "borsh")]
impl BorshDeserialize for ExternalCoin {
fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
let mut kind = [0; 1];
reader.read_exact(&mut kind)?;
ExternalCoin::decode(&mut kind.as_slice()).map_err(|_| std::io::Error::other("invalid format"))
}
}
/// The type used to identify coins.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TypeInfo)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum Coin {
Serai,
External(ExternalCoin),
}
impl Encode for Coin {
fn encode(&self) -> Vec<u8> {
match self {
Coin::Serai => vec![0],
Coin::External(ec) => ec.encode(),
}
}
}
impl Decode for Coin {
fn decode<I: scale::Input>(input: &mut I) -> Result<Self, scale::Error> {
let kind = input.read_byte()?;
match kind {
0 => Ok(Self::Serai),
_ => Ok(ExternalCoin::decode(&mut [kind].as_slice())?.into()),
}
}
}
impl MaxEncodedLen for Coin {
fn max_encoded_len() -> usize {
1
}
}
impl EncodeLike for Coin {}
#[cfg(feature = "borsh")]
impl BorshSerialize for Coin {
fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writer.write_all(&self.encode())
}
}
#[cfg(feature = "borsh")]
impl BorshDeserialize for Coin {
fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
let mut kind = [0; 1];
reader.read_exact(&mut kind)?;
Coin::decode(&mut kind.as_slice()).map_err(|_| std::io::Error::other("invalid format"))
}
}
impl From<ExternalCoin> for Coin {
fn from(coin: ExternalCoin) -> Self {
Coin::External(coin)
}
}
impl TryFrom<Coin> for ExternalCoin {
type Error = ();
fn try_from(coin: Coin) -> Result<Self, Self::Error> {
match coin {
Coin::Serai => Err(())?,
Coin::External(c) => Ok(c),
}
}
}
impl ExternalCoin {
pub fn network(&self) -> ExternalNetworkId {
match self {
ExternalCoin::Bitcoin => ExternalNetworkId::Bitcoin,
ExternalCoin::Ether | ExternalCoin::Dai => ExternalNetworkId::Ethereum,
ExternalCoin::Monero => ExternalNetworkId::Monero,
}
}
pub fn name(&self) -> &'static str {
match self {
ExternalCoin::Bitcoin => "Bitcoin",
ExternalCoin::Ether => "Ether",
ExternalCoin::Dai => "Dai Stablecoin",
ExternalCoin::Monero => "Monero",
}
}
pub fn symbol(&self) -> &'static str {
match self {
ExternalCoin::Bitcoin => "BTC",
ExternalCoin::Ether => "ETH",
ExternalCoin::Dai => "DAI",
ExternalCoin::Monero => "XMR",
}
}
pub fn decimals(&self) -> u32 {
match self {
// Ether and DAI have 18 decimals, yet we only track 8 in order to fit them within u64s
ExternalCoin::Bitcoin | ExternalCoin::Ether | ExternalCoin::Dai => 8,
ExternalCoin::Monero => 12,
}
}
}
impl Coin {
pub fn native() -> Coin {
Coin::Serai
}
pub fn network(&self) -> NetworkId {
match self {
Coin::Serai => NetworkId::Serai,
Coin::External(c) => c.network().into(),
}
}
pub fn name(&self) -> &'static str {
match self {
Coin::Serai => "Serai",
Coin::External(c) => c.name(),
}
}
pub fn symbol(&self) -> &'static str {
match self {
Coin::Serai => "SRI",
Coin::External(c) => c.symbol(),
}
}
pub fn decimals(&self) -> u32 {
match self {
Coin::Serai => 8,
Coin::External(c) => c.decimals(),
}
}
pub fn is_native(&self) -> bool {
matches!(self, Coin::Serai)
}
}
// Max of 8 coins per network
// Since Serai isn't interested in listing tokens, as on-chain DEXs will almost certainly have
// more liquidity, the only reason we'd have so many coins from a network is if there's no DEX
// on-chain
// There's probably no chain with so many *worthwhile* coins and no on-chain DEX
// This could probably be just 4, yet 8 is a hedge for the unforeseen
// If necessary, this can be increased with a fork
pub const MAX_COINS_PER_NETWORK: u32 = 8;
/// Network definition.
#[derive(Clone, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Network {
#[cfg_attr(
feature = "borsh",
borsh(
serialize_with = "borsh_serialize_bounded_vec",
deserialize_with = "borsh_deserialize_bounded_vec"
)
)]
coins: BoundedVec<Coin, ConstU32<{ MAX_COINS_PER_NETWORK }>>,
}
#[cfg(feature = "std")]
impl Zeroize for Network {
fn zeroize(&mut self) {
for coin in self.coins.as_mut() {
coin.zeroize();
}
self.coins.truncate(0);
}
}
impl Network {
#[cfg(feature = "std")]
pub fn new(coins: Vec<Coin>) -> Result<Network, &'static str> {
if coins.is_empty() {
Err("no coins provided")?;
}
let network = coins[0].network();
for coin in coins.iter().skip(1) {
if coin.network() != network {
Err("coins have different networks")?;
}
}
Ok(Network {
coins: coins.try_into().map_err(|_| "coins length exceeds {MAX_COINS_PER_NETWORK}")?,
})
}
pub fn coins(&self) -> &[Coin] {
&self.coins
}
}

View File

@@ -0,0 +1,16 @@
use zeroize::Zeroize;
use borsh::{BorshSerialize, BorshDeserialize};
use crate::network_id::ExternalNetworkId;
/// A signal.
// NOTE: Serialized with borsh; variant declaration order defines the discriminant, so new signals
// must only be appended.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub enum Signal {
  /// A signal to retire the current protocol.
  Retire {
    /// The protocol to retire in favor of.
    // presumably a hash/ID identifying the successor protocol — TODO confirm against the signals
    // pallet
    in_favor_of: [u8; 32],
  },
  /// A signal to halt an external network.
  Halt(ExternalNetworkId),
}

View File

@@ -0,0 +1,37 @@
use borsh::{io::*, BorshSerialize, BorshDeserialize};
use sp_core::{ConstU32, bounded::BoundedVec};
// TODO: Don't serialize this as a Vec<u8>. Shorten the length-prefix, technically encoding as an
// enum.
/// Serialize a `BitVec` as borsh by encoding its raw backing bytes as a `Vec<u8>`.
pub fn borsh_serialize_bitvec<W: Write>(
  bitvec: &bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
  writer: &mut W,
) -> Result<()> {
  let vec: &[u8] = bitvec.as_raw_slice();
  BorshSerialize::serialize(vec, writer)
}

/// Deserialize a `BitVec` from its borsh `Vec<u8>` encoding.
// NOTE(review): `BitVec::from_vec` sets the bit-length to a multiple of 8, so a serialized
// bit-length which isn't byte-aligned won't round-trip exactly — confirm callers only use
// byte-aligned lengths (or tolerate the padding).
pub fn borsh_deserialize_bitvec<R: Read>(
  reader: &mut R,
) -> Result<bitvec::vec::BitVec<u8, bitvec::order::Lsb0>> {
  let bitvec: alloc::vec::Vec<u8> = BorshDeserialize::deserialize_reader(reader)?;
  Ok(bitvec::vec::BitVec::from_vec(bitvec))
}
// A `BoundedVec` is serialized with the representation of a plain `Vec`
type SerializeBoundedVecAs<T> = alloc::vec::Vec<T>;

/// Serialize a `BoundedVec` as borsh, using `Vec`'s encoding.
pub fn borsh_serialize_bounded_vec<W: Write, T: BorshSerialize, const B: u32>(
  bounded: &BoundedVec<T, ConstU32<B>>,
  writer: &mut W,
) -> Result<()> {
  let vec: &SerializeBoundedVecAs<T> = bounded.as_ref();
  BorshSerialize::serialize(vec, writer)
}

/// Deserialize a `BoundedVec` from `Vec`'s encoding, erroring if the bound is exceeded.
pub fn borsh_deserialize_bounded_vec<R: Read, T: BorshDeserialize, const B: u32>(
  reader: &mut R,
) -> Result<BoundedVec<T, ConstU32<B>>> {
  let vec: SerializeBoundedVecAs<T> = BorshDeserialize::deserialize_reader(reader)?;
  // Enforce the bound `B` upon deserialization
  vec.try_into().map_err(|_| Error::new(ErrorKind::Other, "bound exceeded"))
}

View File

@@ -0,0 +1,76 @@
use alloc::vec::Vec;
use zeroize::Zeroize;
use borsh::{BorshSerialize, BorshDeserialize};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use crate::{
crypto::{Public, KeyPair},
network_id::{ExternalNetworkId, NetworkId},
};
mod slashes;
pub use slashes::*;
/// The type used to identify a specific session of validators.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct Session(pub u32);

/// The type used to identify a specific set of validators for an external network.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct ExternalValidatorSet {
  /// The network this set of validators are for.
  pub network: ExternalNetworkId,
  /// Which session this set of validators is occurring during.
  pub session: Session,
}

/// The type used to identify a specific set of validators.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct ValidatorSet {
  /// The network this set of validators are for.
  pub network: NetworkId,
  /// Which session this set of validators is occurring during.
  pub session: Session,
}
impl From<ExternalValidatorSet> for ValidatorSet {
fn from(set: ExternalValidatorSet) -> Self {
ValidatorSet { network: set.network.into(), session: set.session }
}
}
impl TryFrom<ValidatorSet> for ExternalValidatorSet {
type Error = ();
fn try_from(set: ValidatorSet) -> Result<Self, Self::Error> {
set.network.try_into().map(|network| ExternalValidatorSet { network, session: set.session })
}
}
impl ExternalValidatorSet {
  /// The MuSig context for this validator set.
  ///
  /// Domain-separates key aggregation by validator set, so a key aggregated for one set can't be
  /// confused with a key aggregated for another.
  pub fn musig_context(&self) -> Vec<u8> {
    borsh::to_vec(&(b"ValidatorSets-musig_key".as_ref(), self)).unwrap()
  }

  /// The MuSig public key for a validator set.
  ///
  /// This function panics on invalid input, per the definition of `dkg::musig::musig_key`.
  pub fn musig_key(&self, set_keys: &[Public]) -> Public {
    let mut keys = Vec::new();
    for key in set_keys {
      keys.push(
        // Decode each participant as a Ristretto point, panicking on an invalid encoding
        <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut key.0.as_ref())
          .expect("invalid participant"),
      );
    }
    Public(dkg::musig::musig_key::<Ristretto>(&self.musig_context(), &keys).unwrap().to_bytes())
  }

  /// The message for the `set_keys` signature.
  // Binds the domain-separation tag, the set being keyed, and the key pair itself
  pub fn set_keys_message(&self, key_pair: &KeyPair) -> Vec<u8> {
    borsh::to_vec(&(b"ValidatorSets-set_keys", self, key_pair)).unwrap()
  }
}

View File

@@ -0,0 +1,326 @@
use core::{num::NonZero, time::Duration};
use alloc::vec::Vec;
use zeroize::Zeroize;
use borsh::{BorshSerialize, BorshDeserialize};
use sp_core::{ConstU32, bounded::BoundedVec};
use crate::{
constants::{TARGET_BLOCK_TIME, SESSION_LENGTH, MAX_KEY_SHARES_PER_SET_U32},
balance::Amount,
};
/// Each slash point is equivalent to the downtime implied by missing a block proposal.
// Takes a NonZero<u16> so that the result is never 0, making this safe to divide by.
fn downtime_per_slash_point(validators: NonZero<u16>) -> Duration {
  let validators: u32 = u16::from(validators).into();
  TARGET_BLOCK_TIME * validators
}
/// A slash for a validator.
// NOTE: Serialized with borsh; variant declaration order defines the discriminant.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub enum Slash {
  /// The slash points accumulated by this validator.
  ///
  /// Each point is considered as `downtime_per_slash_point(validators)` downtime, where
  /// `validators` is the amount of validators present in the set.
  Points(u32),
  /// A fatal slash due to fundamentally faulty behavior.
  ///
  /// This should only be used for misbehavior with explicit evidence of impropriety. This should
  /// not be used for liveness failures. The validator will be penalized all allocated stake.
  Fatal,
}
impl Slash {
  /// Calculate the penalty which should be applied to the validator.
  ///
  /// `validators` is the amount of validators in the set, `allocated_stake` the validator's
  /// allocated stake, and `session_rewards` the rewards they'd earn this session if unslashed.
  ///
  /// Does not panic, even when compiled with checked arithmetic.
  pub fn penalty(
    self,
    validators: NonZero<u16>,
    allocated_stake: Amount,
    session_rewards: Amount,
  ) -> Amount {
    match self {
      Self::Points(slash_points) => {
        let mut slash_points = u64::from(slash_points);
        // Do the logic with the stake in u128 to prevent overflow from multiplying u64s
        let allocated_stake = u128::from(allocated_stake.0);
        let session_rewards = u128::from(session_rewards.0);

        // A Serai validator is allowed to be offline for an average of one day every two weeks
        // with no additional penalty. They'll solely not earn rewards for the time they were
        // offline.
        const GRACE_WINDOW: Duration = Duration::from_secs(2 * 7 * 24 * 60 * 60);
        const GRACE: Duration = Duration::from_secs(24 * 60 * 60);
        // GRACE / GRACE_WINDOW is the fraction of the time a validator is allowed to be offline
        // This means we want SESSION_LENGTH * (GRACE / GRACE_WINDOW), but with the parentheses
        // moved so we don't incur the floordiv and hit 0
        const PENALTY_FREE_DOWNTIME: Duration = Duration::from_secs(
          (SESSION_LENGTH.as_secs() * GRACE.as_secs()) / GRACE_WINDOW.as_secs(),
        );

        let downtime_per_slash_point = downtime_per_slash_point(validators);
        let penalty_free_slash_points =
          PENALTY_FREE_DOWNTIME.as_secs() / downtime_per_slash_point.as_secs();

        /*
          In practice, the following means:

          - Hours 0-12 are penalized as if they're hours 0-12.
          - Hours 12-24 are penalized as if they're hours 12-36.
          - Hours 24-36 are penalized as if they're hours 36-96.
          - Hours 36-48 are penalized as if they're hours 96-168.
          /* Commented, see below explanation of why.
            - Hours 48-168 are penalized for 0-2% of stake.
            - 168-336 hours of slashes, for a session only lasting 168 hours, is penalized for
              2-10% of stake.

            This means a validator offline has to be offline for more than two days to start
            having their stake slashed.
          */

          This means a validator offline for two days will not earn any rewards for that session.
        */
        const MULTIPLIERS: [u64; 4] = [1, 2, 5, 6];
        let reward_slash = {
          // In intervals of the penalty-free slash points, weight the slash points accrued
          // The multiplier for the first interval is 1 as it's penalty-free
          let mut weighted_slash_points_for_reward_slash = 0;
          let mut total_possible_slash_points_for_rewards_slash = 0;
          for mult in MULTIPLIERS {
            let slash_points_in_interval = slash_points.min(penalty_free_slash_points);
            weighted_slash_points_for_reward_slash += slash_points_in_interval * mult;
            total_possible_slash_points_for_rewards_slash += penalty_free_slash_points * mult;
            slash_points -= slash_points_in_interval;
          }
          // If there are no penalty-free slash points, and the validator was slashed, slash the
          // entire reward
          (u128::from(weighted_slash_points_for_reward_slash) * session_rewards)
            .checked_div(u128::from(total_possible_slash_points_for_rewards_slash))
            .unwrap_or({
              if weighted_slash_points_for_reward_slash == 0 {
                0
              } else {
                session_rewards
              }
            })
        };
        // Ensure the slash never exceeds the amount slashable (due to rounding errors)
        let reward_slash = reward_slash.min(session_rewards);

        /*
        let slash_points_for_entire_session =
          SESSION_LENGTH.as_secs() / downtime_per_slash_point.as_secs();
        let offline_slash = {
          // The amount of stake to slash for being offline
          const MAX_STAKE_SLASH_PERCENTAGE_OFFLINE: u64 = 2;
          let stake_to_slash_for_being_offline =
            (allocated_stake * u128::from(MAX_STAKE_SLASH_PERCENTAGE_OFFLINE)) / 100;

          // We already removed the slash points for `intervals * penalty_free_slash_points`
          let slash_points_for_reward_slash =
            penalty_free_slash_points * u64::try_from(MULTIPLIERS.len()).unwrap();
          let slash_points_for_offline_stake_slash =
            slash_points_for_entire_session.saturating_sub(slash_points_for_reward_slash);

          let slash_points_in_interval = slash_points.min(slash_points_for_offline_stake_slash);
          slash_points -= slash_points_in_interval;

          // If there are no slash points for the entire session, don't slash stake
          // That's an extreme edge case which shouldn't start penalizing validators
          (u128::from(slash_points_in_interval) * stake_to_slash_for_being_offline)
            .checked_div(u128::from(slash_points_for_offline_stake_slash))
            .unwrap_or(0)
        };

        let disruptive_slash = {
          /*
            A validator may have more slash points than `slash_points_for_stake_slash` if they
            didn't just accrue slashes for missing block proposals, yet also accrued slashes for
            being disruptive. In that case, we still want to bound their slash points so they
            can't somehow be slashed for 100% of their stake (which should only happen on a fatal
            slash).
          */
          const MAX_STAKE_SLASH_PERCENTAGE_DISRUPTIVE: u64 = 8;
          let stake_to_slash_for_being_disruptive =
            (allocated_stake * u128::from(MAX_STAKE_SLASH_PERCENTAGE_DISRUPTIVE)) / 100;

          // Follows the offline slash for `unwrap_or` policy
          (u128::from(slash_points.min(slash_points_for_entire_session)) *
            stake_to_slash_for_being_disruptive)
            .checked_div(u128::from(slash_points_for_entire_session))
            .unwrap_or(0)
        };
        */

        /*
          We do not slash for being offline/disruptive at this time. Doing so allows an adversary
          to DoS nodes to not just take them offline, yet also take away their stake. This isn't
          preferable to the increased incentive to properly maintain a node when the rewards
          should already be sufficient for that purpose.

          Validators also shouldn't be able to be so disruptive due to their limiting upon
          disruption *while its ongoing*. Slashes as a post-response, while an arguably worthwhile
          economic penalty, can never be a response in the moment (as necessary to actually handle
          the disruption).

          If stake slashing was to be re-enabled, the percentage of stake which is eligible for
          slashing should be variable to how close we are to losing liveness. This would mean if
          less than 10% of validators are offline, no stake is slashes. If 10% are, 2% is
          eligible. If 20% are, 5% is eligible. If 30% are, 10% is eligible.

          (or similar)

          This would mean that a DoS is insufficient to cause a validator to lose their stake.
          Instead, a coordinated DoS against multiple Serai validators would be needed,
          strengthening our assumptions.
        */
        let offline_slash = 0;
        let disruptive_slash = 0;

        let stake_slash = (offline_slash + disruptive_slash).min(allocated_stake);
        // Each term is bounded by a u64, so this u128 sum cannot overflow
        let penalty_u128 = reward_slash + stake_slash;
        // saturating_into
        Amount(u64::try_from(penalty_u128).unwrap_or(u64::MAX))
      }
      // On fatal slash, their entire stake (and their pending rewards) is removed
      // saturating_add upholds the documented no-panic contract: a plain `+` would panic under
      // checked arithmetic if `allocated_stake + session_rewards` exceeded u64::MAX
      Self::Fatal => Amount(allocated_stake.0.saturating_add(session_rewards.0)),
    }
  }
}
/// A report of all slashes incurred for a `ValidatorSet`.
// NOTE(review): presumably ordered to match the set's validators — confirm against callers.
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct SlashReport(
  // BoundedVec doesn't implement the borsh traits itself, so (de)serialization is delegated to
  // crate-provided helpers
  #[borsh(
    serialize_with = "crate::borsh_serialize_bounded_vec",
    deserialize_with = "crate::borsh_deserialize_bounded_vec"
  )]
  pub BoundedVec<Slash, ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 }>>,
);
/// An error when converting from a `Vec`.
// Derive the standard traits expected of a public error type (notably Debug, so callers may
// `unwrap`/log results containing it)
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum FromVecError {
  /// The source `Vec` was too long to be converted.
  TooLong,
}
impl TryFrom<Vec<Slash>> for SlashReport {
  type Error = FromVecError;
  /// Convert a `Vec` of slashes into a `SlashReport`, erroring if it exceeds the bound.
  fn try_from(slashes: Vec<Slash>) -> Result<SlashReport, FromVecError> {
    let bounded = slashes.try_into().map_err(|_| FromVecError::TooLong)?;
    Ok(Self(bounded))
  }
}
impl zeroize::Zeroize for SlashReport {
fn zeroize(&mut self) {
for slash in self.0.as_mut() {
slash.zeroize();
}
}
}
impl SlashReport {
  /// The message to sign when publishing this SlashReport.
  // This is assumed binding to the ValidatorSet via the key signed with
  pub fn report_slashes_message(&self) -> Vec<u8> {
    const DST: &[u8] = b"ValidatorSets-report_slashes";
    // Capacity hint: domain tag, length prefix, and a rough per-slash footprint
    let capacity =
      DST.len() + core::mem::size_of::<u32>() + (self.0.len() * core::mem::size_of::<Slash>());
    let mut message = Vec::with_capacity(capacity);
    (DST, self).serialize(&mut message).unwrap();
    message
  }
}
#[test]
fn test_penalty() {
  // The rewards are 168, presumably one unit per hour of a week-long (168-hour) session, so the
  // expected penalties read directly as hours of withheld rewards
  for validators in [1, 50, 100, crate::constants::MAX_KEY_SHARES_PER_SET] {
    let validators = NonZero::new(validators).unwrap();
    // 12 hours of slash points should only decrease the rewards proportionately
    let twelve_hours_of_slash_points =
      u32::try_from((12 * 60 * 60) / downtime_per_slash_point(validators).as_secs()).unwrap();
    assert_eq!(
      Slash::Points(twelve_hours_of_slash_points).penalty(
        validators,
        Amount(u64::MAX),
        Amount(168)
      ),
      Amount(12)
    );
    // 24 hours of slash points should be counted as 36 hours
    assert_eq!(
      Slash::Points(2 * twelve_hours_of_slash_points).penalty(
        validators,
        Amount(u64::MAX),
        Amount(168)
      ),
      Amount(36)
    );
    // 36 hours of slash points should be counted as 96 hours
    assert_eq!(
      Slash::Points(3 * twelve_hours_of_slash_points).penalty(
        validators,
        Amount(u64::MAX),
        Amount(168)
      ),
      Amount(96)
    );
    // 48 hours of slash points should be counted as 168 hours
    assert_eq!(
      Slash::Points(4 * twelve_hours_of_slash_points).penalty(
        validators,
        Amount(u64::MAX),
        Amount(168)
      ),
      Amount(168)
    );
    // Retained for if stake slashing is ever re-enabled (see the rationale in `penalty`)
    /*
    // A full week of slash points should slash 2%
    let week_of_slash_points = 14 * twelve_hours_of_slash_points;
    assert_eq!(
      Slash::Points(week_of_slash_points).penalty(validators, Amount(1000), Amount(168)),
      Amount(20 + 168)
    );
    // Two weeks of slash points should slash 10%
    assert_eq!(
      Slash::Points(2 * week_of_slash_points).penalty(validators, Amount(1000), Amount(168)),
      Amount(100 + 168)
    );
    // Anything greater should still only slash 10%
    assert_eq!(
      Slash::Points(u32::MAX).penalty(validators, Amount(1000), Amount(168)),
      Amount(100 + 168)
    );
    */
    // Anything greater should still only slash the rewards
    assert_eq!(
      Slash::Points(u32::MAX).penalty(validators, Amount(u64::MAX), Amount(168)),
      Amount(168)
    );
  }
}
#[test]
fn no_overflow() {
  // Exercise both extremes of the validator count, in case the arithmetic is correlated (or
  // inversely correlated) with it: u16::MAX maximizes the downtime each slash point represents,
  // 1 minimizes it
  for validators in [u16::MAX, 1] {
    Slash::Points(u32::MAX).penalty(
      NonZero::new(validators).unwrap(),
      Amount(u64::MAX),
      Amount(u64::MAX),
    );
  }
}