Redo primitives, abi

Consolidates all primitives into a single crate. We didn't benefit from its
fragmentation. I'm hesitant to say the new internal organization is better (it
may be just as clunky), but it's at least in a single crate (not spread out
over micro-crates).

The ABI is the most distinct. We now entirely own it. Block header hashes don't
directly commit to any BABE data (avoiding potentially ~4 KB headers upon
session changes), and are hashed as borsh (a more widely used codec than
SCALE). There are still Substrate variants, using SCALE and with the BABE data,
but they're prunable from a protocol design perspective.

Defines a transaction as a Vec of Calls, allowing atomic operations.
This commit is contained in:
Luke Parker
2025-02-12 03:41:50 -05:00
parent 2f8ce15a92
commit 776e417fd2
49 changed files with 2225 additions and 2092 deletions

View File

@@ -0,0 +1,171 @@
use alloc::{vec, vec::Vec};
use zeroize::Zeroize;
use borsh::{io, BorshSerialize, BorshDeserialize};
use crate::{
BlockHash, crypto::Signature, network_id::ExternalNetworkId,
instructions::InInstructionWithBalance,
};
/*
`Batch`s have a size limit we enforce upon deserialization.
The naive solution would be to deserialize, then serialize, and check the serialized length is
less than the maximum. This performs a redundant allocation and is computationally non-trivial.
The next solution would be to wrap the deserialization with a `Cursor` so one can check the
amount read, yet `Cursor` isn't available under no-std.
We solve this by manually implementing a `Cursor`-equivalent (for our purposes) which lets us
check the total amount read is `<=` the maximum size.
The issue is we need every call to `BorshDeserialize::deserialize_reader` to use our custom
reader, which requires manually implementing it, which means we can't use the derive macro and
can't ensure it follows the borsh specification. We solve this by generating two identical
structs, one internal with a derived `BorshDeserialize::deserialize_reader`, one public with a
manually implemented `BorshDeserialize::deserialize_reader` wrapping the internal struct's. This
lets us ensure the correct reader is used and error if the size limit is hit, while still using
a derived `BorshDeserialize` which will definitively be compliant.
*/
// Generates a `Batch`-shaped struct. Invoked twice (below) so the internal and public structs
// have identical layouts, as required by the deserialization scheme described above.
macro_rules! batch_struct {
  (#[$derive: meta] $pub: vis $name: ident) => {
    /// A batch of `InInstruction`s to publish onto Serai.
    #[allow(clippy::needless_pub_self)]
    #[$derive]
    $pub struct $name {
      /// The size this will be once encoded.
      // Skipped by borsh so it isn't part of the wire format; it's solely book-keeping used to
      // enforce `MAX_SIZE`
      #[allow(dead_code)] // This is unused for the `BatchDeserialize` instance
      #[borsh(skip)]
      encoded_size: usize,
      /// The network this batch of instructions is coming from.
      network: ExternalNetworkId,
      /// The ID of this `Batch`.
      id: u32,
      /// The hash of the external network's block which produced this `Batch`.
      external_network_block_hash: BlockHash,
      /// The instructions to execute.
      instructions: Vec<InInstructionWithBalance>,
    }
  }
}
// Module-private mirror of `Batch` with a derived (and therefore spec-compliant)
// `BorshDeserialize`, solely backing `Batch`'s manual `BorshDeserialize`
batch_struct!(#[derive(BorshDeserialize)] pub(self) BatchDeserialize);
// The public `Batch`, with a derived `BorshSerialize` (its `BorshDeserialize` is manual)
batch_struct!(#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize)] pub Batch);
impl BorshDeserialize for Batch {
fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
// A custom reader which enforces the `Batch`'s max size limit
struct SizeLimitReader<'a, R: io::Read> {
reader: &'a mut R,
read: usize,
}
impl<R: io::Read> io::Read for SizeLimitReader<'_, R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let read = self.reader.read(buf)?;
self.read = self.read.saturating_add(read);
if self.read > Batch::MAX_SIZE {
Err(io::Error::new(io::ErrorKind::Other, "Batch size exceeded maximum"))?;
}
Ok(read)
}
}
let mut size_limit_reader = SizeLimitReader { reader, read: 0 };
let BatchDeserialize {
encoded_size: _,
network,
id,
external_network_block_hash,
instructions,
} = <_>::deserialize_reader(&mut size_limit_reader)?;
Ok(Batch {
encoded_size: size_limit_reader.read,
network,
id,
external_network_block_hash,
instructions,
})
}
}
/// An error incurred while pushing an instruction onto a `Batch`.
// Derive `Debug` (so callers can log/`unwrap` on it) and the comparison/copy traits (so callers
// can match and propagate it by value). The original type derived nothing, making it awkward to
// even `assert!` against in tests.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum PushInstructionError {
  /// The Batch's max size was exceeded.
  MaxSizeExceeded,
}
impl Batch {
  /// The maximum size of a valid `Batch`'s encoding.
  const MAX_SIZE: usize = 32_768;

  /// Create a new Batch.
  pub fn new(network: ExternalNetworkId, id: u32, external_network_block_hash: BlockHash) -> Self {
    let mut batch =
      Batch { encoded_size: 0, network, id, external_network_block_hash, instructions: vec![] };
    // Initialize `encoded_size` to the length of this (empty) `Batch`'s actual encoding
    batch.encoded_size = borsh::to_vec(&batch).unwrap().len();
    batch
  }

  /// Push an `InInstructionWithBalance` onto this `Batch`.
  ///
  /// Errors, without modifying the `Batch`, if pushing this instruction would cause the
  /// `Batch`'s encoding to exceed `Batch::MAX_SIZE`.
  pub fn push_instruction(
    &mut self,
    instruction: InInstructionWithBalance,
  ) -> Result<(), PushInstructionError> {
    // Pushing onto the `Vec` doesn't change the size of its borsh length prefix (a fixed-width
    // `u32`), so the new encoded size is exactly the old size plus this instruction's encoding
    let new_size =
      self.encoded_size.saturating_add(borsh::to_vec(&instruction).unwrap().len());
    if new_size > Self::MAX_SIZE {
      Err(PushInstructionError::MaxSizeExceeded)?;
    }
    // Track the new size so the limit is enforced cumulatively across pushes. The original code
    // never updated `encoded_size` here, so every push was checked against the *empty* `Batch`'s
    // size, allowing repeated pushes to exceed `MAX_SIZE`.
    self.encoded_size = new_size;
    self.instructions.push(instruction);
    Ok(())
  }

  /// The message to sign when publishing this Batch.
  pub fn publish_batch_message(&self) -> Vec<u8> {
    // Domain-separation tag, serialized alongside the `Batch` itself
    const DST: &[u8] = b"InInstructions-publish_batch";
    // We don't estimate the size of this Batch, we just reserve a small constant capacity
    let mut buf = Vec::with_capacity(1024);
    (DST, self).serialize(&mut buf).unwrap();
    buf
  }

  /// The network this batch of instructions is coming from.
  pub fn network(&self) -> ExternalNetworkId {
    self.network
  }

  /// The ID of this `Batch`.
  pub fn id(&self) -> u32 {
    self.id
  }

  /// The hash of the external network's block which produced this `Batch`.
  pub fn external_network_block_hash(&self) -> BlockHash {
    self.external_network_block_hash
  }

  /// The instructions within this `Batch`.
  pub fn instructions(&self) -> &[InInstructionWithBalance] {
    &self.instructions
  }
}
/// A signed batch.
// Field order is the borsh wire format; don't reorder
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct SignedBatch {
  /// The batch which was signed.
  pub batch: Batch,
  /// The signature.
  // NOTE(review): presumably over `Batch::publish_batch_message` — confirm at the signing site
  pub signature: Signature,
}
// Manual `Zeroize` for `SignedBatch` (it isn't derived on the struct itself).
// NOTE(review): gated on the `std` feature — confirm this gate is intentional, as `Batch`'s
// derived `Zeroize` is unconditional.
#[cfg(feature = "std")]
impl Zeroize for SignedBatch {
  fn zeroize(&mut self) {
    self.batch.zeroize();
    // Zeroize the signature's underlying bytes via its inner field's mutable view
    self.signature.0.as_mut().zeroize();
  }
}

View File

@@ -0,0 +1,86 @@
use zeroize::Zeroize;
use borsh::{BorshSerialize, BorshDeserialize};
use crate::{
address::{SeraiAddress, ExternalAddress},
balance::{Amount, ExternalBalance, Balance},
instructions::OutInstruction,
};
mod batch;
pub use batch::*;
/// The destination for coins.
// Variant order defines the borsh enum discriminants (wire format); don't reorder
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub enum Destination {
  /// The Serai address to transfer the coins to.
  Serai(SeraiAddress),
  /// Burn the coins with the included `OutInstruction`.
  Burn(OutInstruction),
}
/// An instruction on how to handle coins in.
// Variant order defines the borsh enum discriminants (wire format); don't reorder
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub enum InInstruction {
  /// Add the coins as genesis liquidity.
  GenesisLiquidity(SeraiAddress),
  /// Use the coins to swap to staked SRI, pre-economic security.
  SwapToStakedSri {
    /// The validator to allocate the stake to.
    validator: SeraiAddress,
    /// The minimum amount of staked SRI to swap to.
    minimum: Amount,
  },
  /// Transfer the coins to a Serai address, swapping some for SRI.
  TransferWithSwap {
    /// The Serai address to transfer the coins to, after swapping some.
    to: SeraiAddress,
    /// The maximum amount of coins to swap for the intended amount of SRI.
    maximum_swap: Amount,
    /// The SRI amount to swap some of the coins for.
    sri: Amount,
  },
  /// Transfer the coins to a Serai address.
  Transfer {
    /// The Serai address to transfer the coins to.
    to: SeraiAddress,
  },
  /// Swap part of the coins to SRI and add the coins as liquidity.
  SwapAndAddLiquidity {
    /// The owner to-be of the added liquidity.
    owner: SeraiAddress,
    /// The amount of SRI to add within the liquidity position.
    sri: Amount,
    /// The minimum amount of the coin to add as liquidity.
    minimum_coin: Amount,
    /// The amount of SRI to swap to and send to the owner to-be to pay for transactions on Serai.
    sri_for_fees: Amount,
  },
  /// Swap the coins.
  Swap {
    /// The minimum balance to receive.
    minimum_out: Balance,
    /// The destination to transfer the balance to.
    ///
    /// If `Destination::Burn`, the balance out will be burnt with the included `OutInstruction`.
    destination: Destination,
  },
}
/// An instruction on how to handle coins in with the address to return the coins to on error.
// Field order is the borsh wire format; don't reorder
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct RefundableInInstruction {
  /// The instruction on how to handle coins in.
  pub instruction: InInstruction,
  /// The address to return the coins to on error.
  ///
  /// If `None`, the coins are not refundable on error.
  pub return_address: Option<ExternalAddress>,
}
/// An instruction on how to handle coins in with the balance to use for the coins in.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct InInstructionWithBalance {
/// The instruction on how to handle coins in.
pub instruction: OutInstruction,
/// The coins in.
pub balance: ExternalBalance,
}

View File

@@ -0,0 +1,5 @@
// In instructions, and the `Batch` type they're published to Serai within
mod r#in;
// NOTE(review): `RefundableInInstruction` is declared in `r#in` but not re-exported here —
// confirm that's intentional
pub use r#in::{InInstruction, InInstructionWithBalance, PushInstructionError, Batch, SignedBatch};
// Out instructions
mod out;
pub use out::{OutInstruction, OutInstructionWithBalance};

View File

@@ -0,0 +1,21 @@
use zeroize::Zeroize;
use borsh::{BorshSerialize, BorshDeserialize};
use crate::{address::ExternalAddress, balance::ExternalBalance};
/// An instruction on how to transfer coins out.
// Variant order defines the borsh enum discriminants (wire format); don't reorder.
// NOTE(review): single-variant enum — presumably an enum (not a struct) for wire-format
// stability as further out-instructions are added; confirm
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub enum OutInstruction {
  /// Transfer to the specified address.
  Transfer(ExternalAddress),
}
/// An instruction on how to transfer coins out with the balance to use for the transfer out.
// Field order is the borsh wire format; don't reorder
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
pub struct OutInstructionWithBalance {
  /// The instruction on how to transfer coins out.
  pub instruction: OutInstruction,
  /// The balance to use for the transfer out.
  pub balance: ExternalBalance,
}