Populate UnbalancedMerkleTrees in headers

@@ -8,14 +8,14 @@ use crate::{
 };
 
 /// The tag for the hash of a transaction's event, forming a leaf of the Merkle tree of its events.
-pub const EVENTS_COMMITMENT_TRANSACTION_EVENT_TAG: u8 = 0;
+pub const TRANSACTION_EVENTS_COMMITMENT_LEAF_TAG: u8 = 0;
 /// The tag for the branch hashes of transaction events.
-pub const EVENTS_COMMITMENT_TRANSACTION_EVENTS_TAG: u8 = 1;
+pub const TRANSACTION_EVENTS_COMMITMENT_BRANCH_TAG: u8 = 1;
 /// The tag for the hash of a transaction's hash and its events' Merkle root, forming a leaf of the
 /// Merkle tree which is the events commitment.
-pub const EVENTS_COMMITMENT_TRANSACTION_TAG: u8 = 2;
+pub const EVENTS_COMMITMENT_LEAF_TAG: u8 = 2;
 /// The tag for the branch hashes of the Merkle tree which is the events commitment.
-pub const EVENTS_COMMITMENT_TRANSACTIONS_TAG: u8 = 3;
+pub const EVENTS_COMMITMENT_BRANCH_TAG: u8 = 3;
 
 /// A V1 header for a block.
 #[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
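
Note on usage: each tag byte is hashed immediately before the data it frames, so leaf hashes and branch hashes live in disjoint domains and a leaf can never be reinterpreted as a branch (or vice versa). A minimal sketch of the intent; the exact preimage layout here is illustrative, not this crate's normative encoding:

fn transaction_event_leaf(event_encoding: &[u8]) -> [u8; 32] {
  // Domain-separate the leaf: prefix the leaf tag before hashing
  let mut preimage = vec![TRANSACTION_EVENTS_COMMITMENT_LEAF_TAG];
  preimage.extend_from_slice(event_encoding);
  sp_core::blake2_256(&preimage)
}

fn transaction_events_branch(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] {
  // Domain-separate the branch: prefix the branch tag before hashing
  let mut preimage = vec![TRANSACTION_EVENTS_COMMITMENT_BRANCH_TAG];
  preimage.extend_from_slice(left);
  preimage.extend_from_slice(right);
  sp_core::blake2_256(&preimage)
}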

@@ -25,13 +25,27 @@ pub struct HeaderV1 {
   /// The genesis block has number 0.
   pub number: u64,
   /// The commitment to the DAG this header builds upon.
-  pub builds_upon: BlockHash,
+  ///
+  /// This is defined as an unbalanced Merkle tree so light clients may sync one header per epoch,
+  /// and then may prove the inclusion of any header in logarithmic depth (without providing the
+  /// entire header chain).
+  ///
+  /// An alternative popular option is a Merkle Mountain Range, which makes more recent blocks
+  /// cheaper to prove at the sacrifice of older blocks being more expensive to prove. An MMR isn't
+  /// used in order to minimize the protocol's surface area. Additionally, even though the
+  /// unbalanced Merkle tree doesn't achieve such notably short paths for recent blocks, it does
+  /// inherently provide lower-depth paths to more recent items *on imbalance*.
+  pub builds_upon: UnbalancedMerkleTree,
   /// The UNIX time in milliseconds this block was created at.
   pub unix_time_in_millis: u64,
   /// The commitment to the transactions within this block.
-  // TODO: Some transactions don't have unique hashes due to assuming validators set unique keys
-  pub transactions_commitment: [u8; 32],
+  pub transactions_commitment: UnbalancedMerkleTree,
   /// The commitment to the events within this block.
+  ///
+  /// The leaves of this tree will be of the form
+  /// `(EVENTS_COMMITMENT_LEAF_TAG, transaction hash, transaction's events' Merkle tree root)`.
+  /// A transaction may have the same event multiple times, yet an event may be uniquely identified
+  /// by its path within the tree.
   pub events_commitment: UnbalancedMerkleTree,
   /// A commitment to the consensus data used to justify adding this block to the blockchain.
   pub consensus_commitment: [u8; 32],
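
The light-client claim in the `builds_upon` documentation is ordinary Merkle-path verification. A hedged sketch, assuming a proof format of one (sibling hash, sibling-is-left) pair per level; this commit doesn't define the actual proof format:

fn verify_inclusion(
  branch_tag: u8,
  root: &UnbalancedMerkleTree,
  leaf: [u8; 32],
  // One (sibling hash, whether the sibling is the left element) pair per level
  path: &[([u8; 32], bool)],
) -> bool {
  let mut hash = leaf;
  for (sibling, sibling_is_left) in path {
    let mut preimage = Vec::with_capacity(1 + 32 + 32);
    preimage.push(branch_tag);
    if *sibling_is_left {
      preimage.extend_from_slice(sibling);
      preimage.extend_from_slice(&hash);
    } else {
      preimage.extend_from_slice(&hash);
      preimage.extend_from_slice(sibling);
    }
    hash = sp_core::blake2_256(&preimage);
  }
  hash == root.root
}

The path length is logarithmic in the number of headers, which is what lets a light client that only syncs epoch-boundary headers check any individual header.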

@@ -52,17 +66,23 @@ impl Header {
     }
   }
   /// Get the commitment to the DAG this header builds upon.
-  pub fn builds_upon(&self) -> BlockHash {
+  pub fn builds_upon(&self) -> UnbalancedMerkleTree {
     match self {
       Header::V1(HeaderV1 { builds_upon, .. }) => *builds_upon,
     }
   }
   /// The commitment to the transactions within this block.
-  pub fn transactions_commitment(&self) -> [u8; 32] {
+  pub fn transactions_commitment(&self) -> UnbalancedMerkleTree {
     match self {
       Header::V1(HeaderV1 { transactions_commitment, .. }) => *transactions_commitment,
     }
   }
+  /// The commitment to the events within this block.
+  pub fn events_commitment(&self) -> UnbalancedMerkleTree {
+    match self {
+      Header::V1(HeaderV1 { events_commitment, .. }) => *events_commitment,
+    }
+  }
   /// Get the hash of the header.
   pub fn hash(&self) -> BlockHash {
     BlockHash(sp_core::blake2_256(&borsh::to_vec(self).unwrap()))

@@ -96,21 +116,33 @@ mod substrate {
 
   use super::*;
 
-  /// The digest for all of the Serai-specific header fields.
+  /// The digest for all of the Serai-specific header fields added before execution of the block.
   #[derive(Clone, Copy, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
-  pub struct SeraiDigest {
-    /// The commitment to the DAG this header builds upon.
-    pub builds_upon: BlockHash,
+  pub struct SeraiPreExecutionDigest {
     /// The UNIX time in milliseconds this block was created at.
     pub unix_time_in_millis: u64,
+  }
+
+  /// The digest for all of the Serai-specific header fields determined during execution of the
+  /// block.
+  #[derive(Clone, Copy, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
+  pub struct SeraiExecutionDigest {
+    /// The commitment to the DAG this header builds upon.
+    pub builds_upon: UnbalancedMerkleTree,
     /// The commitment to the transactions within this block.
-    pub transactions_commitment: [u8; 32],
+    pub transactions_commitment: UnbalancedMerkleTree,
     /// The commitment to the events within this block.
     pub events_commitment: UnbalancedMerkleTree,
   }
 
-  impl SeraiDigest {
-    const CONSENSUS_ID: [u8; 4] = *b"SRID";
+  impl SeraiPreExecutionDigest {
+    /// The consensus ID for a Serai pre-execution digest.
+    pub const CONSENSUS_ID: [u8; 4] = *b"SRIP";
+  }
+
+  impl SeraiExecutionDigest {
+    /// The consensus ID for a Serai execution digest.
+    pub const CONSENSUS_ID: [u8; 4] = *b"SRIE";
   }
 
   /// The consensus data for a V1 header.
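
For orientation, a sketch of how these two digests are expected to travel in a header: the pre-execution digest as a `PreRuntime` item and the execution digest as a `Consensus` item, matching the IDs above. The field values below are placeholders:

use sp_runtime::generic::DigestItem;

fn example_digest_items() -> (DigestItem, DigestItem) {
  let pre = SeraiPreExecutionDigest { unix_time_in_millis: 0 };
  let exec = SeraiExecutionDigest {
    builds_upon: UnbalancedMerkleTree::EMPTY,
    transactions_commitment: UnbalancedMerkleTree::EMPTY,
    events_commitment: UnbalancedMerkleTree::EMPTY,
  };
  (
    // Committed by the block author before execution
    DigestItem::PreRuntime(SeraiPreExecutionDigest::CONSENSUS_ID, borsh::to_vec(&pre).unwrap()),
    // Deposited by the runtime during execution (see EndOfBlock below)
    DigestItem::Consensus(SeraiExecutionDigest::CONSENSUS_ID, borsh::to_vec(&exec).unwrap()),
  )
}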

@@ -149,34 +181,43 @@ mod substrate {
       fn from(header: &SubstrateHeader) -> Self {
         match header {
           SubstrateHeader::V1(header) => {
-            let digest =
-              header.consensus.digest.logs().iter().find_map(|digest_item| match digest_item {
-                DigestItem::PreRuntime(consensus, encoded)
-                  if *consensus == SeraiDigest::CONSENSUS_ID =>
-                {
-                  SeraiDigest::deserialize_reader(&mut encoded.as_slice()).ok()
-                }
-                _ => None,
-              });
+            let mut pre_execution_digest = None;
+            let mut execution_digest = None;
+            for log in header.consensus.digest.logs() {
+              match log {
+                DigestItem::PreRuntime(consensus, encoded)
+                  if *consensus == SeraiPreExecutionDigest::CONSENSUS_ID =>
+                {
+                  pre_execution_digest =
+                    SeraiPreExecutionDigest::deserialize_reader(&mut encoded.as_slice()).ok();
+                }
+                DigestItem::Consensus(consensus, encoded)
+                  if *consensus == SeraiExecutionDigest::CONSENSUS_ID =>
+                {
+                  execution_digest =
+                    SeraiExecutionDigest::deserialize_reader(&mut encoded.as_slice()).ok();
+                }
+                _ => {}
+              }
+            }
             Header::V1(HeaderV1 {
               number: header.number,
-              builds_upon: digest
+              builds_upon: execution_digest
                 .as_ref()
                 .map(|digest| digest.builds_upon)
-                .unwrap_or(BlockHash::from([0; 32])),
-              unix_time_in_millis: digest
+                .unwrap_or(UnbalancedMerkleTree::EMPTY),
+              unix_time_in_millis: pre_execution_digest
                 .as_ref()
                 .map(|digest| digest.unix_time_in_millis)
                 .unwrap_or(0),
-              transactions_commitment: digest
+              transactions_commitment: execution_digest
                 .as_ref()
                 .map(|digest| digest.transactions_commitment)
-                .unwrap_or([0; 32]),
-              events_commitment: digest
+                .unwrap_or(UnbalancedMerkleTree::EMPTY),
+              events_commitment: execution_digest
                 .as_ref()
                 .map(|digest| digest.events_commitment)
                 .unwrap_or(UnbalancedMerkleTree::EMPTY),
-              // TODO: This hashes the digest *including seals*, doesn't it?
               consensus_commitment: sp_core::blake2_256(&header.consensus.encode()),
             })
           }

@@ -64,84 +64,6 @@ impl UnbalancedMerkleTree {
     }
     Self { root: current[0] }
   }
-
-  /// Calculate the Merkle tree root for a list of hashes, passed in as their SCALE encoding.
-  ///
-  /// This method does not perform any allocations and is quite optimized. It is intended to be
-  /// called from within the Substrate runtime, a resource-constrained environment. It does take in
-  /// an owned Vec, despite solely using it as a mutable slice, due to the trashing of its content.
-  ///
-  /// Please see the documentation of `UnbalancedMerkleTree` and `UnbalancedMerkleTree::new` for
-  /// context on structure.
-  ///
-  /// A SCALE encoding will be length-prefixed with a Compact number per
-  /// https://docs.polkadot.com/polkadot-protocol/basics/data-encoding/#data-types.
-  #[doc(hidden)]
-  pub fn from_scale_encoded_list_of_hashes(tag: u8, encoding: Vec<u8>) -> Self {
-    let mut hashes = encoding;
-
-    // Learn the length of the length prefix
-    let length_prefix_len = {
-      let mut slice = hashes.as_slice();
-      <scale::Compact<u32> as scale::Decode>::skip(&mut slice).unwrap();
-      hashes.len() - slice.len()
-    };
-
-    // We calculate the hashes in-place to avoid redundant allocations
-    let mut hashes = hashes.as_mut_slice();
-
-    let mut amount_of_hashes;
-    while {
-      amount_of_hashes = (hashes.len() - length_prefix_len) / 32;
-      amount_of_hashes > 1
-    } {
-      let complete_pairs = amount_of_hashes / 2;
-      for i in 0 .. complete_pairs {
-        // We hash the i'th pair of 32-byte elements
-        let hash = {
-          // The starting position of these elements
-          let start = length_prefix_len + ((2 * i) * 32);
-          /*
-            We write the tag to the byte before this pair starts.
-
-            In the case of the first pair, this corrupts a byte of the length prefix.
-
-            In the case of the nth pair, this corrupts the prior-hashed pair's second element.
-            This is safe as it was already hashed and the data there won't be read again. While
-            we do write, and later read, the carried hash outputs to this buffer, those will
-            always be written to either a pair's first element or a (n * prior-)hashed pair's
-            second element (where n > 2), never the immediately preceding pair's second element.
-          */
-          hashes[start - 1] = tag;
-          sp_core::blake2_256(&hashes[(start - 1) .. (start + 64)])
-        };
-        // We save this hash to the i'th position
-        {
-          let start = length_prefix_len + (i * 32);
-          hashes[start .. (start + 32)].copy_from_slice(hash.as_slice());
-        }
-      }
-
-      let mut end_of_hashes_on_next_layer = length_prefix_len + (complete_pairs * 32);
-
-      // If there was an odd hash which wasn't hashed on this layer, carry it
-      if (amount_of_hashes % 2) == 1 {
-        let mut hash = [0xff; 32];
-        hash.copy_from_slice(&hashes[(hashes.len() - 32) ..]);
-
-        let start = end_of_hashes_on_next_layer;
-        end_of_hashes_on_next_layer = start + 32;
-        hashes[start .. end_of_hashes_on_next_layer].copy_from_slice(&hash);
-      }
-
-      hashes = &mut hashes[.. end_of_hashes_on_next_layer];
-    }
-
-    match hashes[length_prefix_len ..].try_into() {
-      Ok(root) => Self { root },
-      Err(_) => Self::EMPTY,
-    }
-  }
 }
 
 /// An unbalanced Merkle tree which is incrementally created.
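
For readers tracing the removal: per layer, the removed method hashed each adjacent pair as blake2_256 over `tag || left || right`, temporarily writing the tag into the byte preceding the pair to avoid allocating. A naive, allocating sketch of that per-pair step:

// What the removed in-place step computed for each adjacent pair, written
// plainly for clarity; `tag` is the branch tag
fn hash_pair(tag: u8, pair: &[u8; 64]) -> [u8; 32] {
  let mut preimage = [0u8; 65];
  preimage[0] = tag;
  preimage[1 ..].copy_from_slice(pair);
  sp_core::blake2_256(&preimage)
}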

@@ -177,6 +99,8 @@ impl IncrementalUnbalancedMerkleTree {
   /// Append a leaf to this Merkle tree.
   ///
   /// The conditions on this leaf are the same as defined by `UnbalancedMerkleTree::new`.
+  ///
+  /// This will not calculate any hashes not necessary for the eventual root.
   pub fn append(&mut self, tag: u8, leaf: [u8; 32]) {
     self.branches.push((1, leaf));
     self.reduce(tag);
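
A minimal usage sketch of the incremental tree, restricted to the calls this commit itself makes (`new`, `append`, `calculate`); the tag value and leaves are arbitrary stand-ins:

fn incremental_root() -> UnbalancedMerkleTree {
  const TAG: u8 = 1;
  let mut tree = IncrementalUnbalancedMerkleTree::new();
  // Append leaves one at a time; only the hashes needed for the eventual
  // root are calculated
  for leaf in [[1u8; 32], [2u8; 32], [3u8; 32]] {
    tree.append(TAG, leaf);
  }
  tree.calculate(TAG)
}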

@@ -18,6 +18,8 @@ ignored = ["scale", "scale-info"]
 workspace = true
 
 [dependencies]
+borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"] }
+
 scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] }
 scale-info = { version = "2", default-features = false, features = ["derive"] }
 

@@ -1,18 +1,172 @@
-#[frame_support::pallet]
-mod core_pallet {
-  use ::alloc::*;
+use core::marker::PhantomData;
+use alloc::{vec, vec::Vec};
+
+use borsh::{BorshSerialize, BorshDeserialize};
 
 use frame_support::pallet_prelude::*;
 
+use serai_abi::{
+  primitives::merkle::{UnbalancedMerkleTree, IncrementalUnbalancedMerkleTree as Iumt},
+  *,
+};
+
+struct IncrementalUnbalancedMerkleTree<
+  T: frame_support::StorageValue<Vec<u8>, Query = Option<Vec<u8>>>,
+  const BRANCH_TAG: u8 = 1,
+  const LEAF_TAG: u8 = 0,
+>(PhantomData<T>);
+impl<
+    T: frame_support::StorageValue<Vec<u8>, Query = Option<Vec<u8>>>,
+    const BRANCH_TAG: u8,
+    const LEAF_TAG: u8,
+  > IncrementalUnbalancedMerkleTree<T, BRANCH_TAG, LEAF_TAG>
+{
+  /// Create a new Merkle tree, expecting there to be none already present.
+  ///
+  /// Panics if a Merkle tree was already present.
+  fn new_expecting_none() {
+    T::mutate(|value| {
+      assert!(value.is_none());
+      *value = Some(borsh::to_vec(&Iumt::new()).unwrap());
+    });
+  }
+  /// Append a leaf to the Merkle tree.
+  ///
+  /// Panics if no Merkle tree was present.
+  fn append<L: BorshSerialize>(leaf: &L) {
+    let leaf = sp_core::blake2_256(&borsh::to_vec(&(LEAF_TAG, leaf)).unwrap());
+
+    T::mutate(|value| {
+      let mut tree = Iumt::deserialize_reader(&mut value.as_ref().unwrap().as_slice()).unwrap();
+      tree.append(BRANCH_TAG, leaf);
+      *value = Some(borsh::to_vec(&tree).unwrap());
+    })
+  }
+  /// Get the unbalanced Merkle tree.
+  ///
+  /// Panics if no Merkle tree was present.
+  fn get() -> UnbalancedMerkleTree {
+    Iumt::deserialize_reader(&mut T::get().unwrap().as_slice()).unwrap().calculate(BRANCH_TAG)
+  }
+  /// Take the Merkle tree.
+  ///
+  /// Panics if no Merkle tree was present.
+  fn take() -> UnbalancedMerkleTree {
+    T::mutate(|value| {
+      let tree = Iumt::deserialize_reader(&mut value.as_ref().unwrap().as_slice()).unwrap();
+      *value = None;
+      tree.calculate(BRANCH_TAG)
+    })
+  }
+}
+
+#[frame_support::pallet]
+mod pallet {
+  use super::*;
+
+  /// The set of all blocks previously added to the blockchain.
+  #[pallet::storage]
+  pub type Blocks<T: Config> = StorageMap<_, Identity, T::Hash, (), OptionQuery>;
+  /// The Merkle tree of all blocks added to the blockchain.
+  #[pallet::storage]
+  #[pallet::unbounded]
+  pub(super) type BlocksCommitment<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
+  pub(super) type BlocksCommitmentMerkle<T> = IncrementalUnbalancedMerkleTree<BlocksCommitment<T>>;
+
+  /// The Merkle tree of all transactions within the current block.
+  #[pallet::storage]
+  #[pallet::unbounded]
+  pub(super) type BlockTransactionsCommitment<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
+  pub(super) type BlockTransactionsCommitmentMerkle<T> =
+    IncrementalUnbalancedMerkleTree<BlockTransactionsCommitment<T>>;
+
+  /// The hashes of events caused by the current transaction.
+  #[pallet::storage]
+  #[pallet::unbounded]
+  pub(super) type TransactionEvents<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
+  pub(super) type TransactionEventsMerkle<T> = IncrementalUnbalancedMerkleTree<
+    TransactionEvents<T>,
+    TRANSACTION_EVENTS_COMMITMENT_BRANCH_TAG,
+    TRANSACTION_EVENTS_COMMITMENT_LEAF_TAG,
+  >;
+  /// The roots of the Merkle trees of each transaction's events.
+  #[pallet::storage]
+  #[pallet::unbounded]
+  pub(super) type BlockEventsCommitment<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
+  pub(super) type BlockEventsCommitmentMerkle<T> = IncrementalUnbalancedMerkleTree<
+    BlockEventsCommitment<T>,
+    EVENTS_COMMITMENT_BRANCH_TAG,
+    EVENTS_COMMITMENT_LEAF_TAG,
+  >;
 
+  /// A mapping from an account to its next nonce.
   #[pallet::storage]
   pub type NextNonce<T: Config> =
     StorageMap<_, Blake2_128Concat, T::AccountId, T::Nonce, ValueQuery>;
-  #[pallet::storage]
-  pub type Blocks<T: Config> = StorageMap<_, Identity, T::Hash, (), OptionQuery>;
 
   #[pallet::config]
-  pub trait Config: frame_system::Config {}
+  pub trait Config:
+    frame_system::Config<
+      Block: sp_runtime::traits::Block<Header: sp_runtime::traits::Header<Hash: Into<[u8; 32]>>>,
+    >
+  {
+  }
+
   #[pallet::pallet]
   pub struct Pallet<T>(_);
+
+  impl<T: Config> Pallet<T> {
+    pub fn start_transaction() {
+      TransactionEventsMerkle::<T>::new_expecting_none();
+    }
+    // TODO: Have this called
+    pub fn on_event(event: impl TryInto<serai_abi::Event>) {
+      if let Ok(event) = event.try_into() {
+        TransactionEventsMerkle::<T>::append(&event);
+      }
+    }
+    pub fn end_transaction(transaction_hash: [u8; 32]) {
+      BlockTransactionsCommitmentMerkle::<T>::append(&transaction_hash);
+
+      let transaction_events_root = TransactionEventsMerkle::<T>::take().root;
+
+      // Append the leaf (the transaction's hash and its events' root) to the block's events'
+      // commitment
+      BlockEventsCommitmentMerkle::<T>::append(&(&transaction_hash, &transaction_events_root));
+    }
+  }
+}
+pub(super) use pallet::*;
+
+pub struct StartOfBlock<T: Config>(PhantomData<T>);
+impl<T: Config> frame_support::traits::PreInherents for StartOfBlock<T> {
+  fn pre_inherents() {
+    let parent_hash = frame_system::Pallet::<T>::parent_hash();
+    Blocks::<T>::set(parent_hash, Some(()));
+    // TODO: Better detection of genesis
+    if parent_hash == Default::default() {
+      BlocksCommitmentMerkle::<T>::new_expecting_none();
+    } else {
+      let parent_hash: [u8; 32] = parent_hash.into();
+      BlocksCommitmentMerkle::<T>::append(&parent_hash);
+    }
+
+    BlockTransactionsCommitmentMerkle::<T>::new_expecting_none();
+    BlockEventsCommitmentMerkle::<T>::new_expecting_none();
+  }
+}
+
+pub struct EndOfBlock<T: Config>(PhantomData<T>);
+impl<T: Config> frame_support::traits::PostTransactions for EndOfBlock<T> {
+  fn post_transactions() {
+    frame_system::Pallet::<T>::deposit_log(sp_runtime::generic::DigestItem::Consensus(
+      SeraiExecutionDigest::CONSENSUS_ID,
+      borsh::to_vec(&SeraiExecutionDigest {
+        builds_upon: BlocksCommitmentMerkle::<T>::get(),
+        transactions_commitment: BlockTransactionsCommitmentMerkle::<T>::take(),
+        events_commitment: BlockEventsCommitmentMerkle::<T>::take(),
+      })
+      .unwrap(),
+    ));
+  }
 }
-pub(super) use core_pallet::*;
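
Putting the hooks and pallet methods together, the intended per-block flow is roughly the following. This is a sketch with hypothetical transaction data; in practice the executive and the hooks drive these calls, not manual code:

fn apply_block_sketch<T: Config>(transactions: Vec<([u8; 32], Vec<serai_abi::Event>)>) {
  // StartOfBlock::pre_inherents has already recorded the parent block and
  // seeded the block-level Merkle trees
  for (transaction_hash, events) in transactions {
    Pallet::<T>::start_transaction();
    for event in events {
      Pallet::<T>::on_event(event);
    }
    // Folds the transaction's hash and its events' root into the block's trees
    Pallet::<T>::end_transaction(transaction_hash);
  }
  // EndOfBlock::post_transactions then deposits the SeraiExecutionDigest
}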

@@ -110,10 +110,10 @@ impl frame_system::Config for Runtime {
   // No migrations set
   type SingleBlockMigrations = ();
   type MultiBlockMigrator = ();
-  // We don't define any block-level hooks at this time
-  type PreInherents = ();
+  type PreInherents = core_pallet::StartOfBlock<Runtime>;
   type PostInherents = ();
-  type PostTransactions = ();
+  type PostTransactions = core_pallet::EndOfBlock<Runtime>;
 }
 
 impl core_pallet::Config for Runtime {}

@@ -226,16 +226,9 @@ impl serai_abi::TransactionContext for Context {
   fn current_time(&self) -> Option<u64> {
     todo!("TODO")
   }
-  /// Get, and consume, the next nonce for an account.
-  fn get_and_consume_next_nonce(&self, signer: &SeraiAddress) -> u32 {
-    core_pallet::NextNonce::<Runtime>::mutate(signer, |value| {
-      // Copy the current value for the next nonce
-      let next_nonce = *value;
-      // Increment the next nonce in the DB, consuming the current value
-      *value += 1;
-      // Return the existing value
-      next_nonce
-    })
+  /// Get the next nonce for an account.
+  fn next_nonce(&self, signer: &SeraiAddress) -> u32 {
+    core_pallet::NextNonce::<Runtime>::get(signer)
   }
   /// If the signer can pay the SRI fee.
   fn can_pay_fee(

@@ -245,6 +238,14 @@ impl serai_abi::TransactionContext for Context {
   ) -> Result<(), sp_runtime::transaction_validity::TransactionValidityError> {
     todo!("TODO")
   }
+
+  fn start_transaction(&self) {
+    Core::start_transaction();
+  }
+  /// Consume the next nonce for an account.
+  fn consume_next_nonce(&self, signer: &SeraiAddress) {
+    core_pallet::NextNonce::<Runtime>::mutate(signer, |value| *value += 1);
+  }
   /// Have the transaction pay its SRI fee.
   fn pay_fee(
     &self,

@@ -253,6 +254,9 @@ impl serai_abi::TransactionContext for Context {
   ) -> Result<(), sp_runtime::transaction_validity::TransactionValidityError> {
     todo!("TODO")
   }
+  fn end_transaction(&self, transaction_hash: [u8; 32]) {
+    Core::end_transaction(transaction_hash);
+  }
 }
 
 /* TODO
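
The motivation for splitting the old `get_and_consume_next_nonce` is that validation should be read-only while application mutates state. A hedged sketch of a caller under that split; the surrounding checks are illustrative:

fn validate_then_apply<C: serai_abi::TransactionContext>(
  ctx: &C,
  signer: &SeraiAddress,
  tx_nonce: u32,
  transaction_hash: [u8; 32],
) {
  // Validation (e.g. in the transaction pool): only peek at the nonce
  assert_eq!(ctx.next_nonce(signer), tx_nonce);

  // Application: actually consume the nonce within the transaction's scope
  ctx.start_transaction();
  ctx.consume_next_nonce(signer);
  // ... execute the transaction itself ...
  ctx.end_transaction(transaction_hash);
}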