32 Commits

Author SHA1 Message Date
Luke Parker
a63a86ba79 Test Ether InInstructions 2025-01-23 09:30:54 -05:00
Luke Parker
e922264ebf Add selector collisions to the IERC20 lib 2025-01-23 08:25:59 -05:00
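For background on the commit above: Solidity dispatches external calls on a 4-byte selector, the first four bytes of keccak256 over the function's signature, so distinct signatures can collide on the same selector. A minimal sketch of the derivation, assuming the sha3 crate (illustrative, not this repository's code):

use sha3::{Digest, Keccak256};

// A function selector is the first four bytes of keccak256(signature).
fn selector(signature: &str) -> [u8; 4] {
    let hash = Keccak256::digest(signature.as_bytes());
    [hash[0], hash[1], hash[2], hash[3]]
}

fn main() {
    // The well-known ERC-20 transfer selector, 0xa9059cbb
    assert_eq!(selector("transfer(address,uint256)"), [0xa9, 0x05, 0x9c, 0xbb]);
}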
Luke Parker
7e53eff642 Fix the async flow with the Router
It had sequential async calls with O(n) depth and a variety of redundant
calls. There was also a constant of... 4? 5? calls for each item. Now, the
total sequence depth is just 3-4.
2025-01-23 06:16:58 -05:00
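As an illustration of the flattening described above (hypothetical functions, not the Router's actual calls): awaiting independent futures one after another costs one round of latency each, while joining them costs one round total.

// Hypothetical fetches standing in for independent RPC calls.
async fn fetch_a() -> u64 { 1 }
async fn fetch_b() -> u64 { 2 }
async fn fetch_c() -> u64 { 3 }

// Sequence depth 3: each call completes before the next starts.
async fn sequential() -> u64 {
    fetch_a().await + fetch_b().await + fetch_c().await
}

// Sequence depth 1: all three run concurrently.
async fn flattened() -> u64 {
    let (a, b, c) = tokio::join!(fetch_a(), fetch_b(), fetch_c());
    a + b + c
}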
Luke Parker
669b8b776b Work on testing the Router
Completes the `Executed` enum in the router. Adds an `Escape` struct. Both are
needed for testing purposes.

Documents the intent and reasoning behind the gas constants.

Adds modernized tests around key rotation and the escape hatch.

Also updates the rest of the codebase which had accumulated errors.
2025-01-23 02:06:06 -05:00
Luke Parker
6508957cbc Make a proper nonReentrant modifier
Previously, execute couldn't be called twice within a single TX. Now, it
can.

Also adds a bit more context to the escape hatch events/errors.
2025-01-23 00:04:44 -05:00
Luke Parker
373e794d2c Check the escaped-to address has code set
Also documents the choice not to use a confirmation flow there.
2025-01-22 22:45:51 -05:00
Luke Parker
c8f3a32fdf Replace custom read/write impls in router with borsh 2025-01-21 03:49:29 -05:00
Luke Parker
f690bf831f Remove old code still marked TODO 2025-01-19 02:36:34 -05:00
Luke Parker
0b30ac175e Restore workspace-wide clippy
Fixes accumulated errors in the Substrate code. Modifies the runtime build to
work with a modern clippy. Removes e2e tests from the workspace.
2025-01-19 02:27:35 -05:00
Luke Parker
47560fa9a9 Test manually implemented serializations in the Router lib 2025-01-19 00:45:26 -05:00
Luke Parker
9d57c4eb4d Downscope dependencies in serai-processor-ethereum-primitives, const-hex decode bytecode in ethereum-schnorr-contract 2025-01-19 00:16:50 -05:00
Luke Parker
642ba00952 Update Deployer README, 80-character line length 2025-01-19 00:03:56 -05:00
Luke Parker
3c9c12d320 Test the Deployer contract 2025-01-18 23:58:38 -05:00
Luke Parker
f6b52b3fd3 Maximum line length of 80 in Deployer.sol 2025-01-18 15:22:58 -05:00
Luke Parker
0d906363a0 Simplify and test deterministically_sign 2025-01-18 15:13:39 -05:00
Luke Parker
8222ce78d8 Correct accumulated errors in the processor 2025-01-18 12:41:57 -05:00
Luke Parker
cb906242e7 2025 nightly
Supersedes #640.
2025-01-18 12:41:25 -05:00
Luke Parker
2a19e9da93 Update to libp2p 0.54
This is the same libp2p Substrate uses as of
https://github.com/paritytech/polkadot-sdk/pull/6248.
2025-01-17 04:50:15 -05:00
Luke Parker
2226dd59cc Comment all dependencies in substrate/node
Causes the Cargo.lock to no longer include the substrate dependencies
(including its copy of libp2p).
2025-01-17 04:09:27 -05:00
Luke Parker
be2098d2e1 Remove Serai from the ConfirmDkgTask 2025-01-15 21:00:50 -05:00
Luke Parker
6b41f32371 Correct handling of InvalidNonce within the coordinator 2025-01-15 20:48:54 -05:00
Luke Parker
19b87c7f5a Add the DKG confirmation flow
Finishes the coordinator redo
2025-01-15 20:29:57 -05:00
Luke Parker
505f1b20a4 Correct re-attempts for the DKG Confirmation protocol
Also spawns the SetKeys task.
2025-01-15 17:49:41 -05:00
Luke Parker
8b52b921f3 Have the Tributary scanner yield DKG confirmation signing protocol data 2025-01-15 15:16:30 -05:00
Luke Parker
f36bbcba25 Flatten the map of preprocesses/shares, send Participant index with DkgParticipation 2025-01-15 14:24:51 -05:00
Luke Parker
167826aa88 Implement SeraiAddress <-> Participant mapping and add RemoveParticipant transactions 2025-01-15 12:51:35 -05:00
Luke Parker
bea4f92b7a Fix parity-db builds for the Coordinator 2025-01-15 12:10:11 -05:00
Luke Parker
7312fa8d3c Spawn PublishSlashReportTask
Updates it so that it'll try for every network instead of returning after any
network fails.

Uses the SlashReport type throughout the codebase.
2025-01-15 12:08:28 -05:00
Luke Parker
92a4cceeeb Spawn PublishBatchTask
Also removes the expectation that Batches published via it are sent in an
ordered fashion. That won't be true if the signing protocols complete
out-of-order (as is possible when we sign them in parallel).
2025-01-15 11:21:55 -05:00
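One plausible shape for publishing in order despite out-of-order completion (a sketch with hypothetical types, not PublishBatchTask's actual implementation): buffer completed batches by their sequential id and flush whichever contiguous prefix is ready.

use std::collections::HashMap;

// Hypothetical stand-in for a signed batch with a sequential id.
struct SignedBatch {
    id: u32,
}

struct OrderedPublisher {
    next_to_publish: u32,
    pending: HashMap<u32, SignedBatch>,
}

impl OrderedPublisher {
    // Intake a batch completed in any order, returning every batch which is
    // now publishable, in order.
    fn intake(&mut self, batch: SignedBatch) -> Vec<SignedBatch> {
        self.pending.insert(batch.id, batch);
        let mut ready = vec![];
        while let Some(batch) = self.pending.remove(&self.next_to_publish) {
            ready.push(batch);
            self.next_to_publish += 1;
        }
        ready
    }
}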
Luke Parker
3357181fe2 Handle sign::ProcessorMessage::[Preprocesses, Shares] 2025-01-15 10:47:47 -05:00
Luke Parker
7ce5bdad44 Don't add transactions for topics which have yet to be recognized 2025-01-15 07:01:24 -05:00
Luke Parker
0de3fda921 Further space out requests for cosigns from the network 2025-01-15 05:59:56 -05:00
116 changed files with 4149 additions and 1999 deletions


@@ -1 +1 @@
nightly-2024-07-01
nightly-2025-01-01

Cargo.lock (generated): file diff suppressed because it is too large


@@ -144,9 +144,9 @@ members = [
"tests/docker",
"tests/message-queue",
"tests/processor",
"tests/coordinator",
"tests/full-stack",
# TODO "tests/processor",
# TODO "tests/coordinator",
# TODO "tests/full-stack",
"tests/reproducible-runtime",
]


@@ -30,53 +30,13 @@ pub trait Get {
is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
randomly, or take any other action, at time of write or at time of commit.
#[must_use]
pub trait DbTxn: Sized + Send + Get {
pub trait DbTxn: Send + Get {
/// Write a value to this key.
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
/// Delete the value from this key.
fn del(&mut self, key: impl AsRef<[u8]>);
/// Commit this transaction.
fn commit(self);
/// Close this transaction.
///
/// This is equivalent to `Drop` on transactions which can be dropped. This is explicit and works
/// with transactions which can't be dropped.
fn close(self) {
drop(self);
}
}
// Credit for the idea goes to https://jack.wrenn.fyi/blog/undroppable
pub struct Undroppable<T>(Option<T>);
impl<T> Drop for Undroppable<T> {
fn drop(&mut self) {
// Use an assertion at compile time to prevent this code from compiling if generated
#[allow(clippy::assertions_on_constants)]
const {
assert!(false, "Undroppable DbTxn was dropped. Ensure all code paths call commit or close");
}
}
}
impl<T: DbTxn> Get for Undroppable<T> {
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
self.0.as_ref().unwrap().get(key)
}
}
impl<T: DbTxn> DbTxn for Undroppable<T> {
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
self.0.as_mut().unwrap().put(key, value);
}
fn del(&mut self, key: impl AsRef<[u8]>) {
self.0.as_mut().unwrap().del(key);
}
fn commit(mut self) {
self.0.take().unwrap().commit();
let _ = core::mem::ManuallyDrop::new(self);
}
fn close(mut self) {
drop(self.0.take().unwrap());
let _ = core::mem::ManuallyDrop::new(self);
}
}
/// A database supporting atomic transaction.
@@ -91,10 +51,6 @@ pub trait Db: 'static + Send + Sync + Clone + Get {
let dst_len = u8::try_from(item_dst.len()).unwrap();
[[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
}
/// Open a new transaction which may be dropped.
fn unsafe_txn(&mut self) -> Self::Transaction<'_>;
/// Open a new transaction which must be committed or closed.
fn txn(&mut self) -> Undroppable<Self::Transaction<'_>> {
Undroppable(Some(self.unsafe_txn()))
}
/// Open a new transaction.
fn txn(&mut self) -> Self::Transaction<'_>;
}
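A usage sketch of the restored API, using only names visible in these diffs (MemDb is the in-memory implementation in the next hunk):

use serai_db::{Db, DbTxn, Get, MemDb};

fn main() {
    let mut db = MemDb::new();
    let mut txn = db.txn();
    txn.put(b"key", b"value");
    // With Undroppable gone, a txn dropped without commit simply discards
    // its writes
    txn.commit();
    assert_eq!(db.get(b"key"), Some(b"value".to_vec()));
}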


@@ -74,7 +74,7 @@ impl Get for MemDb {
}
impl Db for MemDb {
type Transaction<'a> = MemDbTxn<'a>;
fn unsafe_txn(&mut self) -> MemDbTxn<'_> {
fn txn(&mut self) -> MemDbTxn<'_> {
MemDbTxn(self, HashMap::new(), HashSet::new())
}
}


@@ -37,7 +37,7 @@ impl Get for Arc<ParityDb> {
}
impl Db for Arc<ParityDb> {
type Transaction<'a> = Transaction<'a>;
fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
fn txn(&mut self) -> Self::Transaction<'_> {
Transaction(self, vec![])
}
}


@@ -39,7 +39,7 @@ impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
}
impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
type Transaction<'a> = Transaction<'a, T>;
fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
fn txn(&mut self) -> Self::Transaction<'_> {
let mut opts = WriteOptions::default();
opts.set_sync(true);
Transaction(self.transaction_opt(&opts, &Default::default()), &**self)


@@ -11,7 +11,7 @@ use crate::{Client, Error};
#[allow(dead_code)]
#[derive(Debug)]
pub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate) &'a Client);
impl<'a> Response<'a> {
impl Response<'_> {
pub fn status(&self) -> StatusCode {
self.0.status()
}


@@ -25,12 +25,13 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
blake2 = { version = "0.10", default-features = false, features = ["std"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
frost = { package = "modular-frost", path = "../crypto/frost" }
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }
dkg = { path = "../crypto/dkg", default-features = false, features = ["std"] }
frost-schnorrkel = { path = "../crypto/schnorrkel" }
hex = { version = "0.4", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
zalloc = { path = "../common/zalloc" }
serai-db = { path = "../common/db" }
@@ -43,9 +44,6 @@ tributary-sdk = { path = "./tributary-sdk" }
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
hex = { version = "0.4", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }


@@ -24,15 +24,6 @@ pub(crate) struct CosignDelayTask<D: Db> {
pub(crate) db: D,
}
struct AwaitUndroppable<T: DbTxn>(Option<core::mem::ManuallyDrop<Undroppable<T>>>);
impl<T: DbTxn> Drop for AwaitUndroppable<T> {
fn drop(&mut self) {
if let Some(mut txn) = self.0.take() {
(unsafe { core::mem::ManuallyDrop::take(&mut txn) }).close();
}
}
}
impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
type Error = DoesNotError;
@@ -44,18 +35,14 @@ impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
// Receive the next block to mark as cosigned
let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
txn.close();
break;
};
// Calculate when we should mark it as valid
let time_valid =
SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
// Sleep until then
let mut txn = AwaitUndroppable(Some(core::mem::ManuallyDrop::new(txn)));
tokio::time::sleep(time_valid.duration_since(SystemTime::now()).unwrap_or(Duration::ZERO))
.await;
let mut txn = core::mem::ManuallyDrop::into_inner(txn.0.take().unwrap());
// Set the cosigned block
LatestCosignedBlockNumber::set(&mut txn, &block_number);
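The sleep-until pattern above, extracted as a standalone sketch (a deadline already in the past saturates to a zero-length sleep):

use std::time::{Duration, SystemTime};

// Sleep until `when`, treating already-elapsed deadlines as ready now.
async fn sleep_until(when: SystemTime) {
    let remaining = when.duration_since(SystemTime::now()).unwrap_or(Duration::ZERO);
    tokio::time::sleep(remaining).await;
}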


@@ -1,5 +1,5 @@
use core::future::Future;
use std::time::{Duration, SystemTime};
use std::time::{Duration, Instant, SystemTime};
use serai_db::*;
use serai_task::ContinuallyRan;
@@ -77,17 +77,27 @@ pub(crate) fn currently_evaluated_global_session(getter: &impl Get) -> Option<[u
pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
pub(crate) db: D,
pub(crate) request: R,
pub(crate) last_request_for_cosigns: Instant,
}
impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
type Error = String;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
let should_request_cosigns = |last_request_for_cosigns: &mut Instant| {
const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(60);
if Instant::now() < (*last_request_for_cosigns + REQUEST_COSIGNS_SPACING) {
return false;
}
*last_request_for_cosigns = Instant::now();
true
};
async move {
let mut known_cosign = None;
let mut made_progress = false;
loop {
let mut txn = self.db.unsafe_txn();
let mut txn = self.db.txn();
let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
else {
break;
@@ -118,12 +128,13 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
// Check if the sum weight doesn't cross the required threshold
if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
// Request the necessary cosigns over the network
// TODO: Add a timer to ensure this isn't called too often
self
.request
.request_notable_cosigns(global_session)
.await
.map_err(|e| format!("{e:?}"))?;
if should_request_cosigns(&mut self.last_request_for_cosigns) {
self
.request
.request_notable_cosigns(global_session)
.await
.map_err(|e| format!("{e:?}"))?;
}
// We return an error so the delay before this task is run again increases
return Err(format!(
"notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
@@ -180,11 +191,13 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
// If this session hasn't yet produced notable cosigns, then we presume we'll see
// the desired non-notable cosigns as part of normal operations, without needing to
// explicitly request them
self
.request
.request_notable_cosigns(global_session)
.await
.map_err(|e| format!("{e:?}"))?;
if should_request_cosigns(&mut self.last_request_for_cosigns) {
self
.request
.request_notable_cosigns(global_session)
.await
.map_err(|e| format!("{e:?}"))?;
}
// We return an error so the delay before this task is run again increases
return Err(format!(
"block (#{block_number}) wasn't yet cosigned. this should resolve shortly",


@@ -70,7 +70,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();
for block_number in start_block_number ..= latest_block_number {
let mut txn = self.db.unsafe_txn();
let mut txn = self.db.txn();
let (block, mut has_events) =
block_has_events_justifying_a_cosign(&self.serai, block_number)


@@ -3,7 +3,7 @@
#![deny(missing_docs)]
use core::{fmt::Debug, future::Future};
use std::{sync::Arc, collections::HashMap};
use std::{sync::Arc, collections::HashMap, time::Instant};
use blake2::{Digest, Blake2s256};
@@ -104,6 +104,24 @@ pub struct Cosign {
pub cosigner: NetworkId,
}
impl CosignIntent {
/// Convert this into a `Cosign`.
pub fn into_cosign(self, cosigner: NetworkId) -> Cosign {
let CosignIntent { global_session, block_number, block_hash, notable: _ } = self;
Cosign { global_session, block_number, block_hash, cosigner }
}
}
impl Cosign {
/// The message to sign when signing this cosign.
///
/// This must be signed with schnorrkel, the context set to `COSIGN_CONTEXT`.
pub fn signature_message(&self) -> Vec<u8> {
// We use a schnorrkel context to domain-separate this
self.encode()
}
}
/// A signed cosign.
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct SignedCosign {
@@ -118,7 +136,7 @@ impl SignedCosign {
let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
signer.verify_simple(COSIGN_CONTEXT, &self.cosign.encode(), &signature).is_ok()
signer.verify_simple(COSIGN_CONTEXT, &self.cosign.signature_message(), &signature).is_ok()
}
}
@@ -288,8 +306,12 @@ impl<D: Db> Cosigning<D> {
.continually_run(intend_task, vec![evaluator_task_handle]),
);
tokio::spawn(
(evaluator::CosignEvaluatorTask { db: db.clone(), request })
.continually_run(evaluator_task, vec![delay_task_handle]),
(evaluator::CosignEvaluatorTask {
db: db.clone(),
request,
last_request_for_cosigns: Instant::now(),
})
.continually_run(evaluator_task, vec![delay_task_handle]),
);
tokio::spawn(
(delay::CosignDelayTask { db: db.clone() })
@@ -424,7 +446,7 @@ impl<D: Db> Cosigning<D> {
// Since we verified this cosign's signature, and have a chain sufficiently long, handle the
// cosign
let mut txn = self.db.unsafe_txn();
let mut txn = self.db.txn();
if !faulty {
// If this is for a future global session, we don't acknowledge this cosign at this time
@@ -480,30 +502,3 @@ impl<D: Db> Cosigning<D> {
res
}
}
mod tests {
use super::*;
struct RNC;
impl RequestNotableCosigns for RNC {
/// The error type which may be encountered when requesting notable cosigns.
type Error = ();
/// Request the notable cosigns for this global session.
fn request_notable_cosigns(
&self,
global_session: [u8; 32],
) -> impl Send + Future<Output = Result<(), Self::Error>> {
async move { Ok(()) }
}
}
#[tokio::test]
async fn test() {
let db: serai_db::MemDb = serai_db::MemDb::new();
let serai = unsafe { core::mem::transmute(0u64) };
let request = RNC;
let tasks = vec![];
let _ = Cosigning::spawn(db, serai, request, tasks);
core::future::pending().await
}
}
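For reference, how schnorrkel contexts provide the domain separation relied upon above (a standalone sketch assuming schnorrkel's std/getrandom features; the context bytes are illustrative, not the project's COSIGN_CONTEXT):

use schnorrkel::{signing_context, Keypair};

fn main() {
    let keypair = Keypair::generate();
    let msg = b"message";
    let sig = keypair.sign(signing_context(b"cosign").bytes(msg));
    // Verifies under the same context...
    assert!(keypair.public.verify(signing_context(b"cosign").bytes(msg), &sig).is_ok());
    // ...and fails under any other, preventing cross-protocol replay
    assert!(keypair.public.verify(signing_context(b"other").bytes(msg), &sig).is_err());
}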


@@ -35,7 +35,7 @@ tributary-sdk = { path = "../../tributary-sdk" }
futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["sync"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
libp2p = { version = "0.54", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
log = { version = "0.4", default-features = false, features = ["std"] }
serai-task = { path = "../../../common/task", version = "0.1" }


@@ -11,8 +11,7 @@ use serai_client::primitives::PublicKey as Public;
use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libp2p::{
core::UpgradeInfo,
InboundUpgrade, OutboundUpgrade,
core::upgrade::{UpgradeInfo, InboundConnectionUpgrade, OutboundConnectionUpgrade},
identity::{self, PeerId},
noise,
};
@@ -119,12 +118,18 @@ impl UpgradeInfo for OnlyValidators {
}
}
impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundUpgrade<S> for OnlyValidators {
impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundConnectionUpgrade<S>
for OnlyValidators
{
type Output = (PeerId, noise::Output<S>);
type Error = io::Error;
type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;
fn upgrade_inbound(self, socket: S, info: Self::Info) -> Self::Future {
fn upgrade_inbound(
self,
socket: S,
info: <Self as UpgradeInfo>::Info,
) -> <Self as InboundConnectionUpgrade<S>>::Future {
Box::pin(async move {
let (dialer_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
.unwrap()
@@ -147,12 +152,18 @@ impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundUpgrade<S> for O
}
}
impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundUpgrade<S> for OnlyValidators {
impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundConnectionUpgrade<S>
for OnlyValidators
{
type Output = (PeerId, noise::Output<S>);
type Error = io::Error;
type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;
fn upgrade_outbound(self, socket: S, info: Self::Info) -> Self::Future {
fn upgrade_outbound(
self,
socket: S,
info: <Self as UpgradeInfo>::Info,
) -> <Self as OutboundConnectionUpgrade<S>>::Future {
Box::pin(async move {
let (listener_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
.unwrap()


@@ -50,7 +50,7 @@ mod ping;
/// The request-response messages and behavior
mod reqres;
use reqres::{RequestId, Request, Response};
use reqres::{InboundRequestId, Request, Response};
/// The gossip messages and behavior
mod gossip;
@@ -66,14 +66,6 @@ use dial::DialTask;
const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')
// usize::max, manually implemented, as max isn't a const fn
const MAX_LIBP2P_MESSAGE_SIZE: usize =
if gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE > reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE {
gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE
} else {
reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE
};
fn peer_id_from_public(public: PublicKey) -> PeerId {
// 0 represents the identity Multihash, that no hash was performed
// It's an internal constant so we can't refer to the constant inside libp2p
@@ -143,9 +135,9 @@ struct Libp2pInner {
signed_cosigns: Mutex<mpsc::UnboundedReceiver<SignedCosign>>,
signed_cosigns_send: mpsc::UnboundedSender<SignedCosign>,
heartbeat_requests: Mutex<mpsc::UnboundedReceiver<(RequestId, ValidatorSet, [u8; 32])>>,
notable_cosign_requests: Mutex<mpsc::UnboundedReceiver<(RequestId, [u8; 32])>>,
inbound_request_responses: mpsc::UnboundedSender<(RequestId, Response)>,
heartbeat_requests: Mutex<mpsc::UnboundedReceiver<(InboundRequestId, ValidatorSet, [u8; 32])>>,
notable_cosign_requests: Mutex<mpsc::UnboundedReceiver<(InboundRequestId, [u8; 32])>>,
inbound_request_responses: mpsc::UnboundedSender<(InboundRequestId, Response)>,
}
/// The libp2p-backed P2P implementation.
@@ -176,19 +168,9 @@ impl Libp2p {
Ok(OnlyValidators { serai_key: serai_key.clone(), noise_keypair: noise_keypair.clone() })
};
let new_yamux = || {
let mut config = yamux::Config::default();
// 1 MiB default + max message size
config.set_max_buffer_size((1024 * 1024) + MAX_LIBP2P_MESSAGE_SIZE);
// 256 KiB default + max message size
config
.set_receive_window_size(((256 * 1024) + MAX_LIBP2P_MESSAGE_SIZE).try_into().unwrap());
config
};
let mut swarm = SwarmBuilder::with_existing_identity(identity::Keypair::generate_ed25519())
.with_tokio()
.with_tcp(TcpConfig::default().nodelay(true), new_only_validators, new_yamux)
.with_tcp(TcpConfig::default().nodelay(true), new_only_validators, yamux::Config::default)
.unwrap()
.with_behaviour(|_| Behavior {
allow_list: allow_block_list::Behaviour::default(),


@@ -10,7 +10,7 @@ use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libp2p::request_response::{
self, Codec as CodecTrait, Event as GenericEvent, Config, Behaviour, ProtocolSupport,
};
pub use request_response::{RequestId, Message};
pub use request_response::{InboundRequestId, Message};
use serai_cosign::SignedCosign;
@@ -129,7 +129,6 @@ pub(crate) type Event = GenericEvent<Request, Response>;
pub(crate) type Behavior = Behaviour<Codec>;
pub(crate) fn new_behavior() -> Behavior {
let mut config = Config::default();
config.set_request_timeout(Duration::from_secs(5));
let config = Config::default().with_request_timeout(Duration::from_secs(5));
Behavior::new([(PROTOCOL, ProtocolSupport::Full)], config)
}


@@ -17,7 +17,7 @@ use serai_cosign::SignedCosign;
use futures_util::StreamExt;
use libp2p::{
identity::PeerId,
request_response::{RequestId, ResponseChannel},
request_response::{InboundRequestId, OutboundRequestId, ResponseChannel},
swarm::{dial_opts::DialOpts, SwarmEvent, Swarm},
};
@@ -65,12 +65,12 @@ pub(crate) struct SwarmTask {
tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec<u8>)>,
outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
outbound_request_responses: HashMap<RequestId, oneshot::Sender<Response>>,
outbound_request_responses: HashMap<OutboundRequestId, oneshot::Sender<Response>>,
inbound_request_response_channels: HashMap<RequestId, ResponseChannel<Response>>,
heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
inbound_request_response_channels: HashMap<InboundRequestId, ResponseChannel<Response>>,
heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ValidatorSet, [u8; 32])>,
notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>,
inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>,
}
impl SwarmTask {
@@ -222,25 +222,21 @@ impl SwarmTask {
}
}
SwarmEvent::Behaviour(
BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event)
) => {
// This *is* an exhaustive match as these events are empty enums
match event {}
}
SwarmEvent::Behaviour(
BehaviorEvent::Ping(ping::Event { peer: _, connection, result, })
) => {
if result.is_err() {
self.swarm.close_connection(connection);
SwarmEvent::Behaviour(event) => {
match event {
BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event) => {
// This *is* an exhaustive match as these events are empty enums
match event {}
}
BehaviorEvent::Ping(ping::Event { peer: _, connection, result, }) => {
if result.is_err() {
self.swarm.close_connection(connection);
}
}
BehaviorEvent::Reqres(event) => self.handle_reqres(event),
BehaviorEvent::Gossip(event) => self.handle_gossip(event),
}
}
SwarmEvent::Behaviour(BehaviorEvent::Reqres(event)) => {
self.handle_reqres(event)
}
SwarmEvent::Behaviour(BehaviorEvent::Gossip(event)) => {
self.handle_gossip(event)
}
// We don't handle any of these
SwarmEvent::IncomingConnection { .. } |
@@ -250,7 +246,14 @@ impl SwarmTask {
SwarmEvent::ExpiredListenAddr { .. } |
SwarmEvent::ListenerClosed { .. } |
SwarmEvent::ListenerError { .. } |
SwarmEvent::Dialing { .. } => {}
SwarmEvent::Dialing { .. } |
SwarmEvent::NewExternalAddrCandidate { .. } |
SwarmEvent::ExternalAddrConfirmed { .. } |
SwarmEvent::ExternalAddrExpired { .. } |
SwarmEvent::NewExternalAddrOfPeer { .. } => {}
// Requires as SwarmEvent is non-exhaustive
_ => log::warn!("unhandled SwarmEvent: {event:?}"),
}
}
@@ -321,9 +324,9 @@ impl SwarmTask {
outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ValidatorSet, [u8; 32])>,
notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>,
inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>,
) {
tokio::spawn(
SwarmTask {


@@ -51,6 +51,14 @@ impl Validators {
serai: impl Borrow<Serai>,
sessions: impl Borrow<HashMap<NetworkId, Session>>,
) -> Result<Vec<(NetworkId, Session, HashSet<PeerId>)>, SeraiError> {
/*
This uses the latest finalized block, not the latest cosigned block, which should be fine as
in the worst case, we'd connect to unexpected validators. They still shouldn't be able to
bypass the cosign protocol unless a historical global session was malicious, in which case
the cosign protocol already breaks.
Besides, we can't connect to historical validators, only the current validators.
*/
let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
let temporal_serai = temporal_serai.validator_sets();


@@ -3,9 +3,11 @@ use std::{path::Path, fs};
pub(crate) use serai_db::{Get, DbTxn, Db as DbTrait};
use serai_db::{create_db, db_channel};
use dkg::Participant;
use serai_client::{
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet},
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
};
use serai_cosign::SignedCosign;
@@ -13,7 +15,7 @@ use serai_coordinator_substrate::NewSetInformation;
use serai_coordinator_tributary::Transaction;
#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
pub(crate) type Db = serai_db::ParityDb;
pub(crate) type Db = std::sync::Arc<serai_db::ParityDb>;
#[cfg(feature = "rocksdb")]
pub(crate) type Db = serai_db::RocksDB;
@@ -76,6 +78,10 @@ create_db! {
LastProcessorMessage: (network: NetworkId) -> u64,
// Cosigns we produced and tried to intake yet incurred an error while doing so
ErroneousCosigns: () -> Vec<SignedCosign>,
// The keys to confirm and set on the Serai network
KeysToConfirm: (set: ValidatorSet) -> KeyPair,
// The key was set on the Serai network
KeySet: (set: ValidatorSet) -> (),
}
}
@@ -93,21 +99,51 @@ mod _internal_db {
db_channel! {
Coordinator {
// Tributary transactions to publish
TributaryTransactions: (set: ValidatorSet) -> Transaction,
// Tributary transactions to publish from the Processor messages
TributaryTransactionsFromProcessorMessages: (set: ValidatorSet) -> Transaction,
// Tributary transactions to publish from the DKG confirmation task
TributaryTransactionsFromDkgConfirmation: (set: ValidatorSet) -> Transaction,
// Participants to remove
RemoveParticipant: (set: ValidatorSet) -> Participant,
}
}
}
pub(crate) struct TributaryTransactions;
impl TributaryTransactions {
pub(crate) struct TributaryTransactionsFromProcessorMessages;
impl TributaryTransactionsFromProcessorMessages {
pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, tx: &Transaction) {
// If this set has yet to be retired, send this transaction
if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
_internal_db::TributaryTransactions::send(txn, set, tx);
_internal_db::TributaryTransactionsFromProcessorMessages::send(txn, set, tx);
}
}
pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<Transaction> {
_internal_db::TributaryTransactions::try_recv(txn, set)
_internal_db::TributaryTransactionsFromProcessorMessages::try_recv(txn, set)
}
}
pub(crate) struct TributaryTransactionsFromDkgConfirmation;
impl TributaryTransactionsFromDkgConfirmation {
pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, tx: &Transaction) {
// If this set has yet to be retired, send this transaction
if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
_internal_db::TributaryTransactionsFromDkgConfirmation::send(txn, set, tx);
}
}
pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<Transaction> {
_internal_db::TributaryTransactionsFromDkgConfirmation::try_recv(txn, set)
}
}
pub(crate) struct RemoveParticipant;
impl RemoveParticipant {
pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, participant: Participant) {
// If this set has yet to be retired, send this transaction
if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
_internal_db::RemoveParticipant::send(txn, set, &participant);
}
}
pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<Participant> {
_internal_db::RemoveParticipant::try_recv(txn, set)
}
}


@@ -0,0 +1,434 @@
use core::{ops::Deref, future::Future};
use std::{boxed::Box, collections::HashMap};
use zeroize::Zeroizing;
use rand_core::OsRng;
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost_schnorrkel::{
frost::{
dkg::{Participant, musig::musig},
FrostError,
sign::*,
},
Schnorrkel,
};
use serai_db::{DbTxn, Db as DbTrait};
use serai_client::{
primitives::SeraiAddress,
validator_sets::primitives::{ValidatorSet, musig_context, set_keys_message},
};
use serai_task::{DoesNotError, ContinuallyRan};
use serai_coordinator_substrate::{NewSetInformation, Keys};
use serai_coordinator_tributary::{Transaction, DkgConfirmationMessages};
use crate::{KeysToConfirm, KeySet, TributaryTransactionsFromDkgConfirmation};
fn schnorrkel() -> Schnorrkel {
Schnorrkel::new(b"substrate") // TODO: Pull the constant for this
}
fn our_i(
set: &NewSetInformation,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
data: &HashMap<Participant, Vec<u8>>,
) -> Participant {
let public = SeraiAddress((Ristretto::generator() * key.deref()).to_bytes());
let mut our_i = None;
for participant in data.keys() {
let validator_index = usize::from(u16::from(*participant) - 1);
let (validator, _weight) = set.validators[validator_index];
if validator == public {
our_i = Some(*participant);
}
}
our_i.unwrap()
}
// Take a HashMap of participations with non-contiguous Participants and convert them to a
// contiguous sequence.
//
// The input data is expected to not include our own data, which also won't be in the output data.
//
// Returns the participations keyed by their new contiguous Participant indices. On error,
// returns the original Participant whose participation failed to deserialize.
fn make_contiguous<T>(
our_i: Participant,
mut data: HashMap<Participant, Vec<u8>>,
transform: impl Fn(Vec<u8>) -> std::io::Result<T>,
) -> Result<HashMap<Participant, T>, Participant> {
assert!(!data.contains_key(&our_i));
let mut ordered_participants = data.keys().copied().collect::<Vec<_>>();
ordered_participants.sort_by_key(|participant| u16::from(*participant));
let mut our_i = Some(our_i);
let mut contiguous = HashMap::new();
let mut i = 1;
for participant in ordered_participants {
// If this is the first participant after our own index, increment to account for our index
if let Some(our_i_value) = our_i {
if u16::from(participant) > u16::from(our_i_value) {
i += 1;
our_i = None;
}
}
let contiguous_index = Participant::new(i).unwrap();
let data = match transform(data.remove(&participant).unwrap()) {
Ok(data) => data,
Err(_) => Err(participant)?,
};
contiguous.insert(contiguous_index, data);
i += 1;
}
Ok(contiguous)
}
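A worked illustration of the re-indexing (hypothetical indices): each participant's contiguous index is its 1-based rank within the sorted union of the peers' indices and our own. With our index 3 and peers {1, 4, 6}:

// The contiguous index make_contiguous assigns: the 1-based rank of `who`
// within the sorted union of `peers` and `our_i`.
fn contiguous_rank(our_i: u16, peers: &[u16], who: u16) -> u16 {
    let mut all = peers.to_vec();
    all.push(our_i);
    all.sort_unstable();
    u16::try_from(all.iter().position(|&p| p == who).unwrap() + 1).unwrap()
}

fn main() {
    let (our_i, peers) = (3, [1, 4, 6]);
    assert_eq!(contiguous_rank(our_i, &peers, 1), 1);
    assert_eq!(contiguous_rank(our_i, &peers, our_i), 2); // the hole left for us
    assert_eq!(contiguous_rank(our_i, &peers, 4), 3);
    assert_eq!(contiguous_rank(our_i, &peers, 6), 4);
}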
fn handle_frost_error<T>(result: Result<T, FrostError>) -> Result<T, Participant> {
match &result {
Ok(_) => Ok(result.unwrap()),
Err(FrostError::InvalidPreprocess(participant) | FrostError::InvalidShare(participant)) => {
Err(*participant)
}
// All of these should be unreachable
Err(
FrostError::InternalError(_) |
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_),
) => {
result.unwrap();
unreachable!("continued execution after unwrapping Result::Err");
}
}
}
#[rustfmt::skip]
enum Signer {
Preprocess { attempt: u32, seed: CachedPreprocess, preprocess: [u8; 64] },
Share {
attempt: u32,
musig_validators: Vec<SeraiAddress>,
share: [u8; 32],
machine: Box<AlgorithmSignatureMachine<Ristretto, Schnorrkel>>,
},
}
/// Performs the DKG Confirmation protocol.
pub(crate) struct ConfirmDkgTask<CD: DbTrait, TD: DbTrait> {
db: CD,
set: NewSetInformation,
tributary_db: TD,
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
signer: Option<Signer>,
}
impl<CD: DbTrait, TD: DbTrait> ConfirmDkgTask<CD, TD> {
pub(crate) fn new(
db: CD,
set: NewSetInformation,
tributary_db: TD,
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
) -> Self {
Self { db, set, tributary_db, key, signer: None }
}
fn slash(db: &mut CD, set: ValidatorSet, validator: SeraiAddress) {
let mut txn = db.txn();
TributaryTransactionsFromDkgConfirmation::send(
&mut txn,
set,
&Transaction::RemoveParticipant { participant: validator, signed: Default::default() },
);
txn.commit();
}
fn preprocess(
db: &mut CD,
set: ValidatorSet,
attempt: u32,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
signer: &mut Option<Signer>,
) {
// Perform the preprocess
let (machine, preprocess) = AlgorithmMachine::new(
schnorrkel(),
// We use a 1-of-1 Musig here as we don't know who will actually be in this Musig yet
musig(&musig_context(set), key, &[Ristretto::generator() * key.deref()]).unwrap().into(),
)
.preprocess(&mut OsRng);
// We take the preprocess so we can use it in a distinct machine with the actual Musig
// parameters
let seed = machine.cache();
let mut preprocess_bytes = [0u8; 64];
preprocess_bytes.copy_from_slice(&preprocess.serialize());
let preprocess = preprocess_bytes;
let mut txn = db.txn();
// If this attempt has already been preprocessed for, the Tributary will de-duplicate it
// This may mean the Tributary preprocess is distinct from ours, but we check for that later
TributaryTransactionsFromDkgConfirmation::send(
&mut txn,
set,
&Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed: Default::default() },
);
txn.commit();
*signer = Some(Signer::Preprocess { attempt, seed, preprocess });
}
}
impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
type Error = DoesNotError;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
let mut made_progress = false;
// If we were sent a key to set, create the signer for it
if self.signer.is_none() && KeysToConfirm::get(&self.db, self.set.set).is_some() {
// Create and publish the initial preprocess
Self::preprocess(&mut self.db, self.set.set, 0, &self.key, &mut self.signer);
made_progress = true;
}
// If we have keys to confirm, handle all messages from the tributary
if let Some(key_pair) = KeysToConfirm::get(&self.db, self.set.set) {
// Handle all messages from the Tributary
loop {
let mut tributary_txn = self.tributary_db.txn();
let Some(msg) = DkgConfirmationMessages::try_recv(&mut tributary_txn, self.set.set)
else {
break;
};
match msg {
messages::sign::CoordinatorMessage::Reattempt {
id: messages::sign::SignId { attempt, .. },
} => {
// Create and publish the preprocess for the specified attempt
Self::preprocess(&mut self.db, self.set.set, attempt, &self.key, &mut self.signer);
}
messages::sign::CoordinatorMessage::Preprocesses {
id: messages::sign::SignId { attempt, .. },
mut preprocesses,
} => {
// Confirm the preprocess we're expected to sign with is the one we locally have
// It may be different if we rebooted and made a second preprocess for this attempt
let Some(Signer::Preprocess { attempt: our_attempt, seed, preprocess }) =
self.signer.take()
else {
// If this message is not expected, commit the txn to drop it and move on
// At some point, we'll get a Reattempt and reset
tributary_txn.commit();
break;
};
// Determine the MuSig key signed with
let musig_validators = {
let mut ordered_participants = preprocesses.keys().copied().collect::<Vec<_>>();
ordered_participants.sort_by_key(|participant| u16::from(*participant));
let mut res = vec![];
for participant in ordered_participants {
let (validator, _weight) =
self.set.validators[usize::from(u16::from(participant) - 1)];
res.push(validator);
}
res
};
let musig_public_keys = musig_validators
.iter()
.map(|key| {
Ristretto::read_G(&mut key.0.as_slice())
.expect("Serai validator had invalid public key")
})
.collect::<Vec<_>>();
let keys =
musig(&musig_context(self.set.set), &self.key, &musig_public_keys).unwrap().into();
// Rebuild the machine
let (machine, preprocess_from_cache) =
AlgorithmSignMachine::from_cache(schnorrkel(), keys, seed);
assert_eq!(preprocess.as_slice(), preprocess_from_cache.serialize().as_slice());
// Ensure this is a consistent signing session
let our_i = our_i(&self.set, &self.key, &preprocesses);
let consistent = (attempt == our_attempt) &&
(preprocesses.remove(&our_i).unwrap().as_slice() == preprocess.as_slice());
if !consistent {
tributary_txn.commit();
break;
}
// Reformat the preprocesses into the expected format for Musig
let preprocesses = match make_contiguous(our_i, preprocesses, |preprocess| {
machine.read_preprocess(&mut preprocess.as_slice())
}) {
Ok(preprocesses) => preprocesses,
// This yields the *original participant index*
Err(participant) => {
Self::slash(
&mut self.db,
self.set.set,
self.set.validators[usize::from(u16::from(participant) - 1)].0,
);
tributary_txn.commit();
break;
}
};
// Calculate our share
let (machine, share) = match handle_frost_error(
machine.sign(preprocesses, &set_keys_message(&self.set.set, &key_pair)),
) {
Ok((machine, share)) => (machine, share),
// This yields the *musig participant index*
Err(participant) => {
Self::slash(
&mut self.db,
self.set.set,
musig_validators[usize::from(u16::from(participant) - 1)],
);
tributary_txn.commit();
break;
}
};
// Send our share
let share = <[u8; 32]>::try_from(share.serialize()).unwrap();
let mut txn = self.db.txn();
TributaryTransactionsFromDkgConfirmation::send(
&mut txn,
self.set.set,
&Transaction::DkgConfirmationShare { attempt, share, signed: Default::default() },
);
txn.commit();
self.signer = Some(Signer::Share {
attempt,
musig_validators,
share,
machine: Box::new(machine),
});
}
messages::sign::CoordinatorMessage::Shares {
id: messages::sign::SignId { attempt, .. },
mut shares,
} => {
let Some(Signer::Share { attempt: our_attempt, musig_validators, share, machine }) =
self.signer.take()
else {
tributary_txn.commit();
break;
};
// Ensure this is a consistent signing session
let our_i = our_i(&self.set, &self.key, &shares);
let consistent = (attempt == our_attempt) &&
(shares.remove(&our_i).unwrap().as_slice() == share.as_slice());
if !consistent {
tributary_txn.commit();
break;
}
// Reformat the shares into the expected format for Musig
let shares = match make_contiguous(our_i, shares, |share| {
machine.read_share(&mut share.as_slice())
}) {
Ok(shares) => shares,
// This yields the *original participant index*
Err(participant) => {
Self::slash(
&mut self.db,
self.set.set,
self.set.validators[usize::from(u16::from(participant) - 1)].0,
);
tributary_txn.commit();
break;
}
};
match handle_frost_error(machine.complete(shares)) {
Ok(signature) => {
// Create the bitvec of the participants
let mut signature_participants;
{
use bitvec::prelude::*;
signature_participants = bitvec![u8, Lsb0; 0; 0];
let mut i = 0;
for (validator, _) in &self.set.validators {
if Some(validator) == musig_validators.get(i) {
signature_participants.push(true);
i += 1;
} else {
signature_participants.push(false);
}
}
}
// This is safe to call multiple times as it'll just change which *valid*
// signature to publish
let mut txn = self.db.txn();
Keys::set(
&mut txn,
self.set.set,
key_pair.clone(),
signature_participants,
signature.into(),
);
txn.commit();
}
// This yields the *musig participant index*
Err(participant) => {
Self::slash(
&mut self.db,
self.set.set,
musig_validators[usize::from(u16::from(participant) - 1)],
);
tributary_txn.commit();
break;
}
}
}
}
// Because we successfully handled this message, note we made progress
made_progress = true;
tributary_txn.commit();
}
}
// Check if the key has been set on Serai
if KeysToConfirm::get(&self.db, self.set.set).is_some() &&
KeySet::get(&self.db, self.set.set).is_some()
{
// Take the keys to confirm so we never instantiate the signer again
let mut txn = self.db.txn();
KeysToConfirm::take(&mut txn, self.set.set);
KeySet::take(&mut txn, self.set.set);
txn.commit();
// Drop our own signer
// The task won't die until the Tributary does, but now it'll never do anything again
self.signer = None;
made_progress = true;
}
Ok(made_progress)
}
}
}
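The participation bitvec construction above, as a standalone sketch (it assumes, as the task does, that the MuSig signer list is sorted in validator-set order):

use bitvec::prelude::*;

// One bit per validator in set order, set iff the validator was a signer.
fn participation_bits(validators: &[u16], signers: &[u16]) -> BitVec<u8, Lsb0> {
    let mut bits = bitvec![u8, Lsb0; 0; 0];
    let mut i = 0;
    for validator in validators {
        if signers.get(i) == Some(validator) {
            bits.push(true);
            i += 1;
        } else {
            bits.push(false);
        }
    }
    bits
}

fn main() {
    let bits = participation_bits(&[10, 20, 30, 40], &[20, 40]);
    assert_eq!(bits.iter().by_vals().collect::<Vec<_>>(), vec![false, true, false, true]);
}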


@@ -14,8 +14,8 @@ use borsh::BorshDeserialize;
use tokio::sync::mpsc;
use serai_client::{
primitives::{NetworkId, PublicKey},
validator_sets::primitives::ValidatorSet,
primitives::{NetworkId, PublicKey, SeraiAddress, Signature},
validator_sets::primitives::{ValidatorSet, KeyPair},
Serai,
};
use message_queue::{Service, client::MessageQueue};
@@ -23,13 +23,17 @@ use message_queue::{Service, client::MessageQueue};
use serai_task::{Task, TaskHandle, ContinuallyRan};
use serai_cosign::{Faulted, SignedCosign, Cosigning};
use serai_coordinator_substrate::{CanonicalEventStream, EphemeralEventStream, SignSlashReport};
use serai_coordinator_tributary::{Signed, Transaction, SubstrateBlockPlans};
use serai_coordinator_substrate::{
CanonicalEventStream, EphemeralEventStream, SignSlashReport, SetKeysTask, SignedBatches,
PublishBatchTask, SlashReports, PublishSlashReportTask,
};
use serai_coordinator_tributary::{SigningProtocolRound, Signed, Transaction, SubstrateBlockPlans};
mod db;
use db::*;
mod tributary;
mod dkg_confirmation;
mod substrate;
use substrate::SubstrateTask;
@@ -145,11 +149,25 @@ fn spawn_cosigning<D: serai_db::Db>(
});
}
async fn handle_processor_messages(
async fn handle_network(
mut db: impl serai_db::Db,
message_queue: Arc<MessageQueue>,
serai: Arc<Serai>,
network: NetworkId,
) {
// Spawn the task to publish batches for this network
{
let (publish_batch_task_def, publish_batch_task) = Task::new();
tokio::spawn(
PublishBatchTask::new(db.clone(), serai.clone(), network)
.unwrap()
.continually_run(publish_batch_task_def, vec![]),
);
// Forget its handle so it always runs in the background
core::mem::forget(publish_batch_task);
}
// Handle Processor messages
loop {
let (msg_id, msg) = {
let msg = message_queue.next(Service::Processor(network)).await;
@@ -180,7 +198,7 @@ async fn handle_processor_messages(
messages::ProcessorMessage::KeyGen(msg) => match msg {
messages::key_gen::ProcessorMessage::Participation { session, participation } => {
let set = ValidatorSet { network, session };
TributaryTransactions::send(
TributaryTransactionsFromProcessorMessages::send(
&mut txn,
set,
&Transaction::DkgParticipation { participation, signed: Signed::default() },
@@ -190,45 +208,84 @@ async fn handle_processor_messages(
session,
substrate_key,
network_key,
} => todo!("TODO Transaction::DkgConfirmationPreprocess"),
messages::key_gen::ProcessorMessage::Blame { session, participant } => {
let set = ValidatorSet { network, session };
TributaryTransactions::send(
} => {
KeysToConfirm::set(
&mut txn,
set,
&Transaction::RemoveParticipant {
participant: todo!("TODO"),
signed: Signed::default(),
},
ValidatorSet { network, session },
&KeyPair(
PublicKey::from_raw(substrate_key),
network_key
.try_into()
.expect("generated a network key which exceeds the maximum key length"),
),
);
}
messages::key_gen::ProcessorMessage::Blame { session, participant } => {
RemoveParticipant::send(&mut txn, ValidatorSet { network, session }, participant);
}
},
messages::ProcessorMessage::Sign(msg) => match msg {
messages::sign::ProcessorMessage::InvalidParticipant { session, participant } => {
let set = ValidatorSet { network, session };
TributaryTransactions::send(
RemoveParticipant::send(&mut txn, ValidatorSet { network, session }, participant);
}
messages::sign::ProcessorMessage::Preprocesses { id, preprocesses } => {
let set = ValidatorSet { network, session: id.session };
if id.attempt == 0 {
// Batches are declared by their intent to be signed
if let messages::sign::VariantSignId::Batch(hash) = id.id {
TributaryTransactionsFromProcessorMessages::send(
&mut txn,
set,
&Transaction::Batch { hash },
);
}
}
TributaryTransactionsFromProcessorMessages::send(
&mut txn,
set,
&Transaction::RemoveParticipant {
participant: todo!("TODO"),
&Transaction::Sign {
id: id.id,
attempt: id.attempt,
round: SigningProtocolRound::Preprocess,
data: preprocesses,
signed: Signed::default(),
},
);
}
messages::sign::ProcessorMessage::Preprocesses { id, preprocesses } => {
todo!("TODO Transaction::Batch + Transaction::Sign")
messages::sign::ProcessorMessage::Shares { id, shares } => {
let set = ValidatorSet { network, session: id.session };
TributaryTransactionsFromProcessorMessages::send(
&mut txn,
set,
&Transaction::Sign {
id: id.id,
attempt: id.attempt,
round: SigningProtocolRound::Share,
data: shares,
signed: Signed::default(),
},
);
}
messages::sign::ProcessorMessage::Shares { id, shares } => todo!("TODO Transaction::Sign"),
},
messages::ProcessorMessage::Coordinator(msg) => match msg {
messages::coordinator::ProcessorMessage::CosignedBlock { cosign } => {
SignedCosigns::send(&mut txn, &cosign);
}
messages::coordinator::ProcessorMessage::SignedBatch { batch } => {
todo!("TODO PublishBatchTask")
SignedBatches::send(&mut txn, &batch);
}
messages::coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
todo!("TODO PublishSlashReportTask")
messages::coordinator::ProcessorMessage::SignedSlashReport {
session,
slash_report,
signature,
} => {
SlashReports::set(
&mut txn,
ValidatorSet { network, session },
slash_report,
Signature(signature),
);
}
},
messages::ProcessorMessage::Substrate(msg) => match msg {
@@ -243,7 +300,7 @@ async fn handle_processor_messages(
for (session, plans) in by_session {
let set = ValidatorSet { network, session };
SubstrateBlockPlans::set(&mut txn, set, block, &plans);
TributaryTransactions::send(
TributaryTransactionsFromProcessorMessages::send(
&mut txn,
set,
&Transaction::SubstrateBlock { hash: block },
@@ -309,10 +366,16 @@ async fn main() {
// Cleanup all historic Tributaries
while let Some(to_cleanup) = TributaryCleanup::try_recv(&mut txn) {
prune_tributary_db(to_cleanup);
// Remove the keys to confirm for this network
KeysToConfirm::take(&mut txn, to_cleanup);
KeySet::take(&mut txn, to_cleanup);
// Drain the cosign intents created for this set
while !Cosigning::<Db>::intended_cosigns(&mut txn, to_cleanup).is_empty() {}
// Drain the transactions to publish for this set
while TributaryTransactions::try_recv(&mut txn, to_cleanup).is_some() {}
while TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, to_cleanup).is_some() {}
while TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, to_cleanup).is_some() {}
// Drain the participants to remove for this set
while RemoveParticipant::try_recv(&mut txn, to_cleanup).is_some() {}
// Remove the SignSlashReport notification
SignSlashReport::try_recv(&mut txn, to_cleanup);
}
@@ -376,7 +439,7 @@ async fn main() {
EphemeralEventStream::new(
db.clone(),
serai.clone(),
PublicKey::from_raw((<Ristretto as Ciphersuite>::generator() * serai_key.deref()).to_bytes()),
SeraiAddress((<Ristretto as Ciphersuite>::generator() * serai_key.deref()).to_bytes()),
)
.continually_run(substrate_ephemeral_task_def, vec![substrate_task]),
);
@@ -417,12 +480,32 @@ async fn main() {
.continually_run(substrate_task_def, vec![]),
);
// Handle all of the Processors' messages
// Handle each of the networks
for network in serai_client::primitives::NETWORKS {
if network == NetworkId::Serai {
continue;
}
tokio::spawn(handle_processor_messages(db.clone(), message_queue.clone(), network));
tokio::spawn(handle_network(db.clone(), message_queue.clone(), serai.clone(), network));
}
// Spawn the task to set keys
{
let (set_keys_task_def, set_keys_task) = Task::new();
tokio::spawn(
SetKeysTask::new(db.clone(), serai.clone()).continually_run(set_keys_task_def, vec![]),
);
// Forget its handle so it always runs in the background
core::mem::forget(set_keys_task);
}
// Spawn the task to publish slash reports
{
let (publish_slash_report_task_def, publish_slash_report_task) = Task::new();
tokio::spawn(
PublishSlashReportTask::new(db, serai).continually_run(publish_slash_report_task_def, vec![]),
);
// Always have this run in the background
core::mem::forget(publish_slash_report_task);
}
// Run the spawned tasks ad-infinitum


@@ -19,7 +19,7 @@ use serai_task::ContinuallyRan;
use serai_coordinator_tributary::Transaction;
use serai_coordinator_p2p::P2p;
use crate::Db;
use crate::{Db, KeySet};
pub(crate) struct SubstrateTask<P: P2p> {
pub(crate) serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
@@ -47,8 +47,9 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
};
match msg {
// TODO: Stop trying to confirm the DKG
messages::substrate::CoordinatorMessage::SetKeys { .. } => todo!("TODO"),
messages::substrate::CoordinatorMessage::SetKeys { session, .. } => {
KeySet::set(&mut txn, ValidatorSet { network, session }, &());
}
messages::substrate::CoordinatorMessage::SlashesReported { session } => {
let prior_retired = crate::db::RetiredTributary::get(&txn, network);
let next_to_be_retired =


@@ -21,10 +21,21 @@ use message_queue::{Service, Metadata, client::MessageQueue};
use serai_cosign::{Faulted, CosignIntent, Cosigning};
use serai_coordinator_substrate::{NewSetInformation, SignSlashReport};
use serai_coordinator_tributary::{Transaction, ProcessorMessages, CosignIntents, ScanTributaryTask};
use serai_coordinator_tributary::{
Topic, Transaction, ProcessorMessages, CosignIntents, RecognizedTopics, ScanTributaryTask,
};
use serai_coordinator_p2p::P2p;
use crate::{Db, TributaryTransactions};
use crate::{
Db, TributaryTransactionsFromProcessorMessages, TributaryTransactionsFromDkgConfirmation,
RemoveParticipant, dkg_confirmation::ConfirmDkgTask,
};
create_db! {
Coordinator {
PublishOnRecognition: (set: ValidatorSet, topic: Topic) -> Transaction,
}
}
db_channel! {
Coordinator {
@@ -147,12 +158,101 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan
}
}
/// Adds all of the transactions sent via `TributaryTransactions`.
#[must_use]
async fn add_signed_unsigned_transaction<TD: DbTrait, P: P2p>(
tributary: &Tributary<TD, Transaction, P>,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
mut tx: Transaction,
) -> bool {
// If this is a signed transaction, sign it
if matches!(tx.kind(), TransactionKind::Signed(_, _)) {
tx.sign(&mut OsRng, tributary.genesis(), key);
}
let res = tributary.add_transaction(tx.clone()).await;
match &res {
// Fresh publication, already published
Ok(true | false) => {}
Err(
TransactionError::TooLargeTransaction |
TransactionError::InvalidSigner |
TransactionError::InvalidSignature |
TransactionError::InvalidContent,
) => {
panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}");
}
// InvalidNonce may be out-of-order TXs, not invalid ones, but we only create nonce #n+1 after
// on-chain inclusion of the TX with nonce #n, so it is invalid within our context unless the
// issue is this transaction was already included on-chain
Err(TransactionError::InvalidNonce) => {
let TransactionKind::Signed(order, signed) = tx.kind() else {
panic!("non-Signed transaction had InvalidNonce");
};
let next_nonce = tributary
.next_nonce(&signed.signer, &order)
.await
.expect("signer who is a present validator didn't have a nonce");
assert!(next_nonce != signed.nonce);
// We're publishing an old transaction
if next_nonce > signed.nonce {
return true;
}
panic!("nonce in transaction wasn't contiguous with nonce on-chain");
}
// We've published too many transactions recently
Err(TransactionError::TooManyInMempool) => {
return false;
}
// This isn't a Provided transaction so this should never be hit
Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
}
true
}
async fn add_with_recognition_check<TD: DbTrait, P: P2p>(
set: ValidatorSet,
tributary_db: &mut TD,
tributary: &Tributary<TD, Transaction, P>,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
tx: Transaction,
) -> bool {
let kind = tx.kind();
match kind {
TransactionKind::Provided(_) => provide_transaction(set, tributary, tx).await,
TransactionKind::Unsigned | TransactionKind::Signed(_, _) => {
// If this is a transaction with signing data, check the topic is recognized before
// publishing
let topic = tx.topic();
let still_requires_recognition = if let Some(topic) = topic {
(topic.requires_recognition() && (!RecognizedTopics::recognized(tributary_db, set, topic)))
.then_some(topic)
} else {
None
};
if let Some(topic) = still_requires_recognition {
// Queue the transaction until the topic is recognized
// We use the Tributary DB for this so it's cleaned up when the Tributary DB is
let mut tributary_txn = tributary_db.txn();
PublishOnRecognition::set(&mut tributary_txn, set, topic, &tx);
tributary_txn.commit();
} else {
// Actually add the transaction
if !add_signed_unsigned_transaction(tributary, key, tx).await {
return false;
}
}
}
}
true
}
/// Adds all of the transactions sent via `TributaryTransactionsFromProcessorMessages`.
pub(crate) struct AddTributaryTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> {
db: CD,
tributary_db: TD,
tributary: Tributary<TD, Transaction, P>,
set: ValidatorSet,
set: NewSetInformation,
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
}
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactionsTask<CD, TD, P> {
@@ -161,49 +261,87 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactio
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
let mut made_progress = false;
// Provide/add all transactions sent our way
loop {
let mut txn = self.db.txn();
let Some(mut tx) = TributaryTransactions::try_recv(&mut txn, self.set) else { break };
let Some(tx) = TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, self.set.set)
else {
break;
};
let kind = tx.kind();
match kind {
TransactionKind::Provided(_) => provide_transaction(self.set, &self.tributary, tx).await,
TransactionKind::Unsigned | TransactionKind::Signed(_, _) => {
// If this is a signed transaction, sign it
if matches!(kind, TransactionKind::Signed(_, _)) {
tx.sign(&mut OsRng, self.tributary.genesis(), &self.key);
}
// Actually add the transaction
// TODO: If this is a preprocess, make sure the topic has been recognized
let res = self.tributary.add_transaction(tx.clone()).await;
match &res {
// Fresh publication, already published
Ok(true | false) => {}
Err(
TransactionError::TooLargeTransaction |
TransactionError::InvalidSigner |
TransactionError::InvalidNonce |
TransactionError::InvalidSignature |
TransactionError::InvalidContent,
) => {
panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}");
}
// We've published too many transactions recently
// Drop this txn to try to publish it again later on a future iteration
Err(TransactionError::TooManyInMempool) => {
drop(txn);
break;
}
// This isn't a Provided transaction so this should never be hit
Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
}
}
if !add_with_recognition_check(
self.set.set,
&mut self.tributary_db,
&self.tributary,
&self.key,
tx,
)
.await
{
break;
}
made_progress = true;
txn.commit();
}
loop {
let mut txn = self.db.txn();
let Some(tx) = TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, self.set.set)
else {
break;
};
if !add_with_recognition_check(
self.set.set,
&mut self.tributary_db,
&self.tributary,
&self.key,
tx,
)
.await
{
break;
}
made_progress = true;
txn.commit();
}
// Provide/add all transactions due to newly recognized topics
loop {
let mut tributary_txn = self.tributary_db.txn();
let Some(topic) =
RecognizedTopics::try_recv_topic_requiring_recognition(&mut tributary_txn, self.set.set)
else {
break;
};
if let Some(tx) = PublishOnRecognition::take(&mut tributary_txn, self.set.set, topic) {
if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await {
break;
}
}
made_progress = true;
tributary_txn.commit();
}
// Publish any participant removals
loop {
let mut txn = self.db.txn();
let Some(participant) = RemoveParticipant::try_recv(&mut txn, self.set.set) else { break };
let tx = Transaction::RemoveParticipant {
participant: self.set.participant_indexes_reverse_lookup[&participant],
signed: Default::default(),
};
if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await {
break;
}
made_progress = true;
txn.commit();
}
Ok(made_progress)
}
}
@@ -323,6 +461,8 @@ async fn scan_on_new_block<CD: DbTrait, TD: DbTrait, P: P2p>(
/// - Spawn the ScanTributaryTask
/// - Spawn the ProvideCosignCosignedTransactionsTask
/// - Spawn the TributaryProcessorMessagesTask
/// - Spawn the AddTributaryTransactionsTask
/// - Spawn the ConfirmDkgTask
/// - Spawn the SignSlashReportTask
/// - Iterate the scan task whenever a new block occurs (not just on the standard interval)
pub(crate) async fn spawn_tributary<P: P2p>(
@@ -403,38 +543,45 @@ pub(crate) async fn spawn_tributary<P: P2p>(
// Spawn the scan task
let (scan_tributary_task_def, scan_tributary_task) = Task::new();
tokio::spawn(
ScanTributaryTask::<_, P>::new(tributary_db.clone(), &set, reader)
ScanTributaryTask::<_, P>::new(tributary_db.clone(), set.clone(), reader)
// This is the only handle for this TributaryProcessorMessagesTask, so when this task is
// dropped, it will be too
.continually_run(scan_tributary_task_def, vec![scan_tributary_messages_task]),
);
// Spawn the sign slash report task
let (sign_slash_report_task_def, sign_slash_report_task) = Task::new();
tokio::spawn(
(SignSlashReportTask {
db: db.clone(),
tributary_db: tributary_db.clone(),
tributary: tributary.clone(),
set: set.clone(),
key: serai_key.clone(),
})
.continually_run(sign_slash_report_task_def, vec![]),
);
// Spawn the add transactions task
let (add_tributary_transactions_task_def, add_tributary_transactions_task) = Task::new();
tokio::spawn(
(AddTributaryTransactionsTask {
db: db.clone(),
tributary_db,
tributary_db: tributary_db.clone(),
tributary: tributary.clone(),
set: set.set,
key: serai_key,
set: set.clone(),
key: serai_key.clone(),
})
.continually_run(add_tributary_transactions_task_def, vec![]),
);
// Spawn the task to confirm the DKG result
let (confirm_dkg_task_def, confirm_dkg_task) = Task::new();
tokio::spawn(
ConfirmDkgTask::new(db.clone(), set.clone(), tributary_db.clone(), serai_key.clone())
.continually_run(confirm_dkg_task_def, vec![add_tributary_transactions_task]),
);
// Spawn the sign slash report task
let (sign_slash_report_task_def, sign_slash_report_task) = Task::new();
tokio::spawn(
(SignSlashReportTask {
db: db.clone(),
tributary_db,
tributary: tributary.clone(),
set: set.clone(),
key: serai_key,
})
.continually_run(sign_slash_report_task_def, vec![]),
);
// Whenever a new block occurs, immediately run the scan task
// This function also preserves the ProvideCosignCosignedTransactionsTask handle until the
// Tributary is retired, ensuring it isn't dropped prematurely and that the task doesn't run ad
@@ -444,10 +591,6 @@ pub(crate) async fn spawn_tributary<P: P2p>(
set.set,
tributary,
scan_tributary_task,
vec![
provide_cosign_cosigned_transactions_task,
sign_slash_report_task,
add_tributary_transactions_task,
],
vec![provide_cosign_cosigned_transactions_task, confirm_dkg_task, sign_slash_report_task],
));
}

View File

@@ -22,6 +22,9 @@ bitvec = { version = "1", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }
log = { version = "0.4", default-features = false, features = ["std"] }

View File

@@ -180,7 +180,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
batch = Some(ExecutedBatch {
id: *id,
publisher: *publishing_session,
external_network_block_hash: *external_network_block_hash,
external_network_block_hash: external_network_block_hash.0,
in_instructions_hash: *in_instructions_hash,
in_instruction_results: in_instruction_results
.iter()

View File

@@ -4,7 +4,7 @@ use std::sync::Arc;
use futures::stream::{StreamExt, FuturesOrdered};
use serai_client::{
primitives::{PublicKey, NetworkId, EmbeddedEllipticCurve},
primitives::{NetworkId, SeraiAddress, EmbeddedEllipticCurve},
validator_sets::primitives::MAX_KEY_SHARES_PER_SET,
Serai,
};
@@ -26,14 +26,14 @@ create_db!(
pub struct EphemeralEventStream<D: Db> {
db: D,
serai: Arc<Serai>,
validator: PublicKey,
validator: SeraiAddress,
}
impl<D: Db> EphemeralEventStream<D> {
/// Create a new ephemeral event stream.
///
/// Only one of these may exist over the provided database.
pub fn new(db: D, serai: Arc<Serai>, validator: PublicKey) -> Self {
pub fn new(db: D, serai: Arc<Serai>, validator: SeraiAddress) -> Self {
Self { db, serai, validator }
}
}
@@ -145,6 +145,10 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
"block #{block_number} declared a new set but didn't have the participants"
))?
};
let validators = validators
.into_iter()
.map(|(validator, weight)| (SeraiAddress::from(validator), weight))
.collect::<Vec<_>>();
let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
if in_set {
if u16::try_from(validators.len()).is_err() {
@@ -177,14 +181,16 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
embedded_elliptic_curve_keys.push_back(async move {
tokio::try_join!(
// One future to fetch the substrate embedded key
serai
.embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519),
serai.embedded_elliptic_curve_key(
validator.into(),
EmbeddedEllipticCurve::Embedwards25519
),
// One future to fetch the external embedded key, if there is a distinct curve
async {
// `embedded_elliptic_curves` is documented to have the second entry be the
// network-specific curve (if it exists and is distinct from Embedwards25519)
if let Some(curve) = set.network.embedded_elliptic_curves().get(1) {
serai.embedded_elliptic_curve_key(validator, *curve).await.map(Some)
serai.embedded_elliptic_curve_key(validator.into(), *curve).await.map(Some)
} else {
Ok(None)
}
@@ -215,19 +221,22 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
}
}
crate::NewSet::send(
&mut txn,
&NewSetInformation {
set: *set,
serai_block: block.block_hash,
declaration_time: block.time,
// TODO: Why do we have this as an explicit field here?
// Shouldn't thiis be inlined into the Processor's key gen code, where it's used?
threshold: ((total_weight * 2) / 3) + 1,
validators,
evrf_public_keys,
},
);
let mut new_set = NewSetInformation {
set: *set,
serai_block: block.block_hash,
declaration_time: block.time,
// TODO: Why do we have this as an explicit field here?
// Shouldn't this be inlined into the Processor's key gen code, where it's used?
threshold: ((total_weight * 2) / 3) + 1,
validators,
evrf_public_keys,
participant_indexes: Default::default(),
participant_indexes_reverse_lookup: Default::default(),
};
// These aren't serialized, and we immediately serialize and drop this, so this isn't
// necessary. It's just good practice not to have this be dirty
new_set.init_participant_indexes();
crate::NewSet::send(&mut txn, &new_set);
}
}

View File

@@ -2,12 +2,16 @@
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
use std::collections::HashMap;
use scale::{Encode, Decode};
use borsh::{io, BorshSerialize, BorshDeserialize};
use borsh::{BorshSerialize, BorshDeserialize};
use dkg::Participant;
use serai_client::{
primitives::{NetworkId, PublicKey, Signature, SeraiAddress},
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
primitives::{NetworkId, SeraiAddress, Signature},
validator_sets::primitives::{Session, ValidatorSet, KeyPair, SlashReport},
in_instructions::primitives::SignedBatch,
Transaction,
};
@@ -26,22 +30,9 @@ pub use publish_batch::PublishBatchTask;
mod publish_slash_report;
pub use publish_slash_report::PublishSlashReportTask;
fn borsh_serialize_validators<W: io::Write>(
validators: &Vec<(PublicKey, u16)>,
writer: &mut W,
) -> Result<(), io::Error> {
// This doesn't use `encode_to` as `encode_to` panics if the writer returns an error
writer.write_all(&validators.encode())
}
fn borsh_deserialize_validators<R: io::Read>(
reader: &mut R,
) -> Result<Vec<(PublicKey, u16)>, io::Error> {
Decode::decode(&mut scale::IoReader(reader)).map_err(io::Error::other)
}
/// The information for a new set.
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
#[borsh(init = init_participant_indexes)]
pub struct NewSetInformation {
/// The set.
pub set: ValidatorSet,
@@ -52,13 +43,37 @@ pub struct NewSetInformation {
/// The threshold to use.
pub threshold: u16,
/// The validators, with the amount of key shares they have.
#[borsh(
serialize_with = "borsh_serialize_validators",
deserialize_with = "borsh_deserialize_validators"
)]
pub validators: Vec<(PublicKey, u16)>,
pub validators: Vec<(SeraiAddress, u16)>,
/// The eVRF public keys.
///
/// This will have the necessary copies of each validator's keys, one copy per key share
/// (weight), accordingly lining up with `participant_indexes`.
pub evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
/// The participant indexes, indexed by their validator.
#[borsh(skip)]
pub participant_indexes: HashMap<SeraiAddress, Vec<Participant>>,
/// The validators, indexed by their participant indexes.
#[borsh(skip)]
pub participant_indexes_reverse_lookup: HashMap<Participant, SeraiAddress>,
}
impl NewSetInformation {
fn init_participant_indexes(&mut self) {
let mut next_i = 1;
self.participant_indexes = HashMap::with_capacity(self.validators.len());
self.participant_indexes_reverse_lookup = HashMap::with_capacity(self.validators.len());
for (validator, weight) in &self.validators {
let mut these_is = Vec::with_capacity((*weight).into());
for _ in 0 .. *weight {
let this_i = Participant::new(next_i).unwrap();
next_i += 1;
these_is.push(this_i);
self.participant_indexes_reverse_lookup.insert(this_i, *validator);
}
self.participant_indexes.insert(*validator, these_is);
}
}
}
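As a worked example of `init_participant_indexes` (a standalone sketch; `u16` stands in for `Participant` and the names are illustrative):

use std::collections::HashMap;

fn assign_indexes(validators: &[(&'static str, u16)]) -> HashMap<&'static str, Vec<u16>> {
  let mut next_i = 1;
  let mut indexes = HashMap::with_capacity(validators.len());
  for (validator, weight) in validators {
    // Each validator receives one contiguous, one-indexed slot per key share
    let these_is = (0 .. *weight)
      .map(|_| {
        let this_i = next_i;
        next_i += 1;
        this_i
      })
      .collect();
    indexes.insert(*validator, these_is);
  }
  indexes
}

fn main() {
  let indexes = assign_indexes(&[("alice", 2), ("bob", 1)]);
  assert_eq!(indexes["alice"], vec![1, 2]);
  assert_eq!(indexes["bob"], vec![3]);
}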
mod _public_db {
@@ -175,8 +190,6 @@ impl Keys {
pub struct SignedBatches;
impl SignedBatches {
/// Send a `SignedBatch` to publish onto Serai.
///
/// These will be published sequentially. Out-of-order sending risks hanging the task.
pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) {
_public_db::SignedBatches::send(txn, batch.batch.network, batch);
}
@@ -185,10 +198,6 @@ impl SignedBatches {
}
}
/// The slash report was invalid.
#[derive(Debug)]
pub struct InvalidSlashReport;
/// The slash reports to publish onto Serai.
pub struct SlashReports;
impl SlashReports {
@@ -196,30 +205,25 @@ impl SlashReports {
///
/// This only saves the most recent slashes as only a single session is eligible to have its
/// slashes reported at once.
///
/// Returns Err if the slashes are invalid. Returns Ok if the slashes weren't detected as
/// invalid. Slashes may be considered invalid by the Serai blockchain later even if not detected
/// as invalid here.
pub fn set(
txn: &mut impl DbTxn,
set: ValidatorSet,
slashes: Vec<(SeraiAddress, u32)>,
slash_report: SlashReport,
signature: Signature,
) -> Result<(), InvalidSlashReport> {
) {
// If we have a more recent slash report, don't write this historic one
if let Some((existing_session, _)) = _public_db::SlashReports::get(txn, set.network) {
if existing_session.0 >= set.session.0 {
return Ok(());
return;
}
}
let tx = serai_client::validator_sets::SeraiValidatorSets::report_slashes(
set.network,
slashes.try_into().map_err(|_| InvalidSlashReport)?,
slash_report,
signature,
);
_public_db::SlashReports::set(txn, set.network, &(set.session, tx.encode()));
Ok(())
}
pub(crate) fn take(txn: &mut impl DbTxn, network: NetworkId) -> Option<(Session, Transaction)> {
let (session, tx) = _public_db::SlashReports::take(txn, network)?;

View File

@@ -1,14 +1,21 @@
use core::future::Future;
use std::sync::Arc;
use serai_db::{DbTxn, Db};
use serai_client::{primitives::NetworkId, SeraiError, Serai};
#[rustfmt::skip]
use serai_client::{primitives::NetworkId, in_instructions::primitives::SignedBatch, SeraiError, Serai};
use serai_db::{Get, DbTxn, Db, create_db};
use serai_task::ContinuallyRan;
use crate::SignedBatches;
create_db!(
CoordinatorSubstrate {
LastPublishedBatch: (network: NetworkId) -> u32,
BatchesToPublish: (network: NetworkId, batch: u32) -> SignedBatch,
}
);
/// Publish `SignedBatch`s from `SignedBatches` onto Serai.
pub struct PublishBatchTask<D: Db> {
db: D,
@@ -34,32 +41,52 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
let mut made_progress = false;
// Read from SignedBatches, which is sequential, into our own mapping
loop {
let mut txn = self.db.txn();
let Some(batch) = SignedBatches::try_recv(&mut txn, self.network) else {
// No batch to publish at this time
break;
};
// Publish this Batch if it hasn't already been published
// If this is a Batch not yet published, save it into our unordered mapping
if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id) {
BatchesToPublish::set(&mut txn, self.network, batch.batch.id, &batch);
}
txn.commit();
}
// Synchronize our last published batch with the Serai network's
let next_to_publish = {
// This uses the latest finalized block, not the latest cosigned block, which should be
// fine as in the worst case, the only impact is no longer attempting TX publication
let serai = self.serai.as_of_latest_finalized_block().await?;
let last_batch = serai.in_instructions().last_batch_for_network(self.network).await?;
if last_batch < Some(batch.batch.id) {
// This stream of Batches *should* be sequential within the larger context of the Serai
// coordinator. In this library, we use a more relaxed definition and don't assert
// sequence. This does risk hanging the task if Batch #n+1 is sent before Batch #n, but
// that is a documented fault of the `SignedBatches` API.
let mut txn = self.db.txn();
let mut our_last_batch = LastPublishedBatch::get(&txn, self.network);
while our_last_batch < last_batch {
let next_batch = our_last_batch.map(|batch| batch + 1).unwrap_or(0);
// Clean up the Batch to publish since it's already been published
BatchesToPublish::take(&mut txn, self.network, next_batch);
our_last_batch = Some(next_batch);
}
if let Some(last_batch) = our_last_batch {
LastPublishedBatch::set(&mut txn, self.network, &last_batch);
}
last_batch.map(|batch| batch + 1).unwrap_or(0)
};
let made_progress =
if let Some(batch) = BatchesToPublish::get(&self.db, self.network, next_to_publish) {
self
.serai
.publish(&serai_client::in_instructions::SeraiInInstructions::execute_batch(batch))
.await?;
}
txn.commit();
made_progress = true;
}
true
} else {
false
};
Ok(made_progress)
}
}
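The synchronization above reduces to the following, assuming plain `u32` batch IDs (a simplified, standalone sketch): batches up to the on-chain last are pruned from the local mapping, and the successor is the next candidate for publication.

fn next_to_publish(on_chain_last: Option<u32>) -> u32 {
  on_chain_last.map(|batch| batch + 1).unwrap_or(0)
}

fn main() {
  assert_eq!(next_to_publish(None), 0); // nothing published yet, start at batch #0
  assert_eq!(next_to_publish(Some(4)), 5); // batches #0 ..= #4 published, publish #5
}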

View File

@@ -22,66 +22,82 @@ impl<D: Db> PublishSlashReportTask<D> {
}
}
impl<D: Db> PublishSlashReportTask<D> {
// Returns if a slash report was successfully published
async fn publish(&mut self, network: NetworkId) -> Result<bool, String> {
let mut txn = self.db.txn();
let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else {
// No slash report to publish
return Ok(false);
};
// This uses the latest finalized block, not the latest cosigned block, which should be
// fine as in the worst case, the only impact is no longer attempting TX publication
let serai = self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
let serai = serai.validator_sets();
let session_after_slash_report = Session(session.0 + 1);
let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
let current_session = current_session.map(|session| session.0);
// Only attempt to publish the slash report for session #n while session #n+1 is still
// active
let session_after_slash_report_retired = current_session > Some(session_after_slash_report.0);
if session_after_slash_report_retired {
// Commit the txn to drain this slash report from the database and not try it again later
txn.commit();
return Ok(false);
}
if Some(session_after_slash_report.0) != current_session {
// We already checked the current session wasn't greater, and they're not equal
assert!(current_session < Some(session_after_slash_report.0));
// This would mean the Serai node is resyncing and is behind where it previously was
Err("have a slash report for a session Serai has yet to retire".to_string())?;
}
// If this session which should publish a slash report already has, move on
let key_pending_slash_report =
serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
if key_pending_slash_report.is_none() {
txn.commit();
return Ok(false);
};
match self.serai.publish(&slash_report).await {
Ok(()) => {
txn.commit();
Ok(true)
}
// This could be specific to this TX (such as an already in mempool error) and it may be
// worthwhile to continue iteration with the other pending slash reports. We assume this
// error is ephemeral and that the latency incurred for it to resolve is
// minuscule compared to the window available to publish the slash report. That makes
// this a non-issue.
Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}")),
}
}
}
impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
type Error = String;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
let mut made_progress = false;
let mut error = None;
for network in serai_client::primitives::NETWORKS {
if network == NetworkId::Serai {
continue;
};
let mut txn = self.db.txn();
let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else {
// No slash report to publish
continue;
};
let serai =
self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
let serai = serai.validator_sets();
let session_after_slash_report = Session(session.0 + 1);
let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
let current_session = current_session.map(|session| session.0);
// Only attempt to publish the slash report for session #n while session #n+1 is still
// active
let session_after_slash_report_retired =
current_session > Some(session_after_slash_report.0);
if session_after_slash_report_retired {
// Commit the txn to drain this slash report from the database and not try it again later
txn.commit();
continue;
}
if Some(session_after_slash_report.0) != current_session {
// We already checked the current session wasn't greater, and they're not equal
assert!(current_session < Some(session_after_slash_report.0));
// This would mean the Serai node is resyncing and is behind where it previously was
Err("have a slash report for a session Serai has yet to retire".to_string())?;
}
// If this session which should publish a slash report already has, move on
let key_pending_slash_report =
serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
if key_pending_slash_report.is_none() {
txn.commit();
continue;
};
match self.serai.publish(&slash_report).await {
Ok(()) => {
txn.commit();
made_progress = true;
}
// This could be specific to this TX (such as an already in mempool error) and it may be
// worthwhile to continue iteration with the other pending slash reports. We assume this
// error is ephemeral and that the latency incurred for it to resolve is
// minuscule compared to the window available to publish the slash report. That makes
// this a non-issue.
Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}"))?,
}
let network_res = self.publish(network).await;
// We made progress if any network successfully published their slash report
made_progress |= network_res == Ok(true);
// We want to yield the first error *after* attempting for every network
error = error.or(network_res.err());
}
// Yield the error
if let Some(error) = error {
Err(error)?
}
Ok(made_progress)
}
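The session gating above amounts to a small decision table (a standalone sketch; sessions are plain `u32`s and `None` models no current session):

fn window(report_session: u32, current: Option<u32>) -> &'static str {
  match current {
    // The window passed, so drain the report without publishing it
    Some(current) if current > (report_session + 1) => "drain",
    // Session n + 1 is active, so session n's report may be published
    Some(current) if current == (report_session + 1) => "publish",
    // The Serai node is behind (resyncing), so error and retry later
    _ => "error-and-retry",
  }
}

fn main() {
  assert_eq!(window(5, Some(6)), "publish");
  assert_eq!(window(5, Some(7)), "drain");
  assert_eq!(window(5, Some(5)), "error-and-retry");
}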

View File

@@ -39,6 +39,8 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
continue;
};
// This uses the latest finalized block, not the latest cosigned block, which should be
// fine as in the worst case, the only impact is no longer attempting TX publication
let serai =
self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
let serai = serai.validator_sets();

View File

@@ -21,13 +21,14 @@ workspace = true
zeroize = { version = "^1.5", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
blake2 = { version = "0.10", default-features = false, features = ["std"] }
ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
blake2 = { version = "0.10", default-features = false, features = ["std"] }
ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] }
dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] }
serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
serai-db = { path = "../../common/db" }

View File

@@ -15,20 +15,35 @@ use crate::transaction::SigningProtocolRound;
/// A topic within the database which the group participates in
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
pub(crate) enum Topic {
pub enum Topic {
/// Vote to remove a participant
RemoveParticipant { participant: SeraiAddress },
RemoveParticipant {
/// The participant to remove
participant: SeraiAddress,
},
// DkgParticipation isn't represented here as participations are immediately sent to the
// processor, not accumulated within this database
/// Participation in the signing protocol to confirm the DKG results on Substrate
DkgConfirmation { attempt: u32, round: SigningProtocolRound },
DkgConfirmation {
/// The attempt number this is for
attempt: u32,
/// The round of the signing protocol
round: SigningProtocolRound,
},
/// The local view of the SlashReport, to be aggregated into the final SlashReport
SlashReport,
/// Participation in a signing protocol
Sign { id: VariantSignId, attempt: u32, round: SigningProtocolRound },
Sign {
/// The ID of the signing protocol
id: VariantSignId,
/// The attempt number this is for
attempt: u32,
/// The round of the signing protocol
round: SigningProtocolRound,
},
}
enum Participating {
@@ -79,9 +94,9 @@ impl Topic {
}
}
// The SignId for this topic
//
// Returns None if Topic isn't Topic::Sign
/// The SignId for this topic
///
/// Returns None if Topic isn't Topic::Sign
pub(crate) fn sign_id(self, set: ValidatorSet) -> Option<messages::sign::SignId> {
#[allow(clippy::match_same_arms)]
match self {
@@ -92,6 +107,33 @@ impl Topic {
}
}
/// The SignId for this DKG Confirmation.
///
/// This is undefined except for being consistent to the DKG Confirmation signing protocol and
/// unique across sets.
///
/// Returns None if Topic isn't Topic::DkgConfirmation.
pub(crate) fn dkg_confirmation_sign_id(
self,
set: ValidatorSet,
) -> Option<messages::sign::SignId> {
#[allow(clippy::match_same_arms)]
match self {
Topic::RemoveParticipant { .. } => None,
Topic::DkgConfirmation { attempt, round: _ } => Some({
let id = {
let mut id = [0; 32];
let encoded_set = set.encode();
id[.. encoded_set.len()].copy_from_slice(&encoded_set);
VariantSignId::Batch(id)
};
SignId { session: set.session, id, attempt }
}),
Topic::SlashReport { .. } => None,
Topic::Sign { .. } => None,
}
}
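The ID construction above, in isolation (a standalone sketch; the five-byte array is a stand-in for the SCALE-encoded `ValidatorSet`):

fn main() {
  let encoded_set = [0u8, 0, 0, 0, 1]; // stand-in for `set.encode()`
  let mut id = [0u8; 32];
  id[.. encoded_set.len()].copy_from_slice(&encoded_set);
  // The remaining bytes stay zero, so distinct sets yield distinct IDs
  assert_eq!(&id[.. 5], &encoded_set);
  assert_eq!(id[5 ..], [0u8; 27]);
}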
/// The topic which precedes this topic as a prerequisite
///
/// The preceding topic must define this topic as succeeding
@@ -138,21 +180,22 @@ impl Topic {
}
}
fn requires_whitelisting(&self) -> bool {
/// If this topic requires recognition before entries are permitted for it.
pub fn requires_recognition(&self) -> bool {
#[allow(clippy::match_same_arms)]
match self {
// We don't require whitelisting to remove a participant
// We don't require recognition to remove a participant
Topic::RemoveParticipant { .. } => false,
// We don't require whitelisting for the first attempt, solely the re-attempts
// We don't require recognition for the first attempt, solely the re-attempts
Topic::DkgConfirmation { attempt, .. } => *attempt != 0,
// We don't require whitelisting for the slash report
// We don't require recognition for the slash report
Topic::SlashReport { .. } => false,
// We do require whitelisting for every sign protocol
// We do require recognition for every sign protocol
Topic::Sign { .. } => true,
}
}
fn required_participation(&self, n: u64) -> u64 {
fn required_participation(&self, n: u16) -> u16 {
let _ = self;
// All of our topics require 2/3rds participation
((2 * n) / 3) + 1
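As a quick check of the two-thirds-plus-one rule (standalone arithmetic):

fn required_participation(n: u16) -> u16 {
  ((2 * n) / 3) + 1
}

fn main() {
  assert_eq!(required_participation(3), 3);
  assert_eq!(required_participation(4), 3);
  assert_eq!(required_participation(9), 7);
  assert_eq!(required_participation(100), 67);
}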
@@ -198,11 +241,11 @@ create_db!(
// If this block has already been cosigned.
Cosigned: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> (),
// The plans to whitelist upon a `Transaction::SubstrateBlock` being included on-chain.
// The plans to recognize upon a `Transaction::SubstrateBlock` being included on-chain.
SubstrateBlockPlans: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> Vec<[u8; 32]>,
// The weight accumulated for a topic.
AccumulatedWeight: (set: ValidatorSet, topic: Topic) -> u64,
AccumulatedWeight: (set: ValidatorSet, topic: Topic) -> u16,
// The entries accumulated for a topic, by validator.
Accumulated: <D: Borshy>(set: ValidatorSet, topic: Topic, validator: SeraiAddress) -> D,
@@ -213,7 +256,12 @@ create_db!(
db_channel!(
CoordinatorTributary {
// Messages to send to the processor
ProcessorMessages: (set: ValidatorSet) -> messages::CoordinatorMessage,
// Messages for the DKG confirmation
DkgConfirmationMessages: (set: ValidatorSet) -> messages::sign::CoordinatorMessage,
// Topics which have been explicitly recognized
RecognizedTopics: (set: ValidatorSet) -> Topic,
}
);
@@ -262,7 +310,7 @@ impl TributaryDb {
);
ActivelyCosigning::set(txn, set, &substrate_block_hash);
TributaryDb::recognize_topic(
Self::recognize_topic(
txn,
set,
Topic::Sign {
@@ -292,6 +340,10 @@ impl TributaryDb {
pub(crate) fn recognize_topic(txn: &mut impl DbTxn, set: ValidatorSet, topic: Topic) {
AccumulatedWeight::set(txn, set, topic, &0);
RecognizedTopics::send(txn, set, &topic);
}
pub(crate) fn recognized(getter: &impl Get, set: ValidatorSet, topic: Topic) -> bool {
AccumulatedWeight::get(getter, set, topic).is_some()
}
pub(crate) fn start_of_block(txn: &mut impl DbTxn, set: ValidatorSet, block_number: u64) {
@@ -312,6 +364,12 @@ impl TributaryDb {
Self::recognize_topic(txn, set, topic);
if let Some(id) = topic.sign_id(set) {
Self::send_message(txn, set, messages::sign::CoordinatorMessage::Reattempt { id });
} else if let Some(id) = topic.dkg_confirmation_sign_id(set) {
DkgConfirmationMessages::send(
txn,
set,
&messages::sign::CoordinatorMessage::Reattempt { id },
);
}
}
}
@@ -339,19 +397,24 @@ impl TributaryDb {
txn: &mut impl DbTxn,
set: ValidatorSet,
validators: &[SeraiAddress],
total_weight: u64,
total_weight: u16,
block_number: u64,
topic: Topic,
validator: SeraiAddress,
validator_weight: u64,
validator_weight: u16,
data: &D,
) -> DataSet<D> {
// This function will only be called once for a (validator, topic) tuple due to how we handle
// nonces on transactions (deterministically to the topic)
let accumulated_weight = AccumulatedWeight::get(txn, set, topic);
if topic.requires_whitelisting() && accumulated_weight.is_none() {
Self::fatal_slash(txn, set, validator, "participated in unrecognized topic");
if topic.requires_recognition() && accumulated_weight.is_none() {
Self::fatal_slash(
txn,
set,
validator,
"participated in unrecognized topic which requires recognition",
);
return DataSet::None;
}
let mut accumulated_weight = accumulated_weight.unwrap_or(0);

View File

@@ -6,6 +6,7 @@ use core::{marker::PhantomData, future::Future};
use std::collections::HashMap;
use ciphersuite::group::GroupEncoding;
use dkg::Participant;
use serai_client::{
primitives::SeraiAddress,
@@ -27,13 +28,14 @@ use tributary_sdk::{
use serai_cosign::CosignIntent;
use serai_coordinator_substrate::NewSetInformation;
use messages::sign::VariantSignId;
use messages::sign::{VariantSignId, SignId};
mod transaction;
pub use transaction::{SigningProtocolRound, Signed, Transaction};
mod db;
use db::*;
pub use db::Topic;
/// Messages to send to the Processors.
pub struct ProcessorMessages;
@@ -44,6 +46,24 @@ impl ProcessorMessages {
}
}
/// Messages for the DKG confirmation.
pub struct DkgConfirmationMessages;
impl DkgConfirmationMessages {
/// Receive a message for the DKG confirmation.
///
/// These messages use the ProcessorMessage API as that's what existing flows are designed
/// around, enabling their reuse. The ProcessorMessage includes a VariantSignId which isn't
/// applicable to the DKG confirmation (as there's no such variant of the VariantSignId). The
/// actual ID is undefined other than it will be consistent to the signing protocol and unique
/// across validator sets, with no guarantees of uniqueness across contexts.
pub fn try_recv(
txn: &mut impl DbTxn,
set: ValidatorSet,
) -> Option<messages::sign::CoordinatorMessage> {
db::DkgConfirmationMessages::try_recv(txn, set)
}
}
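A hypothetical consumption sketch for this channel; `txn` and `set` are assumed in scope, and `drive_dkg_confirmation_signer` is an illustrative stand-in for whatever advances the signing protocol:

// Drain every DKG confirmation message pending within this transaction
while let Some(msg) = DkgConfirmationMessages::try_recv(&mut txn, set) {
  // `msg` is a `messages::sign::CoordinatorMessage` (Preprocesses/Shares/Reattempt)
  drive_dkg_confirmation_signer(msg);
}
txn.commit();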
/// The cosign intents.
pub struct CosignIntents;
impl CosignIntents {
@@ -62,10 +82,28 @@ impl CosignIntents {
}
}
/// The plans to whitelist upon a `Transaction::SubstrateBlock` being included on-chain.
/// An interface to the topics recognized on this Tributary.
pub struct RecognizedTopics;
impl RecognizedTopics {
/// If this topic has been recognized by this Tributary.
///
/// This will be either by explicit recognition or by participation.
pub fn recognized(getter: &impl Get, set: ValidatorSet, topic: Topic) -> bool {
TributaryDb::recognized(getter, set, topic)
}
/// The next topic requiring recognition which has been recognized by this Tributary.
pub fn try_recv_topic_requiring_recognition(
txn: &mut impl DbTxn,
set: ValidatorSet,
) -> Option<Topic> {
db::RecognizedTopics::try_recv(txn, set)
}
}
/// The plans to recognize upon a `Transaction::SubstrateBlock` being included on-chain.
pub struct SubstrateBlockPlans;
impl SubstrateBlockPlans {
/// Set the plans to whitelist upon the associated `Transaction::SubstrateBlock` being included
/// Set the plans to recognize upon the associated `Transaction::SubstrateBlock` being included
/// on-chain.
///
/// This must be done before the associated `Transaction::Cosign` is provided.
@@ -75,7 +113,7 @@ impl SubstrateBlockPlans {
substrate_block_hash: [u8; 32],
plans: &Vec<[u8; 32]>,
) {
db::SubstrateBlockPlans::set(txn, set, substrate_block_hash, &plans);
db::SubstrateBlockPlans::set(txn, set, substrate_block_hash, plans);
}
fn take(
txn: &mut impl DbTxn,
@@ -90,32 +128,32 @@ struct ScanBlock<'a, TD: Db, TDT: DbTxn, P: P2p> {
_td: PhantomData<TD>,
_p2p: PhantomData<P>,
tributary_txn: &'a mut TDT,
set: ValidatorSet,
set: &'a NewSetInformation,
validators: &'a [SeraiAddress],
total_weight: u64,
validator_weights: &'a HashMap<SeraiAddress, u64>,
total_weight: u16,
validator_weights: &'a HashMap<SeraiAddress, u16>,
}
impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
fn potentially_start_cosign(&mut self) {
// Don't start a new cosigning instance if we're actively running one
if TributaryDb::actively_cosigning(self.tributary_txn, self.set).is_some() {
if TributaryDb::actively_cosigning(self.tributary_txn, self.set.set).is_some() {
return;
}
// Fetch the latest intended-to-be-cosigned block
let Some(latest_substrate_block_to_cosign) =
TributaryDb::latest_substrate_block_to_cosign(self.tributary_txn, self.set)
TributaryDb::latest_substrate_block_to_cosign(self.tributary_txn, self.set.set)
else {
return;
};
// If it was already cosigned, return
if TributaryDb::cosigned(self.tributary_txn, self.set, latest_substrate_block_to_cosign) {
if TributaryDb::cosigned(self.tributary_txn, self.set.set, latest_substrate_block_to_cosign) {
return;
}
let intent =
CosignIntents::take(self.tributary_txn, self.set, latest_substrate_block_to_cosign)
CosignIntents::take(self.tributary_txn, self.set.set, latest_substrate_block_to_cosign)
.expect("Transaction::Cosign locally provided but CosignIntents wasn't populated");
assert_eq!(
intent.block_hash, latest_substrate_block_to_cosign,
@@ -125,20 +163,71 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
// Mark us as actively cosigning
TributaryDb::start_cosigning(
self.tributary_txn,
self.set,
self.set.set,
latest_substrate_block_to_cosign,
intent.block_number,
);
// Send the message for the processor to start signing
TributaryDb::send_message(
self.tributary_txn,
self.set,
self.set.set,
messages::coordinator::CoordinatorMessage::CosignSubstrateBlock {
session: self.set.session,
intent,
session: self.set.set.session,
cosign: intent.into_cosign(self.set.set.network),
},
);
}
fn accumulate_dkg_confirmation<D: AsRef<[u8]> + Borshy>(
&mut self,
block_number: u64,
topic: Topic,
data: &D,
signer: SeraiAddress,
) -> Option<(SignId, HashMap<Participant, Vec<u8>>)> {
match TributaryDb::accumulate::<D>(
self.tributary_txn,
self.set.set,
self.validators,
self.total_weight,
block_number,
topic,
signer,
self.validator_weights[&signer],
data,
) {
DataSet::None => None,
DataSet::Participating(data_set) => {
let id = topic.dkg_confirmation_sign_id(self.set.set).unwrap();
// This will be used in a MuSig protocol, so the Participant indexes are the validator's
// position in the list regardless of their weight
let flatten_data_set = |data_set: HashMap<_, D>| {
let mut entries = HashMap::with_capacity(usize::from(self.total_weight));
for (validator, participation) in data_set {
let (index, (_validator, _weight)) = &self
.set
.validators
.iter()
.enumerate()
.find(|(_i, (validator_i, _weight))| validator == *validator_i)
.unwrap();
// The index is zero-indexed yet participants are one-indexed
let index = index + 1;
entries.insert(
Participant::new(u16::try_from(index).unwrap()).unwrap(),
participation.as_ref().to_vec(),
);
}
entries
};
let data_set = flatten_data_set(data_set);
Some((id, data_set))
}
}
}
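To make the position-based (MuSig) indexing concrete, a standalone sketch with illustrative names: each validator occupies exactly one slot, its one-indexed position in the validator list, regardless of how many key shares it holds.

fn musig_index(validators: &[(&str, u16)], who: &str) -> u16 {
  let zero_indexed = validators.iter().position(|(validator, _weight)| *validator == who).unwrap();
  // The index is zero-indexed yet participants are one-indexed
  u16::try_from(zero_indexed + 1).unwrap()
}

fn main() {
  let validators = [("alice", 2), ("bob", 1)];
  // alice is Participant 1 and bob is Participant 2, despite alice's two key shares
  assert_eq!(musig_index(&validators, "alice"), 1);
  assert_eq!(musig_index(&validators, "bob"), 2);
}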
fn handle_application_tx(&mut self, block_number: u64, tx: Transaction) {
let signer = |signed: Signed| SeraiAddress(signed.signer().to_bytes());
@@ -147,13 +236,14 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
// TODO: The fact they can publish these TXs makes this a notable spam vector
if TributaryDb::is_fatally_slashed(
self.tributary_txn,
self.set,
self.set.set,
SeraiAddress(signer.to_bytes()),
) {
return;
}
}
let topic = tx.topic();
match tx {
// Accumulate this vote and fatally slash the participant if past the threshold
Transaction::RemoveParticipant { participant, signed } => {
@@ -163,7 +253,7 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
if !self.validators.iter().any(|validator| *validator == participant) {
TributaryDb::fatal_slash(
self.tributary_txn,
self.set,
self.set.set,
signer,
"voted to remove non-existent participant",
);
@@ -172,18 +262,23 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
match TributaryDb::accumulate(
self.tributary_txn,
self.set,
self.set.set,
self.validators,
self.total_weight,
block_number,
Topic::RemoveParticipant { participant },
topic.unwrap(),
signer,
self.validator_weights[&signer],
&(),
) {
DataSet::None => {}
DataSet::Participating(_) => {
TributaryDb::fatal_slash(self.tributary_txn, self.set, participant, "voted to remove");
TributaryDb::fatal_slash(
self.tributary_txn,
self.set.set,
participant,
"voted to remove",
);
}
};
}
@@ -192,28 +287,52 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
Transaction::DkgParticipation { participation, signed } => {
TributaryDb::send_message(
self.tributary_txn,
self.set,
self.set.set,
messages::key_gen::CoordinatorMessage::Participation {
session: self.set.session,
participant: todo!("TODO"),
session: self.set.set.session,
participant: self.set.participant_indexes[&signer(signed)][0],
participation,
},
);
}
Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed } => {
// Accumulate the preprocesses into our own FROST attempt manager
todo!("TODO")
Transaction::DkgConfirmationPreprocess { attempt: _, preprocess, signed } => {
let topic = topic.unwrap();
let signer = signer(signed);
let Some((id, data_set)) =
self.accumulate_dkg_confirmation(block_number, topic, &preprocess, signer)
else {
return;
};
db::DkgConfirmationMessages::send(
self.tributary_txn,
self.set.set,
&messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set },
);
}
Transaction::DkgConfirmationShare { attempt, share, signed } => {
// Accumulate the shares into our own FROST attempt manager
todo!("TODO: SetKeysTask")
Transaction::DkgConfirmationShare { attempt: _, share, signed } => {
let topic = topic.unwrap();
let signer = signer(signed);
let Some((id, data_set)) =
self.accumulate_dkg_confirmation(block_number, topic, &share, signer)
else {
return;
};
db::DkgConfirmationMessages::send(
self.tributary_txn,
self.set.set,
&messages::sign::CoordinatorMessage::Shares { id, shares: data_set },
);
}
Transaction::Cosign { substrate_block_hash } => {
// Update the latest intended-to-be-cosigned Substrate block
TributaryDb::set_latest_substrate_block_to_cosign(
self.tributary_txn,
self.set,
self.set.set,
substrate_block_hash,
);
// Start a new cosign if we aren't already working on one
@@ -226,32 +345,32 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
not-yet-Cosigned cosigns, we flag all cosigned blocks as cosigned. Then, when we choose
the next block to work on, we won't if it's already been cosigned.
*/
TributaryDb::mark_cosigned(self.tributary_txn, self.set, substrate_block_hash);
TributaryDb::mark_cosigned(self.tributary_txn, self.set.set, substrate_block_hash);
// If we aren't actively cosigning this block, return
// This occurs when we have Cosign TXs A, B, C, we received Cosigned for A and start on C,
// and then receive Cosigned for B
if TributaryDb::actively_cosigning(self.tributary_txn, self.set) !=
if TributaryDb::actively_cosigning(self.tributary_txn, self.set.set) !=
Some(substrate_block_hash)
{
return;
}
// Since this is the block we were cosigning, mark us as having finished cosigning
TributaryDb::finish_cosigning(self.tributary_txn, self.set);
TributaryDb::finish_cosigning(self.tributary_txn, self.set.set);
// Start working on the next cosign
self.potentially_start_cosign();
}
Transaction::SubstrateBlock { hash } => {
// Whitelist all of the IDs this Substrate block causes to be signed
let plans = SubstrateBlockPlans::take(self.tributary_txn, self.set, hash).expect(
// Recognize all of the IDs this Substrate block causes to be signed
let plans = SubstrateBlockPlans::take(self.tributary_txn, self.set.set, hash).expect(
"Transaction::SubstrateBlock locally provided but SubstrateBlockPlans wasn't populated",
);
for plan in plans {
TributaryDb::recognize_topic(
self.tributary_txn,
self.set,
self.set.set,
Topic::Sign {
id: VariantSignId::Transaction(plan),
attempt: 0,
@@ -261,10 +380,10 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
}
}
Transaction::Batch { hash } => {
// Whitelist the signing of this batch
// Recognize the signing of this batch
TributaryDb::recognize_topic(
self.tributary_txn,
self.set,
self.set.set,
Topic::Sign {
id: VariantSignId::Batch(hash),
attempt: 0,
@@ -279,7 +398,7 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
if slash_points.len() != self.validators.len() {
TributaryDb::fatal_slash(
self.tributary_txn,
self.set,
self.set.set,
signer,
"slash report was for a distinct amount of signers",
);
@@ -289,11 +408,11 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
// Accumulate, and if past the threshold, calculate *the* slash report and start signing it
match TributaryDb::accumulate(
self.tributary_txn,
self.set,
self.set.set,
self.validators,
self.total_weight,
block_number,
Topic::SlashReport,
topic.unwrap(),
signer,
self.validator_weights[&signer],
&slash_points,
@@ -307,10 +426,6 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
have a supermajority agree the slash should be fatal. If there isn't a supermajority,
but the median believes the slash should be fatal, we need to fall back to a large
constant.
Also, TODO, each slash point should probably be considered as
`MAX_KEY_SHARES_PER_SET * BLOCK_TIME` seconds of downtime. As this time crosses
various thresholds (1 day, 3 days, etc), a multiplier should be attached.
*/
let mut median_slash_report = Vec::with_capacity(self.validators.len());
for i in 0 .. self.validators.len() {
@@ -351,7 +466,7 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
// Create the resulting slash report
let mut slash_report = vec![];
for (validator, points) in self.validators.iter().copied().zip(amortized_slash_report) {
for points in amortized_slash_report {
// TODO: Natively store this as a `Slash`
if points == u32::MAX {
slash_report.push(Slash::Fatal);
@@ -364,7 +479,7 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
// Recognize the topic for signing the slash report
TributaryDb::recognize_topic(
self.tributary_txn,
self.set,
self.set.set,
Topic::Sign {
id: VariantSignId::SlashReport,
attempt: 0,
@@ -374,24 +489,24 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
// Send the message for the processor to start signing
TributaryDb::send_message(
self.tributary_txn,
self.set,
self.set.set,
messages::coordinator::CoordinatorMessage::SignSlashReport {
session: self.set.session,
report: slash_report,
session: self.set.set.session,
slash_report: slash_report.try_into().unwrap(),
},
);
}
};
}
Transaction::Sign { id, attempt, round, data, signed } => {
let topic = Topic::Sign { id, attempt, round };
Transaction::Sign { id: _, attempt: _, round, data, signed } => {
let topic = topic.unwrap();
let signer = signer(signed);
if u64::try_from(data.len()).unwrap() != self.validator_weights[&signer] {
if data.len() != usize::from(self.validator_weights[&signer]) {
TributaryDb::fatal_slash(
self.tributary_txn,
self.set,
self.set.set,
signer,
"signer signed with a distinct amount of key shares than they had key shares",
);
@@ -400,7 +515,7 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
match TributaryDb::accumulate(
self.tributary_txn,
self.set,
self.set.set,
self.validators,
self.total_weight,
block_number,
@@ -411,12 +526,22 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
) {
DataSet::None => {}
DataSet::Participating(data_set) => {
let id = topic.sign_id(self.set).expect("Topic::Sign didn't have SignId");
let flatten_data_set = |data_set| todo!("TODO");
let id = topic.sign_id(self.set.set).expect("Topic::Sign didn't have SignId");
let flatten_data_set = |data_set: HashMap<_, Vec<_>>| {
let mut entries = HashMap::with_capacity(usize::from(self.total_weight));
for (validator, shares) in data_set {
let indexes = &self.set.participant_indexes[&validator];
assert_eq!(indexes.len(), shares.len());
for (index, share) in indexes.iter().zip(shares) {
entries.insert(*index, share);
}
}
entries
};
let data_set = flatten_data_set(data_set);
TributaryDb::send_message(
self.tributary_txn,
self.set,
self.set.set,
match round {
SigningProtocolRound::Preprocess => {
messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set }
@@ -427,13 +552,13 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
},
)
}
};
}
}
}
}
fn handle_block(mut self, block_number: u64, block: Block<Transaction>) {
TributaryDb::start_of_block(self.tributary_txn, self.set, block_number);
TributaryDb::start_of_block(self.tributary_txn, self.set.set, block_number);
for tx in block.transactions {
match tx {
@@ -460,7 +585,7 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
// errors, mark the node as fatally slashed
TributaryDb::fatal_slash(
self.tributary_txn,
self.set,
self.set.set,
SeraiAddress(msgs.0.msg.sender),
&format!("invalid tendermint messages: {msgs:?}"),
);
@@ -476,10 +601,10 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
/// The task to scan the Tributary, populating `ProcessorMessages`.
pub struct ScanTributaryTask<TD: Db, P: P2p> {
tributary_db: TD,
set: ValidatorSet,
set: NewSetInformation,
validators: Vec<SeraiAddress>,
total_weight: u64,
validator_weights: HashMap<SeraiAddress, u64>,
total_weight: u16,
validator_weights: HashMap<SeraiAddress, u16>,
tributary: TributaryReader<TD, Transaction>,
_p2p: PhantomData<P>,
}
@@ -488,15 +613,13 @@ impl<TD: Db, P: P2p> ScanTributaryTask<TD, P> {
/// Create a new instance of this task.
pub fn new(
tributary_db: TD,
new_set: &NewSetInformation,
set: NewSetInformation,
tributary: TributaryReader<TD, Transaction>,
) -> Self {
let mut validators = Vec::with_capacity(new_set.validators.len());
let mut validators = Vec::with_capacity(set.validators.len());
let mut total_weight = 0;
let mut validator_weights = HashMap::with_capacity(new_set.validators.len());
for (validator, weight) in new_set.validators.iter().copied() {
let validator = SeraiAddress::from(validator);
let weight = u64::from(weight);
let mut validator_weights = HashMap::with_capacity(set.validators.len());
for (validator, weight) in set.validators.iter().copied() {
validators.push(validator);
total_weight += weight;
validator_weights.insert(validator, weight);
@@ -504,7 +627,7 @@ impl<TD: Db, P: P2p> ScanTributaryTask<TD, P> {
ScanTributaryTask {
tributary_db,
set: new_set.set,
set,
validators,
total_weight,
validator_weights,
@@ -520,7 +643,7 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
let (mut last_block_number, mut last_block_hash) =
TributaryDb::last_handled_tributary_block(&self.tributary_db, self.set)
TributaryDb::last_handled_tributary_block(&self.tributary_db, self.set.set)
.unwrap_or((0, self.tributary.genesis()));
let mut made_progress = false;
@@ -539,7 +662,7 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
if !self.tributary.locally_provided_txs_in_block(&block_hash, order) {
return Err(format!(
"didn't have the provided Transactions on-chain for set (ephemeral error): {:?}",
self.set
self.set.set
));
}
}
@@ -549,7 +672,7 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
_td: PhantomData::<TD>,
_p2p: PhantomData::<P>,
tributary_txn: &mut tributary_txn,
set: self.set,
set: &self.set,
validators: &self.validators,
total_weight: self.total_weight,
validator_weights: &self.validator_weights,
@@ -557,7 +680,7 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
.handle_block(block_number, block);
TributaryDb::set_last_handled_tributary_block(
&mut tributary_txn,
self.set,
self.set.set,
block_number,
block_hash,
);
@@ -577,7 +700,6 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
pub fn slash_report_transaction(getter: &impl Get, set: &NewSetInformation) -> Transaction {
let mut slash_points = Vec::with_capacity(set.validators.len());
for (validator, _weight) in set.validators.iter().copied() {
let validator = SeraiAddress::from(validator);
slash_points.push(SlashPoints::get(getter, set.set, validator).unwrap_or(0));
}
Transaction::SlashReport { slash_points, signed: Signed::default() }

View File

@@ -25,6 +25,8 @@ use tributary_sdk::{
},
};
use crate::db::Topic;
/// The round this data is for, within a signing protocol.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
pub enum SigningProtocolRound {
@@ -180,7 +182,7 @@ pub enum Transaction {
///
/// This is provided after the block has been cosigned.
///
/// With the acknowledgement of a Substrate block, we can whitelist all the `VariantSignId`s
/// With the acknowledgement of a Substrate block, we can recognize all the `VariantSignId`s
/// resulting from its handling.
SubstrateBlock {
/// The hash of the Substrate block
@@ -257,9 +259,7 @@ impl TransactionTrait for Transaction {
Transaction::Cosign { .. } => TransactionKind::Provided("Cosign"),
Transaction::Cosigned { .. } => TransactionKind::Provided("Cosigned"),
// TODO: Provide this
Transaction::SubstrateBlock { .. } => TransactionKind::Provided("SubstrateBlock"),
// TODO: Provide this
Transaction::Batch { .. } => TransactionKind::Provided("Batch"),
Transaction::Sign { id, attempt, round, signed, .. } => TransactionKind::Signed(
@@ -318,6 +318,36 @@ impl TransactionTrait for Transaction {
}
impl Transaction {
/// The topic in the database for this transaction.
pub fn topic(&self) -> Option<Topic> {
#[allow(clippy::match_same_arms)] // This doesn't make semantic sense here
match self {
Transaction::RemoveParticipant { participant, .. } => {
Some(Topic::RemoveParticipant { participant: *participant })
}
Transaction::DkgParticipation { .. } => None,
Transaction::DkgConfirmationPreprocess { attempt, .. } => {
Some(Topic::DkgConfirmation { attempt: *attempt, round: SigningProtocolRound::Preprocess })
}
Transaction::DkgConfirmationShare { attempt, .. } => {
Some(Topic::DkgConfirmation { attempt: *attempt, round: SigningProtocolRound::Share })
}
// Provided TXs
Transaction::Cosign { .. } |
Transaction::Cosigned { .. } |
Transaction::SubstrateBlock { .. } |
Transaction::Batch { .. } => None,
Transaction::Sign { id, attempt, round, .. } => {
Some(Topic::Sign { id: *id, attempt: *attempt, round: *round })
}
Transaction::SlashReport { .. } => Some(Topic::SlashReport),
}
}
/// Sign a transaction.
///
/// Panics if signing a transaction whose type isn't `TransactionKind::Signed`.
@@ -335,10 +365,12 @@ impl Transaction {
Transaction::DkgConfirmationPreprocess { ref mut signed, .. } => signed,
Transaction::DkgConfirmationShare { ref mut signed, .. } => signed,
Transaction::Cosign { .. } => panic!("signing CosignSubstrateBlock"),
Transaction::Cosigned { .. } => panic!("signing Cosigned"),
Transaction::SubstrateBlock { .. } => panic!("signing SubstrateBlock"),
Transaction::Batch { .. } => panic!("signing Batch"),
Transaction::Cosign { .. } => panic!("signing Cosign transaction (provided)"),
Transaction::Cosigned { .. } => panic!("signing Cosigned transaction (provided)"),
Transaction::SubstrateBlock { .. } => {
panic!("signing SubstrateBlock transaction (provided)")
}
Transaction::Batch { .. } => panic!("signing Batch transaction (provided)"),
Transaction::Sign { ref mut signed, .. } => signed,

View File

@@ -92,7 +92,7 @@ impl Neg for FieldElement {
}
}
impl<'a> Neg for &'a FieldElement {
impl Neg for &FieldElement {
type Output = FieldElement;
fn neg(self) -> Self::Output {
(*self).neg()

View File

@@ -37,11 +37,11 @@ pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F {
// Get a wide amount of bytes to safely reduce without bias
// In most cases, <=1.5x bytes is enough. 2x is still standard and there are some theoretical
// groups which may technically require more than 1.5x bytes for this to work as intended
let target_bytes = ((usize::try_from(F::NUM_BITS).unwrap() + 7) / 8) * 2;
let target_bytes = usize::try_from(F::NUM_BITS).unwrap().div_ceil(8) * 2;
let mut challenge_bytes = transcript.challenge(b"challenge");
let challenge_bytes_len = challenge_bytes.as_ref().len();
// If the challenge is 32 bytes, and we need 64, we need two challenges
let needed_challenges = (target_bytes + (challenge_bytes_len - 1)) / challenge_bytes_len;
let needed_challenges = target_bytes.div_ceil(challenge_bytes_len);
// The following algorithm should be equivalent to a wide reduction of the challenges,
// interpreted as a concatenated, big-endian byte string
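For instance, with a hypothetical 255-bit field and 32-byte transcript challenges, the byte arithmetic above works out as:

fn main() {
  let target_bytes = 255_usize.div_ceil(8) * 2; // ceil(255 / 8) = 32 bytes, doubled to 64
  let challenge_bytes_len = 32;
  let needed_challenges = target_bytes.div_ceil(challenge_bytes_len);
  assert_eq!((target_bytes, needed_challenges), (64, 2));
}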

View File

@@ -33,7 +33,7 @@ pub struct ArithmeticCircuitStatement<'a, C: Ciphersuite> {
V: PointVector<C>,
}
impl<'a, C: Ciphersuite> Zeroize for ArithmeticCircuitStatement<'a, C> {
impl<C: Ciphersuite> Zeroize for ArithmeticCircuitStatement<'_, C> {
fn zeroize(&mut self) {
self.constraints.zeroize();
self.C.zeroize();

View File

@@ -247,7 +247,7 @@ impl<C: Ciphersuite> Generators<C> {
}
}
impl<'a, C: Ciphersuite> ProofGenerators<'a, C> {
impl<C: Ciphersuite> ProofGenerators<'_, C> {
pub(crate) fn len(&self) -> usize {
self.g_bold.len()
}

View File

@@ -203,14 +203,15 @@ pub trait SignMachine<S>: Send + Sync + Sized {
/// SignatureMachine this SignMachine turns into.
type SignatureMachine: SignatureMachine<S, SignatureShare = Self::SignatureShare>;
/// Cache this preprocess for usage later. This cached preprocess MUST only be used once. Reuse
/// of it enables recovery of your private key share. Third-party recovery of a cached preprocess
/// also enables recovery of your private key share, so this MUST be treated with the same
/// security as your private key share.
/// Cache this preprocess for usage later.
///
/// This cached preprocess MUST only be used once. Reuse of it enables recovery of your private
/// key share. Third-party recovery of a cached preprocess also enables recovery of your private
/// key share, so this MUST be treated with the same security as your private key share.
fn cache(self) -> CachedPreprocess;
/// Create a sign machine from a cached preprocess.
///
/// After this, the preprocess must be deleted so it's never reused. Any reuse will presumably
/// cause the signer to leak their secret share.
fn from_cache(
@@ -219,11 +220,14 @@ pub trait SignMachine<S>: Send + Sync + Sized {
cache: CachedPreprocess,
) -> (Self, Self::Preprocess);
/// Read a Preprocess message. Despite taking self, this does not save the preprocess.
/// It must be externally cached and passed into sign.
/// Read a Preprocess message.
///
/// Despite taking self, this does not save the preprocess. It must be externally cached and
/// passed into sign.
fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess>;
/// Sign a message.
///
/// Takes in the participants' preprocess messages. Returns the signature share to be broadcast
/// to all participants, over an authenticated channel. The parties who participate here will
/// become the signing set for this session.

View File

@@ -59,7 +59,7 @@ pub(crate) fn prep_bits<G: Group<Scalar: PrimeFieldBits>>(
for pair in pairs {
let p = groupings.len();
let mut bits = pair.0.to_le_bits();
groupings.push(vec![0; (bits.len() + (w_usize - 1)) / w_usize]);
groupings.push(vec![0; bits.len().div_ceil(w_usize)]);
for (i, mut bit) in bits.iter_mut().enumerate() {
let mut bit = u8_from_bool(&mut bit);

View File

@@ -16,6 +16,8 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
const-hex = { version = "1", default-features = false, features = ["std", "core-error"] }
subtle = { version = "2", default-features = false, features = ["std"] }
sha3 = { version = "0.10", default-features = false, features = ["std"] }
group = { version = "0.13", default-features = false, features = ["alloc"] }

View File

@@ -2,4 +2,5 @@
An Ethereum contract to verify Schnorr signatures.
This crate will fail to build if `solc` is not installed and available.
This crate will fail to build if the expected version of `solc` is not
installed and available.

View File

@@ -4,8 +4,16 @@
#![allow(non_snake_case)]
/// The initialization bytecode of the Schnorr library.
pub const INIT_BYTECODE: &str =
include_str!(concat!(env!("OUT_DIR"), "/ethereum-schnorr-contract/Schnorr.bin"));
pub const BYTECODE: &[u8] = {
const BYTECODE_HEX: &[u8] =
include_bytes!(concat!(env!("OUT_DIR"), "/ethereum-schnorr-contract/Schnorr.bin"));
const BYTECODE: [u8; BYTECODE_HEX.len() / 2] =
match const_hex::const_decode_to_array::<{ BYTECODE_HEX.len() / 2 }>(BYTECODE_HEX) {
Ok(bytecode) => bytecode,
Err(_) => panic!("Schnorr.bin did not contain valid hex"),
};
&BYTECODE
};
mod public_key;
pub use public_key::PublicKey;
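The pattern above decodes the compiled-in hex at compile time, so invalid hex fails the build rather than surfacing at runtime. A minimal standalone sketch of the same pattern, reusing `const_hex::const_decode_to_array` as above (the `HEX` constant is illustrative):

// Sketch: compile-time hex decoding; a bad constant is a build error
const HEX: &[u8] = b"deadbeef";
const DECODED: [u8; HEX.len() / 2] =
  match const_hex::const_decode_to_array::<{ HEX.len() / 2 }>(HEX) {
    Ok(bytes) => bytes,
    Err(_) => panic!("constant did not contain valid hex"),
  };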


@@ -18,14 +18,10 @@ use crate::{Signature, tests::test_key};
fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
let sig = ecdsa::Signature::from_scalars(r, s).ok()?;
let message: [u8; 32] = message.to_repr().into();
alloy_core::primitives::Signature::from_signature_and_parity(
sig,
alloy_core::primitives::Parity::Parity(odd_y),
)
.ok()?
.recover_address_from_prehash(&alloy_core::primitives::B256::from(message))
.ok()
.map(Into::into)
alloy_core::primitives::PrimitiveSignature::from_signature_and_parity(sig, odd_y)
.recover_address_from_prehash(&alloy_core::primitives::B256::from(message))
.ok()
.map(Into::into)
}
// Test ecrecover behaves as expected


@@ -56,7 +56,7 @@ impl AggregateRangeWitness {
}
}
impl<'a> AggregateRangeStatement<'a> {
impl AggregateRangeStatement<'_> {
fn initial_transcript(&self) -> (Scalar, Vec<EdwardsPoint>) {
let V = self.commitments.iter().map(|c| c * INV_EIGHT()).collect::<Vec<_>>();
(keccak256_to_scalar(V.iter().flat_map(|V| V.compress().to_bytes()).collect::<Vec<_>>()), V)


@@ -9,6 +9,7 @@ use crate::{
// https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/
// tests/unit_tests/test_tx_utils.cpp
// which is licensed
#[allow(clippy::empty_line_after_outer_attr)] // rustfmt is for the comment, not for the const
#[rustfmt::skip]
/*
Copyright (c) 2014-2022, The Monero Project


@@ -1,26 +1,3 @@
use messages::{
coordinator::{
SubstrateSignableId, PlanMeta, CoordinatorMessage as CoordinatorCoordinatorMessage,
},
CoordinatorMessage,
};
use serai_env as env;
use message_queue::{Service, client::MessageQueue};
mod db;
pub use db::*;
mod coordinator;
pub use coordinator::*;
mod multisigs;
use multisigs::{MultisigEvent, MultisigManager};
#[cfg(test)]
mod tests;
async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
txn: &mut D::Transaction<'_>,
network: &N,


@@ -28,6 +28,7 @@ ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, fea
dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] }
serai-client = { path = "../../substrate/client", default-features = false }
serai-cosign = { path = "../../coordinator/cosign" }
log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }


@@ -3,12 +3,14 @@ use std::sync::{LazyLock, Arc, Mutex};
use tokio::sync::mpsc;
use scale::Encode;
use serai_client::{
primitives::Signature, validator_sets::primitives::Session,
primitives::Signature,
validator_sets::primitives::{Session, SlashReport},
in_instructions::primitives::SignedBatch,
};
use serai_cosign::SignedCosign;
use serai_db::{Get, DbTxn, Db, create_db, db_channel};
use scanner::ScannerFeed;
@@ -181,17 +183,11 @@ impl signers::Coordinator for CoordinatorSend {
fn publish_cosign(
&mut self,
block_number: u64,
block: [u8; 32],
signature: Signature,
cosign: SignedCosign,
) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
async move {
self.send(&messages::ProcessorMessage::Coordinator(
messages::coordinator::ProcessorMessage::CosignedBlock {
block_number,
block,
signature: signature.encode(),
},
messages::coordinator::ProcessorMessage::CosignedBlock { cosign },
));
Ok(())
}
@@ -212,13 +208,15 @@ impl signers::Coordinator for CoordinatorSend {
fn publish_slash_report_signature(
&mut self,
session: Session,
slash_report: SlashReport,
signature: Signature,
) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
async move {
self.send(&messages::ProcessorMessage::Coordinator(
messages::coordinator::ProcessorMessage::SignedSlashReport {
session,
signature: signature.encode(),
slash_report,
signature: signature.0,
},
));
Ok(())


@@ -221,20 +221,16 @@ pub async fn main_loop<
signers.queue_message(txn, &msg)
}
messages::CoordinatorMessage::Coordinator(
messages::coordinator::CoordinatorMessage::CosignSubstrateBlock {
session,
block_number,
block,
},
messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { session, cosign },
) => {
let txn = txn.take().unwrap();
signers.cosign_block(txn, session, block_number, block)
signers.cosign_block(txn, session, &cosign)
}
messages::CoordinatorMessage::Coordinator(
messages::coordinator::CoordinatorMessage::SignSlashReport { session, report },
messages::coordinator::CoordinatorMessage::SignSlashReport { session, slash_report },
) => {
let txn = txn.take().unwrap();
signers.sign_slash_report(txn, session, &report)
signers.sign_slash_report(txn, session, &slash_report)
}
messages::CoordinatorMessage::Substrate(msg) => match msg {


@@ -26,7 +26,7 @@ TODO
};
tx.gas_limit = 1_000_000u64.into();
tx.gas_price = 1_000_000_000u64.into();
let tx = ethereum_serai::crypto::deterministically_sign(&tx);
let tx = ethereum_serai::crypto::deterministically_sign(tx);
if self.provider.get_transaction_by_hash(*tx.hash()).await.unwrap().is_none() {
self


@@ -43,89 +43,3 @@ pub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, PublicKey)
(keys, public_key)
}
// TODO: Use a proper error here
pub async fn send(
provider: &RootProvider<SimpleRequest>,
wallet: &k256::ecdsa::SigningKey,
mut tx: TxLegacy,
) -> Option<TransactionReceipt> {
let verifying_key = *wallet.verifying_key().as_affine();
let address = Address::from(address(&verifying_key.into()));
// https://github.com/alloy-rs/alloy/issues/539
// let chain_id = provider.get_chain_id().await.unwrap();
// tx.chain_id = Some(chain_id);
tx.chain_id = None;
tx.nonce = provider.get_transaction_count(address).await.unwrap();
// 100 gwei
tx.gas_price = 100_000_000_000u128;
let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap();
assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap());
assert!(
provider.get_balance(address).await.unwrap() >
((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value)
);
let mut bytes = vec![];
tx.encode_with_signature_fields(&Signature::from(sig), &mut bytes);
let pending_tx = provider.send_raw_transaction(&bytes).await.ok()?;
pending_tx.get_receipt().await.ok()
}
pub async fn fund_account(
provider: &RootProvider<SimpleRequest>,
wallet: &k256::ecdsa::SigningKey,
to_fund: Address,
value: U256,
) -> Option<()> {
let funding_tx =
TxLegacy { to: TxKind::Call(to_fund), gas_limit: 21_000, value, ..Default::default() };
assert!(send(provider, wallet, funding_tx).await.unwrap().status());
Some(())
}
// TODO: Use a proper error here
pub async fn deploy_contract(
client: Arc<RootProvider<SimpleRequest>>,
wallet: &k256::ecdsa::SigningKey,
name: &str,
) -> Option<Address> {
let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap();
let hex_bin =
if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
let bin = Bytes::from_hex(hex_bin).unwrap();
let deployment_tx = TxLegacy {
chain_id: None,
nonce: 0,
// 100 gwei
gas_price: 100_000_000_000u128,
gas_limit: 1_000_000,
to: TxKind::Create,
value: U256::ZERO,
input: bin,
};
let deployment_tx = deterministically_sign(&deployment_tx);
// Fund the deployer address
fund_account(
&client,
wallet,
deployment_tx.recover_signer().unwrap(),
U256::from(deployment_tx.tx().gas_limit) * U256::from(deployment_tx.tx().gas_price),
)
.await?;
let (deployment_tx, sig, _) = deployment_tx.into_parts();
let mut bytes = vec![];
deployment_tx.encode_with_signature_fields(&sig, &mut bytes);
let pending_tx = client.send_raw_transaction(&bytes).await.ok()?;
let receipt = pending_tx.get_receipt().await.ok()?;
assert!(receipt.status());
Some(receipt.contract_address.unwrap())
}


@@ -1,185 +0,0 @@
// TODO
use std::{convert::TryFrom, sync::Arc, collections::HashMap};
use rand_core::OsRng;
use group::Group;
use k256::ProjectivePoint;
use frost::{
curve::Secp256k1,
Participant, ThresholdKeys,
algorithm::IetfSchnorr,
tests::{algorithm_machines, sign},
};
use alloy_core::primitives::{Address, U256};
use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_types_eth::BlockTransactionsKind;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};
use alloy_node_bindings::{Anvil, AnvilInstance};
use crate::{
crypto::*,
deployer::Deployer,
router::{Router, abi as router},
tests::{key_gen, send, fund_account},
};
async fn setup_test() -> (
AnvilInstance,
Arc<RootProvider<SimpleRequest>>,
u64,
Router,
HashMap<Participant, ThresholdKeys<Secp256k1>>,
PublicKey,
) {
let anvil = Anvil::new().spawn();
let provider = RootProvider::new(
ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
);
let chain_id = provider.get_chain_id().await.unwrap();
let wallet = anvil.keys()[0].clone().into();
let client = Arc::new(provider);
// Make sure the Deployer constructor returns None, as it doesn't exist yet
assert!(Deployer::new(client.clone()).await.unwrap().is_none());
// Deploy the Deployer
let tx = Deployer::deployment_tx();
fund_account(
&client,
&wallet,
tx.recover_signer().unwrap(),
U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price),
)
.await
.unwrap();
let (tx, sig, _) = tx.into_parts();
let mut bytes = vec![];
tx.encode_with_signature_fields(&sig, &mut bytes);
let pending_tx = client.send_raw_transaction(&bytes).await.unwrap();
let receipt = pending_tx.get_receipt().await.unwrap();
assert!(receipt.status());
let deployer =
Deployer::new(client.clone()).await.expect("network error").expect("deployer wasn't deployed");
let (keys, public_key) = key_gen();
// Verify the Router constructor returns None, as it doesn't exist yet
assert!(deployer.find_router(client.clone(), &public_key).await.unwrap().is_none());
// Deploy the router
let receipt = send(&client, &anvil.keys()[0].clone().into(), deployer.deploy_router(&public_key))
.await
.unwrap();
assert!(receipt.status());
let contract = deployer.find_router(client.clone(), &public_key).await.unwrap().unwrap();
(anvil, client, chain_id, contract, keys, public_key)
}
async fn latest_block_hash(client: &RootProvider<SimpleRequest>) -> [u8; 32] {
client
.get_block(client.get_block_number().await.unwrap().into(), BlockTransactionsKind::Hashes)
.await
.unwrap()
.unwrap()
.header
.hash
.0
}
#[tokio::test]
async fn test_deploy_contract() {
let (_anvil, client, _, router, _, public_key) = setup_test().await;
let block_hash = latest_block_hash(&client).await;
assert_eq!(router.serai_key(block_hash).await.unwrap(), public_key);
assert_eq!(router.nonce(block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
// TODO: Check it emitted SeraiKeyUpdated(public_key) at its genesis
}
pub fn hash_and_sign(
keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,
public_key: &PublicKey,
message: &[u8],
) -> Signature {
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig =
sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, keys), message);
Signature::new(public_key, message, sig).unwrap()
}
#[tokio::test]
async fn test_router_update_serai_key() {
let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;
let next_key = loop {
let point = ProjectivePoint::random(&mut OsRng);
let Some(next_key) = PublicKey::new(point) else { continue };
break next_key;
};
let message = Router::update_serai_key_message(
U256::try_from(chain_id).unwrap(),
U256::try_from(1u64).unwrap(),
&next_key,
);
let sig = hash_and_sign(&keys, &public_key, &message);
let first_block_hash = latest_block_hash(&client).await;
assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);
let receipt =
send(&client, &anvil.keys()[0].clone().into(), contract.update_serai_key(&next_key, &sig))
.await
.unwrap();
assert!(receipt.status());
let second_block_hash = latest_block_hash(&client).await;
assert_eq!(contract.serai_key(second_block_hash).await.unwrap(), next_key);
// Check this does still offer the historical state
assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);
// TODO: Check logs
println!("gas used: {:?}", receipt.gas_used);
// println!("logs: {:?}", receipt.logs);
}
#[tokio::test]
async fn test_router_execute() {
let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;
let to = Address::from([0; 20]);
let value = U256::ZERO;
let tx = router::OutInstruction { to, value, calls: vec![] };
let txs = vec![tx];
let first_block_hash = latest_block_hash(&client).await;
let nonce = contract.nonce(first_block_hash).await.unwrap();
assert_eq!(nonce, U256::try_from(1u64).unwrap());
let message = Router::execute_message(U256::try_from(chain_id).unwrap(), nonce, txs.clone());
let sig = hash_and_sign(&keys, &public_key, &message);
let receipt =
send(&client, &anvil.keys()[0].clone().into(), contract.execute(&txs, &sig)).await.unwrap();
assert!(receipt.status());
let second_block_hash = latest_block_hash(&client).await;
assert_eq!(contract.nonce(second_block_hash).await.unwrap(), U256::try_from(2u64).unwrap());
// Check this does still offer the historical state
assert_eq!(contract.nonce(first_block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
// TODO: Check logs
println!("gas used: {:?}", receipt.gas_used);
// println!("logs: {:?}", receipt.logs);
}


@@ -33,3 +33,11 @@ ethereum-primitives = { package = "serai-processor-ethereum-primitives", path =
[build-dependencies]
build-solidity-contracts = { path = "../../../networks/ethereum/build-contracts", default-features = false }
[dev-dependencies]
alloy-rpc-client = { version = "0.9", default-features = false }
alloy-node-bindings = { version = "0.9", default-features = false }
tokio = { version = "1.0", default-features = false, features = ["rt-multi-thread", "macros"] }
ethereum-test-primitives = { package = "serai-ethereum-test-primitives", path = "../test-primitives" }


@@ -4,20 +4,26 @@ The deployer for Serai's Ethereum contracts.
## Goals
It should be possible to efficiently locate the Serai Router on an blockchain with the EVM, without
relying on any centralized (or even federated) entities. While deploying and locating an instance of
the Router would be trivial, by using a fixed signature for the deployment transaction, the Router
must be constructed with the correct key for the Serai network (or set to have the correct key
post-construction). Since this cannot be guaranteed to occur, the process must be retryable and the
first successful invocation must be efficiently findable.
It should be possible to efficiently locate the Serai Router on a blockchain
with the EVM, without relying on any centralized (or even federated) entities.
While deploying and locating an instance of the Router would be trivial, by
using a fixed signature for the deployment transaction, the Router must be
constructed with the correct key for the Serai network (or set to have the
correct key post-construction). Since this cannot be guaranteed to occur, the
process must be retryable and the first successful invocation must be
efficiently findable.
## Methodology
We define a contract, the Deployer, to deploy the router. This contract could use `CREATE2` with the
key representing Serai as the salt, yet this would be open to collision attacks with just 2**80
complexity. Instead, we use `CREATE` which would require 2**80 on-chain transactions (infeasible) to
use as the basis of a collision.
We define a contract, the Deployer, to deploy the Router. This contract could
use `CREATE2` with the key representing Serai as the salt, yet this would be
open to collision attacks with just 2\*\*80 complexity. Instead, we use
`CREATE` which would require 2\*\*80 on-chain transactions (infeasible) to use
as the basis of a collision.
In order to efficiently find the contract for a key, the Deployer contract saves the addresses of
deployed contracts (indexed by the initialization code hash). This allows using a single call to a
contract with a known address to find the proper Router.
In order to efficiently find the contract for a key, the Deployer contract
saves the addresses of deployed contracts (indexed by the initialization code's
hash). This allows using a single call to a contract with a known address to
find the proper Router. Saving the address to the state enables finding the
Router's address even if the connected-to node's logs have been pruned for
historical blocks.
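A sketch of the resulting lookup flow, based on the `Deployer::new`/`find_router` calls exercised by the tests in this changeset (assumes `client` is an `Arc<RootProvider<SimpleRequest>>` and `public_key` is the Serai key, as in those tests):

// The Deployer has a fixed, known address on every chain, so a single call
// against it resolves the Router for a given key
let deployer = Deployer::new(client.clone())
  .await
  .expect("network error")
  .expect("Deployer wasn't deployed");
let router = deployer
  .find_router(client.clone(), &public_key)
  .await?
  .expect("no Router was deployed for this key");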


@@ -4,29 +4,30 @@ pragma solidity ^0.8.26;
/*
The expected deployment process of Serai's Router is as follows:
1) A transaction deploying Deployer is made. Then, a deterministic signature is
created such that an account with an unknown private key is the creator of
the contract. Anyone can fund this address, and once anyone does, the
1) A transaction deploying Deployer is made. Then, a deterministic signature
is created such that an account with an unknown private key is the creator
of the contract. Anyone can fund this address, and once anyone does, the
transaction deploying Deployer can be published by anyone. No other
transaction may be made from that account.
2) Anyone deploys the Router through the Deployer. This uses a sequential nonce
such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible.
While such attacks would still be feasible if the Deployer's address was
controllable, the usage of a deterministic signature with a NUMS method
prevents that.
2) Anyone deploys the Router through the Deployer. This uses a sequential
nonce such that meet-in-the-middle attacks, with complexity 2**80, aren't
feasible. While such attacks would still be feasible if the Deployer's
address was controllable, the usage of a deterministic signature with a
NUMS method prevents that.
This doesn't have any denial-of-service risks and will resolve once anyone steps
forward as deployer. This does fail to guarantee an identical address across
every chain, though it enables letting anyone efficiently ask the Deployer for
the address (with the Deployer having an identical address on every chain).
This doesn't have any denial-of-service risks and will resolve once anyone
steps forward as deployer. This does fail to guarantee an identical address
for the Router across every chain, though it enables anyone to efficiently
ask the Deployer for the address (with the Deployer having an identical
address on every chain).
Unfortunately, guaranteeing identical addresses aren't feasible. We'd need the
Deployer contract to use a consistent salt for the Router, yet the Router must
be deployed with a specific public key for Serai. Since Ethereum isn't able to
determine a valid public key (one the result of a Serai DKG) from a dishonest
public key, we have to allow multiple deployments with Serai being the one to
determine which to use.
Unfortunately, guaranteeing identical addresses for the Router isn't
feasible. We'd need the Deployer contract to use a consistent salt for the
Router, yet the Router must be deployed with a specific public key for Serai.
Since Ethereum isn't able to determine a valid public key (one the result of
a Serai DKG) from a dishonest public key (one arbitrary), we have to allow
multiple deployments with Serai being the one to determine which to use.
The alternative would be to have a council publish the Serai key on-Ethereum,
with Serai verifying the published result. This would introduce a DoS risk in
@@ -68,15 +69,18 @@ contract Deployer {
/*
Check this wasn't prior deployed.
This is a post-check, not a pre-check (in violation of the CEI pattern). If we used a
pre-check, a deployed contract could re-enter the Deployer to deploy the same contract
multiple times due to the inner call updating state and then the outer call overwriting it.
The post-check causes the outer call to error once the inner call updates state.
This is a post-check, not a pre-check (in violation of the CEI pattern).
If we used a pre-check, a deployed contract could re-enter the Deployer
to deploy the same contract multiple times due to the inner call updating
state and then the outer call overwriting it. The post-check causes the
outer call to error once the inner call updates state.
This does mean contract deployment may fail if deployment causes arbitrary execution which
maliciously nests deployment of the being-deployed contract. Such an inner call won't fail,
yet the outer call would. The usage of a re-entrancy guard would call the inner call to fail
while the outer call succeeds. This is considered so edge-case it isn't worth handling.
This does mean contract deployment may fail if deployment causes
arbitrary execution which maliciously nests deployment of the
being-deployed contract. Such an inner call won't fail, yet the outer
call would. The usage of a re-entrancy guard would cause the inner call
to fail while the outer call succeeds. This is considered so edge-case it
isn't worth handling.
*/
if (deployments[initCodeHash] != address(0)) {
revert PriorDeployed();


@@ -4,7 +4,7 @@
use std::sync::Arc;
use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind};
use alloy_core::primitives::{hex, Address, U256, Bytes, TxKind};
use alloy_consensus::{Signed, TxLegacy};
use alloy_sol_types::SolCall;
@@ -14,6 +14,9 @@ use alloy_transport::{TransportErrorKind, RpcError};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};
#[cfg(test)]
mod tests;
#[rustfmt::skip]
#[expect(warnings)]
#[expect(needless_pass_by_value)]
@@ -24,6 +27,17 @@ mod abi {
alloy_sol_macro::sol!("contracts/Deployer.sol");
}
const BYTECODE: &[u8] = {
const BYTECODE_HEX: &[u8] =
include_bytes!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-deployer/Deployer.bin"));
const BYTECODE: [u8; BYTECODE_HEX.len() / 2] =
match hex::const_decode_to_array::<{ BYTECODE_HEX.len() / 2 }>(BYTECODE_HEX) {
Ok(bytecode) => bytecode,
Err(_) => panic!("Deployer.bin did not contain valid hex"),
};
&BYTECODE
};
/// The Deployer contract for the Serai Router contract.
///
/// This Deployer has a deterministic address, letting it be immediately identified on any instance
@@ -38,24 +52,45 @@ impl Deployer {
/// funded for this transaction to be submitted. This account has no known private key to anyone
/// so ETH sent can be neither misappropriated nor returned.
pub fn deployment_tx() -> Signed<TxLegacy> {
pub const BYTECODE: &[u8] =
include_bytes!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-deployer/Deployer.bin"));
let bytecode =
Bytes::from_hex(BYTECODE).expect("compiled-in Deployer bytecode wasn't valid hex");
let bytecode = Bytes::from(BYTECODE);
// Legacy transactions are used to ensure the widest possible degree of support across EVMs
let tx = TxLegacy {
chain_id: None,
nonce: 0,
// 100 gwei
/*
This needs to use a fixed gas price to achieve a deterministic address. The gas price is
fixed to 100 gwei, which should be generous, in order to make this unlikely to get stuck.
While potentially expensive, this only has to occur per chain this is deployed on.
If this is too low of a gas price, private mempools can be used, with other transactions in
the bundle raising the gas price to acceptable levels. While this strategy could be
entirely relied upon, allowing the gas price paid to reflect the network's actual gas
price, that wouldn't work for EVM networks without private mempools.
That leaves this as failing only if it violates a protocol constant, or if the gas price is
too low on a network without private mempools to publish via. In that case, this code
should be forked to accept an enum of which network the deployment is for (with the gas
price derived from that, kept as common as possible across networks to minimize the
number of addresses representing the Deployer).
*/
gas_price: 100_000_000_000u128,
// TODO: Use a more accurate gas limit
gas_limit: 1_000_000u64,
/*
This is twice the cost of deployment as of Ethereum's Cancun upgrade. The wide margin is to
increase the likelihood of surviving changes to the cost of contract deployment (notably
the gas cost of calldata). While wasteful, this only has to be done once per chain and is
accepted accordingly.
If this is ever unacceptable, the parameterization suggested in case the `gas_price` is
unacceptable should be implemented.
*/
gas_limit: 300_698,
to: TxKind::Create,
value: U256::ZERO,
input: bytecode,
};
ethereum_primitives::deterministically_sign(&tx)
ethereum_primitives::deterministically_sign(tx)
}
/// Obtain the deterministic address for this contract.
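The address is deterministic because `CREATE` addresses depend only on the sender and nonce (`keccak256(rlp([sender, nonce]))[12 ..]`), both fixed here. A sketch of checking that, assuming alloy-primitives' `Address::create` helper:

let tx = Deployer::deployment_tx();
let sender = tx.recover_signer().unwrap();
// Nonce 0, as this is the sole transaction ever made by this account
assert_eq!(sender.create(0), Deployer::address());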


@@ -0,0 +1,107 @@
use std::sync::Arc;
use alloy_rpc_types_eth::{TransactionInput, TransactionRequest};
use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};
use alloy_node_bindings::Anvil;
use crate::{
abi::Deployer::{PriorDeployed, DeploymentFailed, DeployerErrors},
Deployer,
};
#[tokio::test]
async fn test_deployer() {
const CANCUN: &str = "cancun";
const LATEST: &str = "latest";
for network in [CANCUN, LATEST] {
let anvil = Anvil::new().arg("--hardfork").arg(network).spawn();
let provider = Arc::new(RootProvider::new(
ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
));
// Deploy the Deployer
{
let deployment_tx = Deployer::deployment_tx();
let gas_programmed = deployment_tx.tx().gas_limit;
let receipt = ethereum_test_primitives::publish_tx(&provider, deployment_tx).await;
assert!(receipt.status());
assert_eq!(receipt.contract_address.unwrap(), Deployer::address());
if network == CANCUN {
// Check the gas programmed was twice the gas used
// We only check this for cancun as the constant was programmed per cancun's gas pricing
assert_eq!(2 * receipt.gas_used, gas_programmed);
}
}
// Deploy the deployer with the deployer
let mut deploy_tx = Deployer::deploy_tx(crate::BYTECODE.to_vec());
deploy_tx.gas_price = 100_000_000_000u128;
deploy_tx.gas_limit = 1_000_000;
{
let deploy_tx = ethereum_primitives::deterministically_sign(deploy_tx.clone());
let receipt = ethereum_test_primitives::publish_tx(&provider, deploy_tx).await;
assert!(receipt.status());
}
// Verify we can now find the deployer
{
let deployer = Deployer::new(provider.clone()).await.unwrap().unwrap();
let deployed_deployer = deployer
.find_deployment(ethereum_primitives::keccak256(crate::BYTECODE))
.await
.unwrap()
.unwrap();
assert_eq!(
provider.get_code_at(deployed_deployer).await.unwrap(),
provider.get_code_at(Deployer::address()).await.unwrap(),
);
assert!(deployed_deployer != Deployer::address());
}
// Verify deploying the same init code multiple times fails
{
let mut deploy_tx = deploy_tx;
// Change the gas price to cause a distinct message, and with it, a distinct signer
deploy_tx.gas_price += 1;
let deploy_tx = ethereum_primitives::deterministically_sign(deploy_tx);
let receipt = ethereum_test_primitives::publish_tx(&provider, deploy_tx.clone()).await;
assert!(!receipt.status());
let call = TransactionRequest::default()
.to(Deployer::address())
.input(TransactionInput::new(deploy_tx.tx().input.clone()));
let call_err = provider.call(&call).await.unwrap_err();
assert!(matches!(
call_err.as_error_resp().unwrap().as_decoded_error::<DeployerErrors>(true).unwrap(),
DeployerErrors::PriorDeployed(PriorDeployed {}),
));
}
// Verify deployment failures yield errors properly
{
// 0xfe is an invalid opcode which is guaranteed to remain invalid
let mut deploy_tx = Deployer::deploy_tx(vec![0xfe]);
deploy_tx.gas_price = 100_000_000_000u128;
deploy_tx.gas_limit = 1_000_000;
let deploy_tx = ethereum_primitives::deterministically_sign(deploy_tx);
let receipt = ethereum_test_primitives::publish_tx(&provider, deploy_tx.clone()).await;
assert!(!receipt.status());
let call = TransactionRequest::default()
.to(Deployer::address())
.input(TransactionInput::new(deploy_tx.tx().input.clone()));
let call_err = provider.call(&call).await.unwrap_err();
assert!(matches!(
call_err.as_error_resp().unwrap().as_decoded_error::<DeployerErrors>(true).unwrap(),
DeployerErrors::DeploymentFailed(DeploymentFailed {}),
));
}
}
}


@@ -27,4 +27,6 @@ alloy-transport = { version = "0.9", default-features = false }
alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false }
alloy-provider = { version = "0.9", default-features = false }
tokio = { version = "1", default-features = false, features = ["rt"] }
ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "../primitives", default-features = false }
futures-util = { version = "0.3", default-features = false, features = ["std"] }


@@ -18,3 +18,17 @@ interface IERC20 {
function approve(address spender, uint256 value) external returns (bool);
function allowance(address owner, address spender) external view returns (uint256);
}
interface SeraiIERC20 {
function transferWithInInstruction01BB244A8A(
address to,
uint256 value,
bytes calldata inInstruction
) external returns (bool);
function transferFromWithInInstruction00081948E0(
address from,
address to,
uint256 value,
bytes calldata inInstruction
) external returns (bool);
}


@@ -2,22 +2,26 @@
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
use std::{sync::Arc, collections::HashSet};
use core::borrow::Borrow;
use std::{sync::Arc, collections::HashMap};
use alloy_core::primitives::{Address, B256, U256};
use alloy_core::primitives::{Address, U256};
use alloy_sol_types::{SolInterface, SolEvent};
use alloy_rpc_types_eth::{Filter, TransactionTrait};
use alloy_rpc_types_eth::{Log, Filter, TransactionTrait};
use alloy_transport::{TransportErrorKind, RpcError};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};
use tokio::task::JoinSet;
use ethereum_primitives::LogIndex;
use futures_util::stream::{StreamExt, FuturesUnordered};
#[rustfmt::skip]
#[expect(warnings)]
#[expect(needless_pass_by_value)]
#[expect(missing_docs)]
#[expect(clippy::all)]
#[expect(clippy::ignored_unit_patterns)]
#[expect(clippy::redundant_closure_for_method_calls)]
@@ -25,15 +29,28 @@ mod abi {
alloy_sol_macro::sol!("contracts/IERC20.sol");
}
use abi::IERC20::{IERC20Calls, transferCall, transferFromCall};
use abi::SeraiIERC20::SeraiIERC20Calls;
pub use abi::IERC20::Transfer;
pub use abi::SeraiIERC20::{
transferWithInInstruction01BB244A8ACall as transferWithInInstructionCall,
transferFromWithInInstruction00081948E0Call as transferFromWithInInstructionCall,
};
#[cfg(test)]
mod tests;
/// A top-level ERC20 transfer
///
/// This does not include `token`, `to` fields. Those are assumed contextual to the creation of
/// this.
#[derive(Clone, Debug)]
pub struct TopLevelTransfer {
/// The ID of the event for this transfer.
pub id: ([u8; 32], u64),
pub id: LogIndex,
/// The hash of the transaction which caused this transfer.
pub transaction_hash: [u8; 32],
/// The address which made the transfer.
pub from: [u8; 20],
pub from: Address,
/// The amount transferred.
pub amount: U256,
/// The data appended after the call itself.
@@ -42,156 +59,187 @@ pub struct TopLevelTransfer {
/// A view for an ERC20 contract.
#[derive(Clone, Debug)]
pub struct Erc20(Arc<RootProvider<SimpleRequest>>, Address);
pub struct Erc20 {
provider: Arc<RootProvider<SimpleRequest>>,
address: Address,
}
impl Erc20 {
/// Construct a new view of the specified ERC20 contract.
pub fn new(provider: Arc<RootProvider<SimpleRequest>>, address: [u8; 20]) -> Self {
Self(provider, Address::from(&address))
pub fn new(provider: Arc<RootProvider<SimpleRequest>>, address: Address) -> Self {
Self { provider, address }
}
/// Match a transaction for its top-level transfer to the specified address (if one exists).
pub async fn match_top_level_transfer(
/// The filter for transfer logs of the specified ERC20, to the specified recipient.
pub fn transfer_filter(from_block: u64, to_block: u64, erc20: Address, to: Address) -> Filter {
let filter = Filter::new().from_block(from_block).to_block(to_block);
filter.address(erc20).event_signature(Transfer::SIGNATURE_HASH).topic2(to.into_word())
}
/// Yield the top-level transfer for the specified transaction (if one exists).
///
/// The passed-in logs MUST be the logs for this transaction. The logs MUST be filtered to the
/// `Transfer` events of the intended token(s) and of the intended recipient (`to`). These
/// properties are completely unchecked and assumed to hold.
///
/// This does NOT yield THE top-level transfer. If multiple `Transfer` events have identical
/// structure to the top-level transfer call, the earliest `Transfer` event present in the logs
/// is considered the top-level transfer.
// Yielding THE top-level transfer would require tracing the transaction execution and isn't
// worth the effort.
pub async fn top_level_transfer(
provider: impl AsRef<RootProvider<SimpleRequest>>,
transaction_id: B256,
to: Address,
transaction_hash: [u8; 32],
mut transfer_logs: Vec<impl Borrow<Log>>,
) -> Result<Option<TopLevelTransfer>, RpcError<TransportErrorKind>> {
// Fetch the transaction
let transaction =
provider.as_ref().get_transaction_by_hash(transaction_id).await?.ok_or_else(|| {
TransportErrorKind::Custom(
"node didn't have the transaction which emitted a log it had".to_string().into(),
)
})?;
provider.as_ref().get_transaction_by_hash(transaction_hash.into()).await?.ok_or_else(
|| {
TransportErrorKind::Custom(
"node didn't have the transaction which emitted a log it had".to_string().into(),
)
},
)?;
// If this is a top-level call...
// Don't validate the encoding as this can't be re-encoded to an identical bytestring due
// to the `InInstruction` appended after the call itself
if let Ok(call) = IERC20Calls::abi_decode(transaction.inner.input(), false) {
// Extract the top-level call's from/to/value
let (from, call_to, value) = match call {
IERC20Calls::transfer(transferCall { to, value }) => (transaction.from, to, value),
IERC20Calls::transferFrom(transferFromCall { from, to, value }) => (from, to, value),
// Treat any other function selectors as unrecognized
_ => return Ok(None),
let Ok(call) = IERC20Calls::abi_decode(transaction.inner.input(), false) else {
return Ok(None);
};
// Extract the top-level call's from/to/value
let (from, to, value) = match call {
IERC20Calls::transfer(transferCall { to, value }) => (transaction.from, to, value),
IERC20Calls::transferFrom(transferFromCall { from, to, value }) => (from, to, value),
// Treat any other function selectors as unrecognized
_ => return Ok(None),
};
// Sort the logs to ensure the earliest logs are first
transfer_logs.sort_by_key(|log| log.borrow().log_index);
// Find the log for this top-level transfer
for log in transfer_logs {
// Check the log is for the called contract
// This handles the edge case where we're checking if transfers of token X were top-level and
// a transfer of token Y (with equivalent structure) was top-level
if Some(log.borrow().address()) != transaction.inner.to() {
continue;
}
// Since the caller is responsible for filtering these to `Transfer` events, we can assume
// this is a non-compliant ERC20 or an error with the logs fetched. We assume ERC20
// compliance here, making this an RPC error
let log = log.borrow().log_decode::<Transfer>().map_err(|_| {
TransportErrorKind::Custom("log didn't include a valid transfer event".to_string().into())
})?;
let block_hash = log.block_hash.ok_or_else(|| {
TransportErrorKind::Custom("log didn't have its block hash set".to_string().into())
})?;
let log_index = log.log_index.ok_or_else(|| {
TransportErrorKind::Custom("log didn't have its index set".to_string().into())
})?;
let log = log.inner.data;
// Ensure the top-level transfer is equivalent to the transfer this log represents
if !((log.from == from) && (log.to == to) && (log.value == value)) {
continue;
}
// Read the data appended after
let data = if let Ok(call) = SeraiIERC20Calls::abi_decode(transaction.inner.input(), true) {
match call {
SeraiIERC20Calls::transferWithInInstruction01BB244A8A(
transferWithInInstructionCall { inInstruction, .. },
) |
SeraiIERC20Calls::transferFromWithInInstruction00081948E0(
transferFromWithInInstructionCall { inInstruction, .. },
) => Vec::from(inInstruction),
}
} else {
// We don't error here so this transfer is propagated up the stack, even without the
// InInstruction. In practice, Serai should acknowledge this and return it to the sender
vec![]
};
// If this isn't a transfer to the expected address, return None
if call_to != to {
return Ok(None);
}
// Fetch the transaction's logs
let receipt =
provider.as_ref().get_transaction_receipt(transaction_id).await?.ok_or_else(|| {
TransportErrorKind::Custom(
"node didn't have receipt for a transaction we were matching for a top-level transfer"
.to_string()
.into(),
)
})?;
// Find the log for this transfer
for log in receipt.inner.logs() {
// If this log was emitted by a different contract, continue
if Some(log.address()) != transaction.inner.to() {
continue;
}
// Check if this is actually a transfer log
// https://github.com/alloy-rs/core/issues/589
if log.topics().first() != Some(&Transfer::SIGNATURE_HASH) {
continue;
}
let log_index = log.log_index.ok_or_else(|| {
TransportErrorKind::Custom("log didn't have its index set".to_string().into())
})?;
let log = log
.log_decode::<Transfer>()
.map_err(|e| {
TransportErrorKind::Custom(format!("failed to decode Transfer log: {e:?}").into())
})?
.inner
.data;
// Ensure the top-level transfer is equivalent to the transfer this log represents. Since
// we can't find the exact top-level transfer without tracing the call, we just rule the
// first equivalent transfer as THE top-level transfer
if !((log.from == from) && (log.to == to) && (log.value == value)) {
continue;
}
// Read the data appended after
let encoded = call.abi_encode();
let data = transaction.inner.input().as_ref()[encoded.len() ..].to_vec();
return Ok(Some(TopLevelTransfer {
id: (*transaction_id, log_index),
from: *log.from.0,
amount: log.value,
data,
}));
}
return Ok(Some(TopLevelTransfer {
id: LogIndex { block_hash: *block_hash, index_within_block: log_index },
transaction_hash,
from: log.from,
amount: log.value,
data,
}));
}
Ok(None)
}
/// Fetch all top-level transfers to the specified address.
/// Fetch all top-level transfers to the specified address for this token.
///
/// The result of this function is unordered.
pub async fn top_level_transfers(
pub async fn top_level_transfers_unordered(
&self,
block: u64,
from_block: u64,
to_block: u64,
to: Address,
) -> Result<Vec<TopLevelTransfer>, RpcError<TransportErrorKind>> {
// Get all transfers within this block
let filter = Filter::new().from_block(block).to_block(block).address(self.1);
let filter = filter.event_signature(Transfer::SIGNATURE_HASH);
let mut to_topic = [0; 32];
to_topic[12 ..].copy_from_slice(to.as_ref());
let filter = filter.topic2(B256::from(to_topic));
let logs = self.0.get_logs(&filter).await?;
// Get all transfers within these blocks
let logs = self
.provider
.get_logs(&Self::transfer_filter(from_block, to_block, self.address, to))
.await?;
// These logs are for all transactions which performed any transfer
// We now check each transaction for having a top-level transfer to the specified address
let tx_ids = logs
.into_iter()
.map(|log| {
// Double check the address which emitted this log
if log.address() != self.1 {
Err(TransportErrorKind::Custom(
"node returned logs for a different address than requested".to_string().into(),
))?;
}
// The logs, indexed by their transactions
let mut transaction_logs = HashMap::new();
// Index the logs by their transactions
for log in logs {
// Double check the address which emitted this log
if log.address() != self.address {
Err(TransportErrorKind::Custom(
"node returned logs for a different address than requested".to_string().into(),
))?;
}
// Double check the event signature for this log
if log.topics().first() != Some(&Transfer::SIGNATURE_HASH) {
Err(TransportErrorKind::Custom(
"node returned a log for a different topic than filtered to".to_string().into(),
))?;
}
// Double check the `to` topic
if log.topics().get(2) != Some(&to.into_word()) {
Err(TransportErrorKind::Custom(
"node returned a transfer for a different `to` than filtered to".to_string().into(),
))?;
}
log.transaction_hash.ok_or_else(|| {
let tx_id = log
.transaction_hash
.ok_or_else(|| {
TransportErrorKind::Custom("log didn't specify its transaction hash".to_string().into())
})
})
.collect::<Result<HashSet<_>, _>>()?;
})?
.0;
let mut join_set = JoinSet::new();
for tx_id in tx_ids {
join_set.spawn(Self::match_top_level_transfer(self.0.clone(), tx_id, to));
transaction_logs.entry(tx_id).or_insert_with(|| Vec::with_capacity(1)).push(log);
}
// Use `FuturesUnordered` so these RPC calls run in parallel
let mut futures = FuturesUnordered::new();
for (tx_id, transfer_logs) in transaction_logs {
futures.push(Self::top_level_transfer(&self.provider, tx_id, transfer_logs));
}
let mut top_level_transfers = vec![];
while let Some(top_level_transfer) = join_set.join_next().await {
// This is an error if a task panics or aborts
// Panicking on a task panic is desired behavior, and we haven't aborted any tasks
match top_level_transfer.unwrap() {
while let Some(top_level_transfer) = futures.next().await {
match top_level_transfer {
// Top-level transfer
Ok(Some(top_level_transfer)) => top_level_transfers.push(top_level_transfer),
// Not a top-level transfer
Ok(None) => continue,
// Failed to get this transaction's information so abort
Err(e) => {
join_set.abort_all();
Err(e)?
}
Err(e) => Err(e)?,
}
}
Ok(top_level_transfers)
}
}
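A sketch of consuming this API (assuming `provider: Arc<RootProvider<SimpleRequest>>` and `token`, `recipient` as `Address`es; the names are illustrative):

let erc20 = Erc20::new(provider.clone(), token);
// Results are unordered, as documented; sort by `id` if ordering matters downstream
let transfers =
  erc20.top_level_transfers_unordered(from_block, to_block, recipient).await?;
for transfer in &transfers {
  println!("{} from {} at {:?}", transfer.amount, transfer.from, transfer.id);
}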


@@ -0,0 +1,13 @@
use alloy_sol_types::SolCall;
#[test]
fn selector_collisions() {
assert_eq!(
crate::abi::IERC20::transferCall::SELECTOR,
crate::abi::SeraiIERC20::transferWithInInstruction01BB244A8ACall::SELECTOR
);
assert_eq!(
crate::abi::IERC20::transferFromCall::SELECTOR,
crate::abi::SeraiIERC20::transferFromWithInInstruction00081948E0Call::SELECTOR
);
}
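These collisions are possible because a Solidity selector is just the first four bytes of `keccak256` over the canonical signature; the hex-suffixed names were evidently ground until their hashes matched. A minimal sketch of selector derivation, using the `sha3` crate added as a dependency in this changeset (the `transfer(address,uint256)` selector `0xa9059cbb` is well-known):

use sha3::{Digest, Keccak256};

// A selector is the first four bytes of keccak256 over the canonical signature
fn selector(signature: &str) -> [u8; 4] {
  let hash = Keccak256::digest(signature.as_bytes());
  [hash[0], hash[1], hash[2], hash[3]]
}

#[test]
fn transfer_selector() {
  assert_eq!(selector("transfer(address,uint256)"), [0xa9, 0x05, 0x9c, 0xbb]);
}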


@@ -17,8 +17,10 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
group = { version = "0.13", default-features = false }
k256 = { version = "^0.13.1", default-features = false, features = ["std", "arithmetic"] }
alloy-core = { version = "0.8", default-features = false }
alloy-primitives = { version = "0.8", default-features = false }
alloy-consensus = { version = "0.9", default-features = false, features = ["k256"] }


@@ -0,0 +1,24 @@
use ::borsh::{io, BorshSerialize, BorshDeserialize};
use alloy_primitives::{U256, Address};
/// Serialize a U256 with a borsh-compatible API.
pub fn serialize_u256(value: &U256, writer: &mut impl io::Write) -> io::Result<()> {
let value: [u8; 32] = value.to_be_bytes();
value.serialize(writer)
}
/// Deserialize a U256 with a borsh-compatible API.
pub fn deserialize_u256(reader: &mut impl io::Read) -> io::Result<U256> {
<[u8; 32]>::deserialize_reader(reader).map(|value| U256::from_be_bytes(value))
}
/// Serialize an address with a borsh-compatible API.
pub fn serialize_address(address: &Address, writer: &mut impl io::Write) -> io::Result<()> {
<[u8; 20]>::from(address.0).serialize(writer)
}
/// Deserialize an address with a borsh-compatible API.
pub fn deserialize_address(reader: &mut impl io::Read) -> io::Result<Address> {
<[u8; 20]>::deserialize_reader(reader).map(|address| Address(address.into()))
}
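A sketch of how these helpers slot into borsh's derive, assuming borsh 1's `serialize_with`/`deserialize_with` field attributes (the `Payment` struct is illustrative):

use borsh::{BorshSerialize, BorshDeserialize};
use alloy_primitives::{U256, Address};

// Illustrative struct using the helpers above for non-borsh field types
#[derive(BorshSerialize, BorshDeserialize)]
struct Payment {
  #[borsh(serialize_with = "serialize_address", deserialize_with = "deserialize_address")]
  to: Address,
  #[borsh(serialize_with = "serialize_u256", deserialize_with = "deserialize_u256")]
  amount: U256,
}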


@@ -2,47 +2,94 @@
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
use ::borsh::{BorshSerialize, BorshDeserialize};
use group::ff::PrimeField;
use k256::Scalar;
use alloy_core::primitives::PrimitiveSignature;
use alloy_primitives::PrimitiveSignature;
use alloy_consensus::{SignableTransaction, Signed, TxLegacy};
mod borsh;
pub use borsh::*;
/// An index of a log within a block.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
#[borsh(crate = "::borsh")]
pub struct LogIndex {
/// The hash of the block which produced this log.
pub block_hash: [u8; 32],
/// The index of this log within the execution of the block.
pub index_within_block: u64,
}
/// The Keccak256 hash function.
pub fn keccak256(data: impl AsRef<[u8]>) -> [u8; 32] {
alloy_core::primitives::keccak256(data.as_ref()).into()
alloy_primitives::keccak256(data.as_ref()).into()
}
/// Deterministically sign a transaction.
///
/// This signs a transaction via setting `r = 1, s = 1`, and incrementing `r` until a signer is
/// recoverable from the signature for this transaction. The purpose of this is to be able to send
/// a transaction from a known account which no one knows the private key for.
/// This signs a transaction via setting a signature of `r = 1, s = 1`. The purpose of this is to
/// be able to send a transaction from an account no one knows the private key for, and for which
/// no other messages can be signed.
///
/// This function panics if passed a transaction with a non-None chain ID. This is because the
/// signer for this transaction is only singular across any/all EVM instances if it isn't binding
/// to an instance.
pub fn deterministically_sign(tx: &TxLegacy) -> Signed<TxLegacy> {
pub fn deterministically_sign(tx: TxLegacy) -> Signed<TxLegacy> {
assert!(
tx.chain_id.is_none(),
"chain ID was Some when deterministically signing a TX (causing a non-singular signer)"
);
let mut r = Scalar::ONE;
/*
ECDSA signatures are:
- x = private key
- k = rand()
- R = k * G
- r = R.x()
- s = (H(m) + (r * x)) * k.invert()
Key recovery is performed via:
- a = s * R = (H(m) + (r * x)) * G
- b = a - (H(m) * G) = (r * x) * G
- X = b / r = x * G
- X = ((s * R) - (H(m) * G)) * r.invert()
This requires `r` be non-zero and `R` be recoverable from `r` and the parity byte. For
`r = 1, s = 1`, this sets `X` to `R - (H(m) * G)`. Since an `R` is recoverable for `r = 1`,
since that `R` is a point with an unknown discrete logarithm w.r.t. the generator, and
since the resulting key depends on the message signed for, this always works as
specified.
*/
let r = Scalar::ONE;
let s = Scalar::ONE;
loop {
// Create the signature
let r_bytes: [u8; 32] = r.to_repr().into();
let s_bytes: [u8; 32] = s.to_repr().into();
let signature =
PrimitiveSignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), false);
let r_bytes: [u8; 32] = r.to_repr().into();
let s_bytes: [u8; 32] = s.to_repr().into();
let signature =
PrimitiveSignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), false);
// Check if this is a valid signature
let tx = tx.clone().into_signed(signature);
if tx.recover_signer().is_ok() {
return tx;
}
r += Scalar::ONE;
}
let res = tx.into_signed(signature);
debug_assert!(res.recover_signer().is_ok());
res
}
#[test]
fn test_deterministically_sign() {
let tx = TxLegacy { chain_id: None, ..Default::default() };
let signed = deterministically_sign(tx.clone());
assert!(signed.recover_signer().is_ok());
let one = alloy_primitives::U256::from(1u64);
assert_eq!(signed.signature().r(), one);
assert_eq!(signed.signature().s(), one);
let mut other_tx = tx.clone();
other_tx.nonce += 1;
// Signing a distinct message should yield a distinct signer
assert!(
signed.recover_signer().unwrap() != deterministically_sign(other_tx).recover_signer().unwrap()
);
}


@@ -17,6 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
group = { version = "0.13", default-features = false }
alloy-core = { version = "0.8", default-features = false }
@@ -37,8 +39,11 @@ ethereum-primitives = { package = "serai-processor-ethereum-primitives", path =
ethereum-deployer = { package = "serai-processor-ethereum-deployer", path = "../deployer", default-features = false }
erc20 = { package = "serai-processor-ethereum-erc20", path = "../erc20", default-features = false }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
serai-client = { path = "../../../substrate/client", default-features = false, features = ["ethereum"] }
futures-util = { version = "0.3", default-features = false, features = ["std"] }
[build-dependencies]
build-solidity-contracts = { path = "../../../networks/ethereum/build-contracts", default-features = false }


@@ -34,15 +34,16 @@ interface IRouterWithoutCollisions {
* An `OutInstruction` is considered as having succeeded if the call transferring ETH doesn't
* fail, the ERC20 transfer doesn't fail, and any executed code doesn't revert.
*/
event Executed(uint256 indexed nonce, bytes32 indexed messageHash, bytes results);
event Batch(uint256 indexed nonce, bytes32 indexed messageHash, bytes results);
/// @notice Emitted when `escapeHatch` is invoked
/// @param escapeTo The address to escape to
event EscapeHatch(address indexed escapeTo);
event EscapeHatch(uint256 indexed nonce, address indexed escapeTo);
/// @notice Emitted when coins escape through the escape hatch
/// @param coin The coin which escaped
event Escaped(address indexed coin);
/// @param amount The amount which escaped
event Escaped(address indexed coin, uint256 amount);
/// @notice The key for Serai was invalid
/// @dev This is incomplete and not always guaranteed to be thrown upon an invalid key
@@ -57,13 +58,17 @@ interface IRouterWithoutCollisions {
/// @notice The call to an ERC20's `transferFrom` failed
error TransferFromFailed();
/// @notice `execute` was re-entered
error ReenteredExecute();
/// @notice A non-reentrant function was re-entered
error Reentered();
/// @notice An invalid address to escape to was specified.
error InvalidEscapeAddress();
/// @notice The escape address wasn't a contract.
error EscapeAddressWasNotAContract();
/// @notice Escaping when escape hatch wasn't invoked.
error EscapeHatchNotInvoked();
/// @notice Escaping failed to transfer out.
error EscapeFailed();
/// @notice Transfer coins into Serai with an instruction
/// @param coin The coin to transfer in (address(0) if Ether)
@@ -122,7 +127,10 @@ interface IRouter is IRouterWithoutCollisions {
}
/// @title The type of destination
/// @dev A destination is either an address or a blob of code to deploy and call
/**
* @dev A destination is either an ABI-encoded address or an ABI-encoded `CodeDestination`
* containing code to deploy (invoking its constructor).
*/
enum DestinationType {
Address,
Code


@@ -25,13 +25,11 @@ import "IRouter.sol";
/// @author Luke Parker <lukeparker@serai.exchange>
/// @notice Intakes coins for the Serai network and handles relaying batches of transfers out
contract Router is IRouterWithoutCollisions {
/// @dev The code hash for a non-empty account without code
bytes32 constant ACCOUNT_WITHOUT_CODE_CODEHASH = keccak256("");
/// @dev The address in transient storage used for the reentrancy guard
bytes32 constant EXECUTE_REENTRANCY_GUARD_SLOT = bytes32(
/*
keccak256("ReentrancyGuard Router.execute") - 1
*/
0xcf124a063de1614fedbd6b47187f98bf8873a1ae83da5c179a5881162f5b2401
);
bytes32 constant REENTRANCY_GUARD_SLOT = bytes32(uint256(keccak256("ReentrancyGuard Router")) - 1);
/**
* @dev The next nonce used to determine the address of contracts deployed with CREATE. This is
@@ -65,6 +63,28 @@ contract Router is IRouterWithoutCollisions {
/// @dev The address escaped to
address private _escapedTo;
/// @dev Acquire the re-entrancy lock for the lifetime of this transaction
modifier nonReentrant() {
bytes32 reentrancyGuardSlot = REENTRANCY_GUARD_SLOT;
bytes32 priorEntered;
// slither-disable-next-line assembly
assembly {
priorEntered := tload(reentrancyGuardSlot)
tstore(reentrancyGuardSlot, 1)
}
if (priorEntered != bytes32(0)) {
revert Reentered();
}
_;
// Clear the re-entrancy guard to allow multiple transactions to non-re-entrant functions within
// a transaction
assembly {
tstore(reentrancyGuardSlot, 0)
}
}
/// @dev Set the next Serai key. This does not read from/write to `_nextNonce`
/// @param nonceUpdatedWith The nonce used to set the next key
/// @param nextSeraiKeyVar The key to set as next
@@ -140,15 +160,16 @@ contract Router is IRouterWithoutCollisions {
bytes32 signatureS;
// slither-disable-next-line assembly
uint256 chainID = block.chainid;
assembly {
// Read the signature (placed after the function signature)
signatureC := mload(add(message, 36))
signatureS := mload(add(message, 68))
// Overwrite the signature challenge with the nonce
mstore(add(message, 36), nonceUsed)
// Overwrite the signature response with 0
mstore(add(message, 68), 0)
// Overwrite the signature challenge with the chain ID
mstore(add(message, 36), chainID)
// Overwrite the signature response with the nonce
mstore(add(message, 68), nonceUsed)
// Calculate the message hash
messageHash := keccak256(add(message, 32), messageLen)
@@ -405,6 +426,12 @@ contract Router is IRouterWithoutCollisions {
* fee.
*
* The hex bytes are to cause a function selector collision with `IRouter.execute`.
*
* Re-entrancy is prevented because we emit a bitmask of which `OutInstruction`s succeeded. Doing
* that requires executing the `OutInstruction`s, which may re-enter here. While our application
* of CEI with `verifySignature` prevents replays, re-entrancy would allow out-of-order
* completion for the execution of batches (despite their in-order start of execution) which
* isn't a headache worth dealing with.
*/
// @param signature The signature by the current key for Serai's Ethereum validators
// @param coin The coin all of these `OutInstruction`s are for
@@ -412,26 +439,7 @@ contract Router is IRouterWithoutCollisions {
// @param outs The `OutInstruction`s to act on
// Each individual call is explicitly metered to ensure there isn't a DoS here
// slither-disable-next-line calls-loop,reentrancy-events
function execute4DE42904() external {
/*
Prevent re-entrancy.
We emit a bitmask of which `OutInstruction`s succeeded. Doing that requires executing the
`OutInstruction`s, which may re-enter here. While our application of CEI with verifySignature
prevents replays, re-entrancy would allow out-of-order execution of batches (despite their
in-order start of execution) which isn't a headache worth dealing with.
*/
bytes32 executeReentrancyGuardSlot = EXECUTE_REENTRANCY_GUARD_SLOT;
bytes32 priorEntered;
// slither-disable-next-line assembly
assembly {
priorEntered := tload(executeReentrancyGuardSlot)
tstore(executeReentrancyGuardSlot, 1)
}
if (priorEntered != bytes32(0)) {
revert ReenteredExecute();
}
function execute4DE42904() external nonReentrant {
(uint256 nonceUsed, bytes memory args, bytes32 message) = verifySignature(_seraiKey);
(,, address coin, uint256 fee, IRouter.OutInstruction[] memory outs) =
abi.decode(args, (bytes32, bytes32, address, uint256, IRouter.OutInstruction[]));
@@ -509,11 +517,11 @@ contract Router is IRouterWithoutCollisions {
}
/*
Emit execution with the status of all included events.
Emit batch execution with the status of all included events.
This is an effect after interactions yet we have a reentrancy guard making this safe.
*/
emit Executed(nonceUsed, message, results);
emit Batch(nonceUsed, message, results);
// Transfer the fee to the relayer
transferOut(msg.sender, coin, fee);
@@ -529,13 +537,35 @@ contract Router is IRouterWithoutCollisions {
// @param escapeTo The address to escape to
function escapeHatchDCDD91CC() external {
// Verify the signature
(, bytes memory args,) = verifySignature(_seraiKey);
(uint256 nonceUsed, bytes memory args,) = verifySignature(_seraiKey);
(,, address escapeTo) = abi.decode(args, (bytes32, bytes32, address));
if (escapeTo == address(0)) {
revert InvalidEscapeAddress();
}
/*
We could define the escape hatch as having its own confirmation flow, as new keys do, but new
contracts don't face all of the cryptographic concerns faced by new keys. New contracts also
would presumably be moved to after strict review, making the chance of specifying the wrong
contract incredibly unlikely.
The only check performed accordingly (with no confirmation flow) is that the new contract is
in fact a contract. This is done to confirm the contract was successfully deployed on this
blockchain.
This check also covers the zero-address case, but this function doesn't have to be
perfectly optimized, and it's better to handle that case explicitly since it's its own
invariant.
*/
{
bytes32 codehash = escapeTo.codehash;
if ((codehash == bytes32(0)) || (codehash == ACCOUNT_WITHOUT_CODE_CODEHASH)) {
revert EscapeAddressWasNotAContract();
}
}
/*
We want to define the escape hatch so that coins here now, and any latently received, can be forwarded.
If the last Serai key set could update the escape hatch, they could siphon off latently
@@ -546,7 +576,7 @@ contract Router is IRouterWithoutCollisions {
}
_escapedTo = escapeTo;
emit EscapeHatch(escapeTo);
emit EscapeHatch(nonceUsed, escapeTo);
}
/// @notice Escape coins after the escape hatch has been invoked
@@ -556,8 +586,6 @@ contract Router is IRouterWithoutCollisions {
revert EscapeHatchNotInvoked();
}
emit Escaped(coin);
// Fetch the amount to escape
uint256 amount = address(this).balance;
if (coin != address(0)) {
@@ -565,7 +593,13 @@ contract Router is IRouterWithoutCollisions {
}
// Perform the transfer
transferOut(_escapedTo, coin, amount);
// While this can be re-entered to try escaping our balance twice, the outer call will fail
if (!transferOut(_escapedTo, coin, amount)) {
revert EscapeFailed();
}
// Since we successfully escaped this amount, emit the event for it
emit Escaped(coin, amount);
}
/// @notice Fetch the next nonce to use by an action published to this contract

File diff suppressed because it is too large

@@ -0,0 +1,21 @@
use alloy_sol_types::SolCall;
#[test]
fn selector_collisions() {
assert_eq!(
crate::_irouter_abi::IRouter::confirmNextSeraiKeyCall::SELECTOR,
crate::_router_abi::Router::confirmNextSeraiKey34AC53ACCall::SELECTOR
);
assert_eq!(
crate::_irouter_abi::IRouter::updateSeraiKeyCall::SELECTOR,
crate::_router_abi::Router::updateSeraiKey5A8542A2Call::SELECTOR
);
assert_eq!(
crate::_irouter_abi::IRouter::executeCall::SELECTOR,
crate::_router_abi::Router::execute4DE42904Call::SELECTOR
);
assert_eq!(
crate::_irouter_abi::IRouter::escapeHatchCall::SELECTOR,
crate::_router_abi::Router::escapeHatchDCDD91CCCall::SELECTOR
);
}
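For context, the EVM dispatches calls on a four-byte selector: the first four bytes of the
keccak256 hash of the function's signature. A minimal sketch of that derivation (assuming the
`alloy_core` crate used elsewhere in this workspace); the hex suffix in each collided name is
presumably mined so its full signature hashes to the same leading four bytes as the unsuffixed
interface function:
fn selector(signature: &str) -> [u8; 4] {
  // e.g. signature = "execute(...)", with the parameter types spelled out
  let hash = alloy_core::primitives::keccak256(signature.as_bytes());
  // The selector is the first four bytes of the hash
  [hash[0], hash[1], hash[2], hash[3]]
}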


@@ -10,49 +10,34 @@ use alloy_sol_types::SolCall;
use alloy_consensus::TxLegacy;
use alloy_rpc_types_eth::{BlockNumberOrTag, TransactionReceipt};
#[rustfmt::skip]
use alloy_rpc_types_eth::{BlockNumberOrTag, TransactionInput, TransactionRequest, TransactionReceipt};
use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::RootProvider;
use alloy_provider::{Provider, RootProvider};
use alloy_node_bindings::{Anvil, AnvilInstance};
use scale::Encode;
use serai_client::{
primitives::SeraiAddress,
in_instructions::primitives::{
InInstruction as SeraiInInstruction, RefundableInInstruction, Shorthand,
},
};
use ethereum_primitives::LogIndex;
use ethereum_schnorr::{PublicKey, Signature};
use ethereum_deployer::Deployer;
use crate::{Coin, OutInstructions, Router};
use crate::{
_irouter_abi::IRouterWithoutCollisions::{
self as IRouter, IRouterWithoutCollisionsErrors as IRouterErrors,
},
Coin, InInstruction, OutInstructions, Router, Executed, Escape,
};
#[test]
fn execute_reentrancy_guard() {
let hash = alloy_core::primitives::keccak256(b"ReentrancyGuard Router.execute");
assert_eq!(
alloy_core::primitives::hex::encode(
(U256::from_be_slice(hash.as_ref()) - U256::from(1u8)).to_be_bytes::<32>()
),
// Constant from the Router contract
"cf124a063de1614fedbd6b47187f98bf8873a1ae83da5c179a5881162f5b2401",
);
}
#[test]
fn selector_collisions() {
assert_eq!(
crate::_irouter_abi::IRouter::confirmNextSeraiKeyCall::SELECTOR,
crate::_router_abi::Router::confirmNextSeraiKey34AC53ACCall::SELECTOR
);
assert_eq!(
crate::_irouter_abi::IRouter::updateSeraiKeyCall::SELECTOR,
crate::_router_abi::Router::updateSeraiKey5A8542A2Call::SELECTOR
);
assert_eq!(
crate::_irouter_abi::IRouter::executeCall::SELECTOR,
crate::_router_abi::Router::execute4DE42904Call::SELECTOR
);
assert_eq!(
crate::_irouter_abi::IRouter::escapeHatchCall::SELECTOR,
crate::_router_abi::Router::escapeHatchDCDD91CCCall::SELECTOR
);
}
mod constants;
pub(crate) fn test_key() -> (Scalar, PublicKey) {
loop {
@@ -64,114 +49,474 @@ pub(crate) fn test_key() -> (Scalar, PublicKey) {
}
}
async fn setup_test(
) -> (AnvilInstance, Arc<RootProvider<SimpleRequest>>, Router, (Scalar, PublicKey)) {
let anvil = Anvil::new().spawn();
fn sign(key: (Scalar, PublicKey), msg: &[u8]) -> Signature {
let nonce = Scalar::random(&mut OsRng);
let c = Signature::challenge(ProjectivePoint::GENERATOR * nonce, &key.1, msg);
let s = nonce + (c * key.0);
Signature::new(c, s).unwrap()
}
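For reference, the verification equation this signing routine pairs with, as a sketch: since
s = r + (c * x), computing (s * G) - (c * A) recovers the nonce commitment R, and the signature
is valid if the challenge rebinds to it. `PublicKey::point` is an assumed accessor here, not
necessarily this crate's API.
fn verify_sketch(key: &PublicKey, msg: &[u8], sig_c: Scalar, sig_s: Scalar) -> bool {
  // R = (s * G) - (c * A), from s = r + (c * x)
  let nonce_commitment = (ProjectivePoint::GENERATOR * sig_s) - (key.point() * sig_c);
  // Recompute the challenge against the recovered commitment
  Signature::challenge(nonce_commitment, key, msg) == sig_c
}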
let provider = Arc::new(RootProvider::new(
ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
));
/// Calculate the gas used by a transaction if none of its calldata's bytes were zero
struct CalldataAgnosticGas;
impl CalldataAgnosticGas {
fn calculate(tx: &TxLegacy, mut gas_used: u64) -> u64 {
const ZERO_BYTE_GAS_COST: u64 = 4;
const NON_ZERO_BYTE_GAS_COST: u64 = 16;
for b in &tx.input {
if *b == 0 {
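// This zero byte was priced at ZERO_BYTE_GAS_COST; reprice it at the non-zero rate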
gas_used += NON_ZERO_BYTE_GAS_COST - ZERO_BYTE_GAS_COST;
}
}
gas_used
}
}
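As a worked illustration of this normalization, with hypothetical numbers: a transaction whose
calldata is [0x00, 0x01, 0x00], measured at 21_040 gas, has its two zero bytes repriced from 4
gas to 16 gas each, yielding 21_040 + (2 * (16 - 4)) = 21_064. That matches the all-non-zero
worst case the gas constants are derived from.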
let (private_key, public_key) = test_key();
assert!(Router::new(provider.clone(), &public_key).await.unwrap().is_none());
struct RouterState {
next_key: Option<(Scalar, PublicKey)>,
key: Option<(Scalar, PublicKey)>,
next_nonce: u64,
escaped_to: Option<Address>,
}
// Deploy the Deployer
let receipt = ethereum_test_primitives::publish_tx(&provider, Deployer::deployment_tx()).await;
assert!(receipt.status());
struct Test {
#[allow(unused)]
anvil: AnvilInstance,
provider: Arc<RootProvider<SimpleRequest>>,
chain_id: U256,
router: Router,
state: RouterState,
}
// Get the TX to deploy the Router
let mut tx = Router::deployment_tx(&public_key);
// Set a gas price (100 gwei)
tx.gas_price = 100_000_000_000;
// Sign it
let tx = ethereum_primitives::deterministically_sign(&tx);
// Publish it
let receipt = ethereum_test_primitives::publish_tx(&provider, tx).await;
assert!(receipt.status());
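// The gas constant is the observed gas rounded up to the next multiple of 1,000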
assert_eq!(u128::from(Router::DEPLOYMENT_GAS), ((receipt.gas_used + 1000) / 1000) * 1000);
impl Test {
async fn verify_state(&self) {
assert_eq!(
self.router.next_key(BlockNumberOrTag::Latest.into()).await.unwrap(),
self.state.next_key.map(|key| key.1)
);
assert_eq!(
self.router.key(BlockNumberOrTag::Latest.into()).await.unwrap(),
self.state.key.map(|key| key.1)
);
assert_eq!(
self.router.next_nonce(BlockNumberOrTag::Latest.into()).await.unwrap(),
self.state.next_nonce
);
assert_eq!(
self.router.escaped_to(BlockNumberOrTag::Latest.into()).await.unwrap(),
self.state.escaped_to,
);
}
let router = Router::new(provider.clone(), &public_key).await.unwrap().unwrap();
async fn new() -> Self {
// The following is explicitly only evaluated against the cancun network upgrade at this time
let anvil = Anvil::new().arg("--hardfork").arg("cancun").spawn();
(anvil, provider, router, (private_key, public_key))
let provider = Arc::new(RootProvider::new(
ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
));
let chain_id = U256::from(provider.get_chain_id().await.unwrap());
let (private_key, public_key) = test_key();
assert!(Router::new(provider.clone(), &public_key).await.unwrap().is_none());
// Deploy the Deployer
let receipt = ethereum_test_primitives::publish_tx(&provider, Deployer::deployment_tx()).await;
assert!(receipt.status());
let mut tx = Router::deployment_tx(&public_key);
tx.gas_limit = 1_100_000;
tx.gas_price = 100_000_000_000;
let tx = ethereum_primitives::deterministically_sign(tx);
let receipt = ethereum_test_primitives::publish_tx(&provider, tx).await;
assert!(receipt.status());
let router = Router::new(provider.clone(), &public_key).await.unwrap().unwrap();
let state = RouterState {
next_key: Some((private_key, public_key)),
key: None,
// Nonce 0 should've been consumed by setting the next key to the key the contract was initialized with
next_nonce: 1,
escaped_to: None,
};
// Confirm nonce 0 was used as such
{
let block = receipt.block_number.unwrap();
let executed = router.executed(block, block).await.unwrap();
assert_eq!(executed.len(), 1);
assert_eq!(executed[0], Executed::NextSeraiKeySet { nonce: 0, key: public_key.eth_repr() });
}
let res = Test { anvil, provider, chain_id, router, state };
res.verify_state().await;
res
}
async fn call_and_decode_err(&self, tx: TxLegacy) -> IRouterErrors {
let call = TransactionRequest::default()
.to(self.router.address())
.input(TransactionInput::new(tx.input));
let call_err = self.provider.call(&call).await.unwrap_err();
call_err.as_error_resp().unwrap().as_decoded_error::<IRouterErrors>(true).unwrap()
}
fn confirm_next_serai_key_tx(&self) -> TxLegacy {
let msg = Router::confirm_next_serai_key_message(self.chain_id, self.state.next_nonce);
let sig = sign(self.state.next_key.unwrap(), &msg);
self.router.confirm_next_serai_key(&sig)
}
async fn confirm_next_serai_key(&mut self) {
let mut tx = self.confirm_next_serai_key_tx();
tx.gas_price = 100_000_000_000;
let tx = ethereum_primitives::deterministically_sign(tx);
let receipt = ethereum_test_primitives::publish_tx(&self.provider, tx.clone()).await;
assert!(receipt.status());
// Only check the gas is equal when writing to a previously unallocated storage slot, as this
// is the highest possible gas cost and what the constant is derived from
if self.state.key.is_none() {
assert_eq!(
CalldataAgnosticGas::calculate(tx.tx(), receipt.gas_used),
Router::CONFIRM_NEXT_SERAI_KEY_GAS,
);
} else {
assert!(
CalldataAgnosticGas::calculate(tx.tx(), receipt.gas_used) <
Router::CONFIRM_NEXT_SERAI_KEY_GAS
);
}
{
let block = receipt.block_number.unwrap();
let executed = self.router.executed(block, block).await.unwrap();
assert_eq!(executed.len(), 1);
assert_eq!(
executed[0],
Executed::SeraiKeyUpdated {
nonce: self.state.next_nonce,
key: self.state.next_key.unwrap().1.eth_repr()
}
);
}
self.state.next_nonce += 1;
self.state.key = self.state.next_key;
self.state.next_key = None;
self.verify_state().await;
}
fn update_serai_key_tx(&self) -> ((Scalar, PublicKey), TxLegacy) {
let next_key = test_key();
let msg = Router::update_serai_key_message(self.chain_id, self.state.next_nonce, &next_key.1);
let sig = sign(self.state.key.unwrap(), &msg);
(next_key, self.router.update_serai_key(&next_key.1, &sig))
}
async fn update_serai_key(&mut self) {
let (next_key, mut tx) = self.update_serai_key_tx();
tx.gas_price = 100_000_000_000;
let tx = ethereum_primitives::deterministically_sign(tx);
let receipt = ethereum_test_primitives::publish_tx(&self.provider, tx.clone()).await;
assert!(receipt.status());
assert_eq!(
CalldataAgnosticGas::calculate(tx.tx(), receipt.gas_used),
Router::UPDATE_SERAI_KEY_GAS,
);
{
let block = receipt.block_number.unwrap();
let executed = self.router.executed(block, block).await.unwrap();
assert_eq!(executed.len(), 1);
assert_eq!(
executed[0],
Executed::NextSeraiKeySet { nonce: self.state.next_nonce, key: next_key.1.eth_repr() }
);
}
self.state.next_nonce += 1;
self.state.next_key = Some(next_key);
self.verify_state().await;
}
fn eth_in_instruction_tx(&self) -> (Coin, U256, Shorthand, TxLegacy) {
let coin = Coin::Ether;
let amount = U256::from(1);
let shorthand = Shorthand::Raw(RefundableInInstruction {
origin: None,
instruction: SeraiInInstruction::Transfer(SeraiAddress([0xff; 32])),
});
let mut tx = self.router.in_instruction(coin, amount, &shorthand);
tx.gas_limit = 1_000_000;
tx.gas_price = 100_000_000_000;
(coin, amount, shorthand, tx)
}
fn escape_hatch_tx(&self, escape_to: Address) -> TxLegacy {
let msg = Router::escape_hatch_message(self.chain_id, self.state.next_nonce, escape_to);
let sig = sign(self.state.key.unwrap(), &msg);
self.router.escape_hatch(escape_to, &sig)
}
async fn escape_hatch(&mut self) {
let mut escape_to = [0; 20];
OsRng.fill_bytes(&mut escape_to);
let escape_to = Address(escape_to.into());
// Set the code of the address to escape to so it isn't flagged as a non-contract
let () = self.provider.raw_request("anvil_setCode".into(), (escape_to, [0])).await.unwrap();
let mut tx = self.escape_hatch_tx(escape_to);
tx.gas_price = 100_000_000_000;
let tx = ethereum_primitives::deterministically_sign(tx);
let receipt = ethereum_test_primitives::publish_tx(&self.provider, tx.clone()).await;
assert!(receipt.status());
assert_eq!(CalldataAgnosticGas::calculate(tx.tx(), receipt.gas_used), Router::ESCAPE_HATCH_GAS);
{
let block = receipt.block_number.unwrap();
let executed = self.router.executed(block, block).await.unwrap();
assert_eq!(executed.len(), 1);
assert_eq!(executed[0], Executed::EscapeHatch { nonce: self.state.next_nonce, escape_to });
}
self.state.next_nonce += 1;
self.state.escaped_to = Some(escape_to);
self.verify_state().await;
}
fn escape_tx(&self, coin: Coin) -> TxLegacy {
let mut tx = self.router.escape(coin);
tx.gas_limit = 100_000;
tx.gas_price = 100_000_000_000;
tx
}
}
#[tokio::test]
async fn test_constructor() {
let (_anvil, _provider, router, key) = setup_test().await;
assert_eq!(router.next_key(BlockNumberOrTag::Latest.into()).await.unwrap(), Some(key.1));
assert_eq!(router.key(BlockNumberOrTag::Latest.into()).await.unwrap(), None);
assert_eq!(router.next_nonce(BlockNumberOrTag::Latest.into()).await.unwrap(), 1);
assert_eq!(
router.escaped_to(BlockNumberOrTag::Latest.into()).await.unwrap(),
Address::from([0; 20])
);
}
async fn confirm_next_serai_key(
provider: &Arc<RootProvider<SimpleRequest>>,
router: &Router,
nonce: u64,
key: (Scalar, PublicKey),
) -> TransactionReceipt {
let msg = Router::confirm_next_serai_key_message(nonce);
let nonce = Scalar::random(&mut OsRng);
let c = Signature::challenge(ProjectivePoint::GENERATOR * nonce, &key.1, &msg);
let s = nonce + (c * key.0);
let sig = Signature::new(c, s).unwrap();
let mut tx = router.confirm_next_serai_key(&sig);
tx.gas_price = 100_000_000_000;
let tx = ethereum_primitives::deterministically_sign(&tx);
let receipt = ethereum_test_primitives::publish_tx(provider, tx).await;
assert!(receipt.status());
assert_eq!(
u128::from(Router::CONFIRM_NEXT_SERAI_KEY_GAS),
((receipt.gas_used + 1000) / 1000) * 1000
);
receipt
// `Test::new` internalizes all checks on initial state
Test::new().await;
}
#[tokio::test]
async fn test_confirm_next_serai_key() {
let (_anvil, provider, router, key) = setup_test().await;
assert_eq!(router.next_key(BlockNumberOrTag::Latest.into()).await.unwrap(), Some(key.1));
assert_eq!(router.key(BlockNumberOrTag::Latest.into()).await.unwrap(), None);
assert_eq!(router.next_nonce(BlockNumberOrTag::Latest.into()).await.unwrap(), 1);
let receipt = confirm_next_serai_key(&provider, &router, 1, key).await;
assert_eq!(router.next_key(receipt.block_hash.unwrap().into()).await.unwrap(), None);
assert_eq!(router.key(receipt.block_hash.unwrap().into()).await.unwrap(), Some(key.1));
assert_eq!(router.next_nonce(receipt.block_hash.unwrap().into()).await.unwrap(), 2);
let mut test = Test::new().await;
// TODO: Check all calls fail at this time, including inInstruction
test.confirm_next_serai_key().await;
}
#[tokio::test]
async fn test_update_serai_key() {
let (_anvil, provider, router, key) = setup_test().await;
confirm_next_serai_key(&provider, &router, 1, key).await;
let mut test = Test::new().await;
test.confirm_next_serai_key().await;
test.update_serai_key().await;
let update_to = test_key().1;
let msg = Router::update_serai_key_message(2, &update_to);
// Once we update to a new key, we should, of course, be able to continue to rotate keys
test.confirm_next_serai_key().await;
}
let nonce = Scalar::random(&mut OsRng);
let c = Signature::challenge(ProjectivePoint::GENERATOR * nonce, &key.1, &msg);
let s = nonce + (c * key.0);
#[tokio::test]
async fn test_eth_in_instruction() {
let mut test = Test::new().await;
test.confirm_next_serai_key().await;
let sig = Signature::new(c, s).unwrap();
let (coin, amount, shorthand, tx) = test.eth_in_instruction_tx();
let mut tx = router.update_serai_key(&update_to, &sig);
tx.gas_price = 100_000_000_000;
let tx = ethereum_primitives::deterministically_sign(&tx);
let receipt = ethereum_test_primitives::publish_tx(&provider, tx).await;
// This should fail if the value mismatches the amount
{
let mut tx = tx.clone();
tx.value = U256::ZERO;
assert!(matches!(
test.call_and_decode_err(tx).await,
IRouterErrors::AmountMismatchesMsgValue(IRouter::AmountMismatchesMsgValue {})
));
}
let tx = ethereum_primitives::deterministically_sign(tx);
let receipt = ethereum_test_primitives::publish_tx(&test.provider, tx.clone()).await;
assert!(receipt.status());
assert_eq!(u128::from(Router::UPDATE_SERAI_KEY_GAS), ((receipt.gas_used + 1000) / 1000) * 1000);
assert_eq!(router.key(receipt.block_hash.unwrap().into()).await.unwrap(), Some(key.1));
assert_eq!(router.next_key(receipt.block_hash.unwrap().into()).await.unwrap(), Some(update_to));
assert_eq!(router.next_nonce(receipt.block_hash.unwrap().into()).await.unwrap(), 3);
let block = receipt.block_number.unwrap();
let in_instructions =
test.router.in_instructions_unordered(block, block, &HashSet::new()).await.unwrap();
assert_eq!(in_instructions.len(), 1);
assert_eq!(
in_instructions[0],
InInstruction {
id: LogIndex {
block_hash: *receipt.block_hash.unwrap(),
index_within_block: receipt.inner.logs()[0].log_index.unwrap(),
},
transaction_hash: **tx.hash(),
from: tx.recover_signer().unwrap(),
coin,
amount,
data: shorthand.encode(),
}
);
}
#[tokio::test]
async fn test_erc20_in_instruction() {
todo!("TODO")
}
#[tokio::test]
async fn test_eth_address_out_instruction() {
todo!("TODO")
}
#[tokio::test]
async fn test_erc20_address_out_instruction() {
todo!("TODO")
}
#[tokio::test]
async fn test_eth_code_out_instruction() {
todo!("TODO")
}
#[tokio::test]
async fn test_erc20_code_out_instruction() {
todo!("TODO")
}
#[tokio::test]
async fn test_escape_hatch() {
let mut test = Test::new().await;
test.confirm_next_serai_key().await;
// Queue another key so the below test cases can run
test.update_serai_key().await;
{
// The zero address should be invalid to escape to
assert!(matches!(
test.call_and_decode_err(test.escape_hatch_tx([0; 20].into())).await,
IRouterErrors::InvalidEscapeAddress(IRouter::InvalidEscapeAddress {})
));
// Empty addresses should be invalid to escape to
assert!(matches!(
test.call_and_decode_err(test.escape_hatch_tx([1; 20].into())).await,
IRouterErrors::EscapeAddressWasNotAContract(IRouter::EscapeAddressWasNotAContract {})
));
// Non-empty addresses without code should be invalid to escape to
let tx = ethereum_primitives::deterministically_sign(TxLegacy {
to: Address([1; 20].into()).into(),
gas_limit: 21_000,
gas_price: 100_000_000_000u128,
value: U256::from(1),
..Default::default()
});
let receipt = ethereum_test_primitives::publish_tx(&test.provider, tx.clone()).await;
assert!(receipt.status());
assert!(matches!(
test.call_and_decode_err(test.escape_hatch_tx([1; 20].into())).await,
IRouterErrors::EscapeAddressWasNotAContract(IRouter::EscapeAddressWasNotAContract {})
));
// Escaping at this point in time should fail
assert!(matches!(
test.call_and_decode_err(test.router.escape(Coin::Ether)).await,
IRouterErrors::EscapeHatchNotInvoked(IRouter::EscapeHatchNotInvoked {})
));
}
// Invoke the escape hatch
test.escape_hatch().await;
// Now that the escape hatch has been invoked, all of the following calls should fail
{
assert!(matches!(
test.call_and_decode_err(test.update_serai_key_tx().1).await,
IRouterErrors::EscapeHatchInvoked(IRouter::EscapeHatchInvoked {})
));
assert!(matches!(
test.call_and_decode_err(test.confirm_next_serai_key_tx()).await,
IRouterErrors::EscapeHatchInvoked(IRouter::EscapeHatchInvoked {})
));
assert!(matches!(
test.call_and_decode_err(test.eth_in_instruction_tx().3).await,
IRouterErrors::EscapeHatchInvoked(IRouter::EscapeHatchInvoked {})
));
// TODO execute
// We reject further attempts to update the escape hatch to prevent the last key from being
able to switch from the honest escape hatch to siphoning via a malicious escape hatch (such
as after the validators it represents unstake)
assert!(matches!(
test.call_and_decode_err(test.escape_hatch_tx(test.state.escaped_to.unwrap())).await,
IRouterErrors::EscapeHatchInvoked(IRouter::EscapeHatchInvoked {})
));
}
// Check the escape fn itself
// ETH
{
let () = test
.provider
.raw_request("anvil_setBalance".into(), (test.router.address(), 1))
.await
.unwrap();
let tx = ethereum_primitives::deterministically_sign(test.escape_tx(Coin::Ether));
let receipt = ethereum_test_primitives::publish_tx(&test.provider, tx.clone()).await;
assert!(receipt.status());
let block = receipt.block_number.unwrap();
assert_eq!(
test.router.escapes(block, block).await.unwrap(),
vec![Escape { coin: Coin::Ether, amount: U256::from(1) }],
);
assert!(test.provider.get_balance(test.router.address()).await.unwrap() == U256::from(0));
assert!(
test.provider.get_balance(test.state.escaped_to.unwrap()).await.unwrap() == U256::from(1)
);
}
// TODO ERC20 escape
}
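The two codehash rejections exercised above reflect EXTCODEHASH's semantics: it returns zero
for an account which doesn't exist, yet the keccak256 hash of the empty string for an existing
account without code (such as [1; 20] above, once funded). A sketch of the value the Router's
ACCOUNT_WITHOUT_CODE_CODEHASH constant presumably holds:
#[test]
fn account_without_code_codehash() {
  assert_eq!(
    // keccak256 of the empty string, the codehash of an existing yet code-less account
    alloy_core::primitives::hex::encode(alloy_core::primitives::keccak256(b"")),
    "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
  );
}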
/*
event InInstruction(
address indexed from, address indexed coin, uint256 amount, bytes instruction
);
event Batch(uint256 indexed nonce, bytes32 indexed messageHash, bytes results);
error InvalidSeraiKey();
error InvalidSignature();
error AmountMismatchesMsgValue();
error TransferFromFailed();
error Reentered();
error EscapeFailed();
function executeArbitraryCode(bytes memory code) external payable;
struct Signature {
bytes32 c;
bytes32 s;
}
enum DestinationType {
Address,
Code
}
struct CodeDestination {
uint32 gasLimit;
bytes code;
}
struct OutInstruction {
DestinationType destinationType;
bytes destination;
uint256 amount;
}
function execute(
Signature calldata signature,
address coin,
uint256 fee,
OutInstruction[] calldata outs
) external;
}
#[tokio::test]
@@ -191,7 +536,7 @@ async fn test_eth_in_instruction() {
gas_limit: 1_000_000,
to: TxKind::Call(router.address()),
value: amount,
input: crate::abi::inInstructionCall::new((
input: crate::_irouter_abi::inInstructionCall::new((
[0; 20].into(),
amount,
in_instruction.clone().into(),
@@ -199,7 +544,7 @@ async fn test_eth_in_instruction() {
.abi_encode()
.into(),
};
let tx = ethereum_primitives::deterministically_sign(&tx);
let tx = ethereum_primitives::deterministically_sign(tx);
let signer = tx.recover_signer().unwrap();
let receipt = ethereum_test_primitives::publish_tx(&provider, tx).await;
@@ -218,7 +563,10 @@ async fn test_eth_in_instruction() {
assert_eq!(parsed_in_instructions.len(), 1);
assert_eq!(
parsed_in_instructions[0].id,
(<[u8; 32]>::from(receipt.block_hash.unwrap()), receipt.inner.logs()[0].log_index.unwrap())
LogIndex {
block_hash: *receipt.block_hash.unwrap(),
index_within_block: receipt.inner.logs()[0].log_index.unwrap(),
},
);
assert_eq!(parsed_in_instructions[0].from, signer);
assert_eq!(parsed_in_instructions[0].coin, Coin::Ether);
@@ -226,11 +574,6 @@ async fn test_eth_in_instruction() {
assert_eq!(parsed_in_instructions[0].data, in_instruction);
}
#[tokio::test]
async fn test_erc20_in_instruction() {
todo!("TODO")
}
async fn publish_outs(
provider: &RootProvider<SimpleRequest>,
router: &Router,
@@ -250,7 +593,7 @@ async fn publish_outs(
let mut tx = router.execute(coin, fee, outs, &sig);
tx.gas_price = 100_000_000_000;
let tx = ethereum_primitives::deterministically_sign(&tx);
let tx = ethereum_primitives::deterministically_sign(tx);
ethereum_test_primitives::publish_tx(provider, tx).await
}
@@ -270,72 +613,8 @@ async fn test_eth_address_out_instruction() {
let instructions = OutInstructions::from([].as_slice());
let receipt = publish_outs(&provider, &router, key, 2, Coin::Ether, fee, instructions).await;
assert!(receipt.status());
assert_eq!(u128::from(Router::EXECUTE_BASE_GAS), ((receipt.gas_used + 1000) / 1000) * 1000);
assert_eq!(Router::EXECUTE_BASE_GAS, ((receipt.gas_used + 1000) / 1000) * 1000);
assert_eq!(router.next_nonce(receipt.block_hash.unwrap().into()).await.unwrap(), 3);
}
#[tokio::test]
async fn test_erc20_address_out_instruction() {
todo!("TODO")
}
#[tokio::test]
async fn test_eth_code_out_instruction() {
todo!("TODO")
}
#[tokio::test]
async fn test_erc20_code_out_instruction() {
todo!("TODO")
}
async fn escape_hatch(
provider: &Arc<RootProvider<SimpleRequest>>,
router: &Router,
nonce: u64,
key: (Scalar, PublicKey),
escape_to: Address,
) -> TransactionReceipt {
let msg = Router::escape_hatch_message(nonce, escape_to);
let nonce = Scalar::random(&mut OsRng);
let c = Signature::challenge(ProjectivePoint::GENERATOR * nonce, &key.1, &msg);
let s = nonce + (c * key.0);
let sig = Signature::new(c, s).unwrap();
let mut tx = router.escape_hatch(escape_to, &sig);
tx.gas_price = 100_000_000_000;
let tx = ethereum_primitives::deterministically_sign(&tx);
let receipt = ethereum_test_primitives::publish_tx(provider, tx).await;
assert!(receipt.status());
assert_eq!(u128::from(Router::ESCAPE_HATCH_GAS), ((receipt.gas_used + 1000) / 1000) * 1000);
receipt
}
async fn escape(
provider: &Arc<RootProvider<SimpleRequest>>,
router: &Router,
coin: Coin,
) -> TransactionReceipt {
let mut tx = router.escape(coin.address());
tx.gas_price = 100_000_000_000;
let tx = ethereum_primitives::deterministically_sign(&tx);
let receipt = ethereum_test_primitives::publish_tx(provider, tx).await;
assert!(receipt.status());
receipt
}
#[tokio::test]
async fn test_escape_hatch() {
let (_anvil, provider, router, key) = setup_test().await;
confirm_next_serai_key(&provider, &router, 1, key).await;
let escape_to: Address = {
let mut escape_to = [0; 20];
OsRng.fill_bytes(&mut escape_to);
escape_to.into()
};
escape_hatch(&provider, &router, 2, key, escape_to).await;
escape(&provider, &router, Coin::Ether).await;
}
*/


@@ -6,11 +6,13 @@
static ALLOCATOR: zalloc::ZeroizingAlloc<std::alloc::System> =
zalloc::ZeroizingAlloc(std::alloc::System);
use core::time::Duration;
use std::sync::Arc;
use alloy_core::primitives::U256;
use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::RootProvider;
use alloy_provider::{Provider, RootProvider};
use serai_client::validator_sets::primitives::Session;
@@ -62,10 +64,26 @@ async fn main() {
ClientBuilder::default().transport(SimpleRequest::new(bin::url()), true),
));
let chain_id = {
let mut delay = Duration::from_secs(5);
loop {
match provider.get_chain_id().await {
Ok(chain_id) => break chain_id,
Err(e) => {
log::error!("failed to fetch the chain ID on boot: {e:?}");
tokio::time::sleep(delay).await;
delay = (delay + Duration::from_secs(5)).min(Duration::from_secs(120));
}
}
}
};
bin::main_loop::<SetInitialKey, _, KeyGenParams, _>(
db.clone(),
Rpc { db: db.clone(), provider: provider.clone() },
Scheduler::<bin::Db>::new(SmartContract),
Scheduler::<bin::Db>::new(SmartContract {
chain_id: U256::from_le_slice(&chain_id.to_le_bytes()),
}),
TransactionPublisher::new(db, provider, {
let relayer_hostname = env::var("ETHEREUM_RELAYER_HOSTNAME")
.expect("ethereum relayer hostname wasn't specified")


@@ -32,6 +32,7 @@ impl primitives::BlockHeader for Epoch {
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct FullEpoch {
pub(crate) epoch: Epoch,
/// The unordered list of `InInstruction`s within this epoch
pub(crate) instructions: Vec<EthereumInInstruction>,
pub(crate) executed: Vec<Executed>,
}
@@ -99,6 +100,7 @@ impl primitives::Block for FullEpoch {
let Some(expected) =
eventualities.active_eventualities.remove(executed.nonce().to_le_bytes().as_slice())
else {
// TODO: Why is this a continue, not an assert?
continue;
};
assert_eq!(


@@ -81,8 +81,8 @@ impl ReceivedOutput<<Secp256k1 as Ciphersuite>::G, Address> for Output {
match self {
Output::Output { key: _, instruction } => {
let mut id = [0; 40];
id[.. 32].copy_from_slice(&instruction.id.0);
id[32 ..].copy_from_slice(&instruction.id.1.to_le_bytes());
id[.. 32].copy_from_slice(&instruction.id.block_hash);
id[32 ..].copy_from_slice(&instruction.id.index_within_block.to_le_bytes());
OutputId(id)
}
// Yet upon Eventuality completions, we report a Change output to ensure synchrony per the
@@ -97,7 +97,7 @@ impl ReceivedOutput<<Secp256k1 as Ciphersuite>::G, Address> for Output {
fn transaction_id(&self) -> Self::TransactionId {
match self {
Output::Output { key: _, instruction } => instruction.id.0,
Output::Output { key: _, instruction } => instruction.transaction_hash,
Output::Eventuality { key: _, nonce } => {
let mut id = [0; 32];
id[.. 8].copy_from_slice(&nonce.to_le_bytes());
@@ -114,7 +114,7 @@ impl ReceivedOutput<<Secp256k1 as Ciphersuite>::G, Address> for Output {
fn presumed_origin(&self) -> Option<Address> {
match self {
Output::Output { key: _, instruction } => Some(Address::from(instruction.from)),
Output::Output { key: _, instruction } => Some(Address::Address(*instruction.from.0)),
Output::Eventuality { .. } => None,
}
}
@@ -145,7 +145,7 @@ impl ReceivedOutput<<Secp256k1 as Ciphersuite>::G, Address> for Output {
Output::Output { key, instruction } => {
writer.write_all(&[0])?;
writer.write_all(key.to_bytes().as_ref())?;
instruction.write(writer)
instruction.serialize(writer)
}
Output::Eventuality { key, nonce } => {
writer.write_all(&[1])?;
@@ -164,7 +164,7 @@ impl ReceivedOutput<<Secp256k1 as Ciphersuite>::G, Address> for Output {
Ok(match kind[0] {
0 => {
let key = Secp256k1::read_G(reader)?;
let instruction = EthereumInInstruction::read(reader)?;
let instruction = EthereumInInstruction::deserialize_reader(reader)?;
Self::Output { key, instruction }
}
1 => {


@@ -17,8 +17,8 @@ use crate::{output::OutputId, machine::ClonableTransctionMachine};
#[derive(Clone, PartialEq, Debug)]
pub(crate) enum Action {
SetKey { nonce: u64, key: PublicKey },
Batch { nonce: u64, coin: Coin, fee: U256, outs: Vec<(Address, U256)> },
SetKey { chain_id: U256, nonce: u64, key: PublicKey },
Batch { chain_id: U256, nonce: u64, coin: Coin, fee: U256, outs: Vec<(Address, U256)> },
}
#[derive(Clone, PartialEq, Eq, Debug)]
@@ -33,17 +33,25 @@ impl Action {
pub(crate) fn message(&self) -> Vec<u8> {
match self {
Action::SetKey { nonce, key } => Router::update_serai_key_message(*nonce, key),
Action::Batch { nonce, coin, fee, outs } => {
Router::execute_message(*nonce, *coin, *fee, OutInstructions::from(outs.as_ref()))
Action::SetKey { chain_id, nonce, key } => {
Router::update_serai_key_message(*chain_id, *nonce, key)
}
Action::Batch { chain_id, nonce, coin, fee, outs } => Router::execute_message(
*chain_id,
*nonce,
*coin,
*fee,
OutInstructions::from(outs.as_ref()),
),
}
}
pub(crate) fn eventuality(&self) -> Eventuality {
Eventuality(match self {
Self::SetKey { nonce, key } => Executed::SetKey { nonce: *nonce, key: key.eth_repr() },
Self::Batch { nonce, .. } => {
Self::SetKey { chain_id: _, nonce, key } => {
Executed::NextSeraiKeySet { nonce: *nonce, key: key.eth_repr() }
}
Self::Batch { chain_id: _, nonce, .. } => {
Executed::Batch { nonce: *nonce, message_hash: keccak256(self.message()) }
}
})
@@ -77,6 +85,10 @@ impl SignableTransaction for Action {
Err(io::Error::other("unrecognized Action type"))?;
}
let mut chain_id = [0; 32];
reader.read_exact(&mut chain_id)?;
let chain_id = U256::from_be_bytes(chain_id);
let mut nonce = [0; 8];
reader.read_exact(&mut nonce)?;
let nonce = u64::from_le_bytes(nonce);
@@ -88,10 +100,10 @@ impl SignableTransaction for Action {
let key =
PublicKey::from_eth_repr(key).ok_or_else(|| io::Error::other("invalid key in Action"))?;
Action::SetKey { nonce, key }
Action::SetKey { chain_id, nonce, key }
}
1 => {
let coin = Coin::read(reader)?;
let coin = borsh::from_reader(reader)?;
let mut fee = [0; 32];
reader.read_exact(&mut fee)?;
@@ -111,22 +123,24 @@ impl SignableTransaction for Action {
outs.push((address, amount));
}
Action::Batch { nonce, coin, fee, outs }
Action::Batch { chain_id, nonce, coin, fee, outs }
}
_ => unreachable!(),
})
}
fn write(&self, writer: &mut impl io::Write) -> io::Result<()> {
match self {
Self::SetKey { nonce, key } => {
Self::SetKey { chain_id, nonce, key } => {
writer.write_all(&[0])?;
writer.write_all(&chain_id.to_be_bytes::<32>())?;
writer.write_all(&nonce.to_le_bytes())?;
writer.write_all(&key.eth_repr())
}
Self::Batch { nonce, coin, fee, outs } => {
Self::Batch { chain_id, nonce, coin, fee, outs } => {
writer.write_all(&[1])?;
writer.write_all(&chain_id.to_be_bytes::<32>())?;
writer.write_all(&nonce.to_le_bytes())?;
coin.write(writer)?;
borsh::BorshSerialize::serialize(coin, writer)?;
writer.write_all(&fee.as_le_bytes())?;
writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?;
for (address, amount) in outs {
@@ -167,9 +181,9 @@ impl primitives::Eventuality for Eventuality {
}
fn read(reader: &mut impl io::Read) -> io::Result<Self> {
Executed::read(reader).map(Self)
Ok(Self(borsh::from_reader(reader)?))
}
fn write(&self, writer: &mut impl io::Write) -> io::Result<()> {
self.0.write(writer)
borsh::BorshSerialize::serialize(&self.0, writer)
}
}


@@ -88,8 +88,8 @@ impl<D: Db> signers::TransactionPublisher<Transaction> for TransactionPublisher<
let nonce = tx.0.nonce();
// Convert from an Action (an internal representation of a signable event) to a TxLegacy
let tx = match tx.0 {
Action::SetKey { nonce: _, key } => router.update_serai_key(&key, &tx.1),
Action::Batch { nonce: _, coin, fee, outs } => {
Action::SetKey { chain_id: _, nonce: _, key } => router.update_serai_key(&key, &tx.1),
Action::Batch { chain_id: _, nonce: _, coin, fee, outs } => {
router.execute(coin, fee, OutInstructions::from(outs.as_ref()), &tx.1)
}
};


@@ -162,15 +162,19 @@ impl<D: Db> ScannerFeed for Rpc<D> {
router: Router,
block: Header,
) -> Result<(Vec<EthereumInInstruction>, Vec<Executed>), RpcError<TransportErrorKind>> {
let mut instructions = router.in_instructions(block.number, &HashSet::from(TOKENS)).await?;
let mut instructions = router
.in_instructions_unordered(block.number, block.number, &HashSet::from(TOKENS))
.await?;
for token in TOKENS {
for TopLevelTransfer { id, from, amount, data } in Erc20::new(provider.clone(), **token)
.top_level_transfers(block.number, router.address())
.await?
for TopLevelTransfer { id, transaction_hash, from, amount, data } in
Erc20::new(provider.clone(), token)
.top_level_transfers_unordered(block.number, block.number, router.address())
.await?
{
instructions.push(EthereumInInstruction {
id,
transaction_hash,
from,
coin: EthereumCoin::Erc20(token),
amount,
@@ -179,7 +183,7 @@ impl<D: Db> ScannerFeed for Rpc<D> {
}
}
let executed = router.executed(block.number).await?;
let executed = router.executed(block.number, block.number).await?;
Ok((instructions, executed))
}


@@ -36,7 +36,9 @@ fn balance_to_ethereum_amount(balance: Balance) -> U256 {
}
#[derive(Clone)]
pub(crate) struct SmartContract;
pub(crate) struct SmartContract {
pub(crate) chain_id: U256,
}
impl<D: Db> smart_contract_scheduler::SmartContract<Rpc<D>> for SmartContract {
type SignableTransaction = Action;
@@ -46,8 +48,11 @@ impl<D: Db> smart_contract_scheduler::SmartContract<Rpc<D>> for SmartContract {
_retiring_key: KeyFor<Rpc<D>>,
new_key: KeyFor<Rpc<D>>,
) -> (Self::SignableTransaction, EventualityFor<Rpc<D>>) {
let action =
Action::SetKey { nonce, key: PublicKey::new(new_key).expect("rotating to an invalid key") };
let action = Action::SetKey {
chain_id: self.chain_id,
nonce,
key: PublicKey::new(new_key).expect("rotating to an invalid key"),
};
(action.clone(), action.eventuality())
}
@@ -133,6 +138,7 @@ impl<D: Db> smart_contract_scheduler::SmartContract<Rpc<D>> for SmartContract {
}
res.push(Action::Batch {
chain_id: self.chain_id,
nonce,
coin: coin_to_ethereum_coin(coin),
fee: U256::try_from(total_gas).unwrap() * fee_per_gas,


@@ -76,7 +76,7 @@ pub async fn deploy_contract(
input: bin.into(),
};
let deployment_tx = deterministically_sign(&deployment_tx);
let deployment_tx = deterministically_sign(deployment_tx);
let receipt = publish_tx(provider, deployment_tx).await;
assert!(receipt.status());


@@ -29,8 +29,8 @@ pub(crate) fn generators<C: EvrfCurve>() -> &'static EvrfGenerators<C> {
.or_insert_with(|| {
// If we haven't previously needed generators for this Ciphersuite, generate new ones
Box::leak(Box::new(EvrfGenerators::<C>::new(
((MAX_KEY_SHARES_PER_SET * 2 / 3) + 1).try_into().unwrap(),
MAX_KEY_SHARES_PER_SET.try_into().unwrap(),
(MAX_KEY_SHARES_PER_SET * 2 / 3) + 1,
MAX_KEY_SHARES_PER_SET,
)))
})
.downcast_ref()


@@ -7,11 +7,11 @@ use borsh::{BorshSerialize, BorshDeserialize};
use dkg::Participant;
use serai_primitives::BlockHash;
use validator_sets_primitives::{Session, KeyPair, Slash};
use validator_sets_primitives::{Session, KeyPair, SlashReport};
use coins_primitives::OutInstructionWithBalance;
use in_instructions_primitives::SignedBatch;
use serai_cosign::{CosignIntent, SignedCosign};
use serai_cosign::{Cosign, SignedCosign};
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct SubstrateContext {
@@ -100,7 +100,9 @@ pub mod sign {
Self::Cosign(cosign) => {
f.debug_struct("VariantSignId::Cosign").field("0", &cosign).finish()
}
Self::Batch(batch) => f.debug_struct("VariantSignId::Batch").field("0", &batch).finish(),
Self::Batch(batch) => {
f.debug_struct("VariantSignId::Batch").field("0", &hex::encode(batch)).finish()
}
Self::SlashReport => f.debug_struct("VariantSignId::SlashReport").finish(),
Self::Transaction(tx) => {
f.debug_struct("VariantSignId::Transaction").field("0", &hex::encode(tx)).finish()
@@ -164,11 +166,11 @@ pub mod coordinator {
/// Cosign the specified Substrate block.
///
/// This is sent by the Coordinator's Tributary scanner.
CosignSubstrateBlock { session: Session, intent: CosignIntent },
CosignSubstrateBlock { session: Session, cosign: Cosign },
/// Sign the slash report for this session.
///
/// This is sent by the Coordinator's Tributary scanner.
SignSlashReport { session: Session, report: Vec<Slash> },
SignSlashReport { session: Session, slash_report: SlashReport },
}
// This set of messages is sent entirely and solely by serai-processor-bin's implementation of
@@ -178,7 +180,7 @@ pub mod coordinator {
pub enum ProcessorMessage {
CosignedBlock { cosign: SignedCosign },
SignedBatch { batch: SignedBatch },
SignedSlashReport { session: Session, signature: Vec<u8> },
SignedSlashReport { session: Session, slash_report: SlashReport, signature: [u8; 64] },
}
}
@@ -320,8 +322,8 @@ impl CoordinatorMessage {
CoordinatorMessage::Coordinator(msg) => {
let (sub, id) = match msg {
// We only cosign a block once, and Reattempt is a separate message
coordinator::CoordinatorMessage::CosignSubstrateBlock { intent, .. } => {
(0, intent.block_number.encode())
coordinator::CoordinatorMessage::CosignSubstrateBlock { cosign, .. } => {
(0, cosign.block_number.encode())
}
// We only sign one slash report, and Reattempt is a separate message
coordinator::CoordinatorMessage::SignSlashReport { session, .. } => (1, session.encode()),


@@ -5,6 +5,7 @@ use blake2::{digest::typenum::U32, Digest, Blake2b};
use scale::Encode;
use serai_db::{DbTxn, Db};
use serai_primitives::BlockHash;
use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch};
use primitives::{
@@ -106,7 +107,7 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for BatchTask<D, S> {
// If this block is notable, create the Batch(s) for it
if notable {
let network = S::NETWORK;
let external_network_block_hash = index::block_id(&txn, block_number);
let external_network_block_hash = BlockHash(index::block_id(&txn, block_number));
let mut batch_id = BatchDb::<S>::acquire_batch_id(&mut txn);
// start with empty batch


@@ -102,6 +102,7 @@ pub trait TransactionPlanner<S: ScannerFeed, A>: 'static + Send + Sync {
///
/// Returns `None` if the fee exceeded the inputs, or `Some` otherwise.
// TODO: Enum for Change of None, Some, Mandatory
#[allow(clippy::type_complexity)]
fn plan_transaction_with_fee_amortization(
&self,
operating_costs: &mut u64,


@@ -40,6 +40,7 @@ serai-db = { path = "../../common/db" }
log = { version = "0.4", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
serai-cosign = { path = "../../coordinator/cosign" }
messages = { package = "serai-processor-messages", path = "../messages" }
primitives = { package = "serai-processor-primitives", path = "../primitives" }
scanner = { package = "serai-processor-scanner", path = "../scanner" }


@@ -69,7 +69,12 @@ impl<D: Db, E: GroupEncoding> BatchSignerTask<D, E> {
let mut machines = Vec::with_capacity(keys.len());
for keys in &keys {
machines.push(WrappedSchnorrkelMachine::new(keys.clone(), batch_message(&batch)));
// TODO: Fetch the context for this from a constant instead of re-defining it
machines.push(WrappedSchnorrkelMachine::new(
keys.clone(),
b"substrate",
batch_message(&batch),
));
}
attempt_manager.register(VariantSignId::Batch(id), machines);
}
@@ -106,7 +111,12 @@ impl<D: Db, E: Send + GroupEncoding> ContinuallyRan for BatchSignerTask<D, E> {
let mut machines = Vec::with_capacity(self.keys.len());
for keys in &self.keys {
machines.push(WrappedSchnorrkelMachine::new(keys.clone(), batch_message(&batch)));
// TODO: Also fetch the constant here
machines.push(WrappedSchnorrkelMachine::new(
keys.clone(),
b"substrate",
batch_message(&batch),
));
}
for msg in self.attempt_manager.register(VariantSignId::Batch(batch_hash), machines) {
BatchSignerToCoordinatorMessages::send(&mut txn, self.session, &msg);


@@ -1,6 +1,7 @@
use core::future::Future;
use scale::Decode;
use serai_primitives::Signature;
use serai_db::{DbTxn, Db};
use primitives::task::ContinuallyRan;
@@ -99,17 +100,11 @@ impl<D: Db, C: Coordinator> ContinuallyRan for CoordinatorTask<D, C> {
// Publish the cosigns from this session
{
let mut txn = self.db.txn();
while let Some(((block_number, block_id), signature)) =
Cosign::try_recv(&mut txn, session)
{
while let Some(signed_cosign) = Cosign::try_recv(&mut txn, session) {
iterated = true;
self
.coordinator
.publish_cosign(
block_number,
block_id,
<_>::decode(&mut signature.as_slice()).unwrap(),
)
.publish_cosign(signed_cosign)
.await
.map_err(|e| format!("couldn't publish Cosign: {e:?}"))?;
}
@@ -119,15 +114,12 @@ impl<D: Db, C: Coordinator> ContinuallyRan for CoordinatorTask<D, C> {
// If this session signed its slash report, publish its signature
{
let mut txn = self.db.txn();
if let Some(slash_report_signature) = SlashReportSignature::try_recv(&mut txn, session) {
if let Some((slash_report, signature)) = SignedSlashReport::try_recv(&mut txn, session) {
iterated = true;
self
.coordinator
.publish_slash_report_signature(
session,
<_>::decode(&mut slash_report_signature.as_slice()).unwrap(),
)
.publish_slash_report_signature(session, slash_report, Signature(signature))
.await
.map_err(|e| {
format!("couldn't send slash report signature to the coordinator: {e:?}")


@@ -9,7 +9,8 @@ use serai_validator_sets_primitives::Session;
use serai_db::{DbTxn, Db};
use messages::{sign::VariantSignId, coordinator::cosign_block_msg};
use serai_cosign::{COSIGN_CONTEXT, Cosign as CosignStruct, SignedCosign};
use messages::sign::VariantSignId;
use primitives::task::{DoesNotError, ContinuallyRan};
@@ -34,7 +35,7 @@ pub(crate) struct CosignerTask<D: Db> {
session: Session,
keys: Vec<ThresholdKeys<Ristretto>>,
current_cosign: Option<(u64, [u8; 32])>,
current_cosign: Option<CosignStruct>,
attempt_manager: AttemptManager<D, WrappedSchnorrkelMachine>,
}
@@ -62,26 +63,34 @@ impl<D: Db> ContinuallyRan for CosignerTask<D> {
let mut txn = self.db.txn();
if let Some(cosign) = ToCosign::get(&txn, self.session) {
// If this wasn't already signed for...
if LatestCosigned::get(&txn, self.session) < Some(cosign.0) {
if LatestCosigned::get(&txn, self.session) < Some(cosign.block_number) {
// If this isn't the cosign we're currently working on, meaning it's fresh
if self.current_cosign != Some(cosign) {
if self.current_cosign.as_ref() != Some(&cosign) {
// Retire the current cosign
if let Some(current_cosign) = self.current_cosign {
assert!(current_cosign.0 < cosign.0);
self.attempt_manager.retire(&mut txn, VariantSignId::Cosign(current_cosign.0));
if let Some(current_cosign) = &self.current_cosign {
assert!(current_cosign.block_number < cosign.block_number);
self
.attempt_manager
.retire(&mut txn, VariantSignId::Cosign(current_cosign.block_number));
}
// Set the cosign being worked on
self.current_cosign = Some(cosign);
self.current_cosign = Some(cosign.clone());
let mut machines = Vec::with_capacity(self.keys.len());
{
let message = cosign_block_msg(cosign.0, cosign.1);
let message = cosign.signature_message();
for keys in &self.keys {
machines.push(WrappedSchnorrkelMachine::new(keys.clone(), message.clone()));
machines.push(WrappedSchnorrkelMachine::new(
keys.clone(),
COSIGN_CONTEXT,
message.clone(),
));
}
}
for msg in self.attempt_manager.register(VariantSignId::Cosign(cosign.0), machines) {
for msg in
self.attempt_manager.register(VariantSignId::Cosign(cosign.block_number), machines)
{
CosignerToCoordinatorMessages::send(&mut txn, self.session, &msg);
}
@@ -109,12 +118,19 @@ impl<D: Db> ContinuallyRan for CosignerTask<D> {
let VariantSignId::Cosign(block_number) = id else {
panic!("CosignerTask signed a non-Cosign")
};
assert_eq!(Some(block_number), self.current_cosign.map(|cosign| cosign.0));
assert_eq!(
Some(block_number),
self.current_cosign.as_ref().map(|cosign| cosign.block_number)
);
let cosign = self.current_cosign.take().unwrap();
LatestCosigned::set(&mut txn, self.session, &cosign.0);
LatestCosigned::set(&mut txn, self.session, &cosign.block_number);
let cosign = SignedCosign {
cosign,
signature: Signature::from(signature).encode().try_into().unwrap(),
};
// Send the cosign
Cosign::send(&mut txn, self.session, &(cosign, Signature::from(signature).encode()));
Cosign::send(&mut txn, self.session, &cosign);
}
}


@@ -1,7 +1,9 @@
use serai_validator_sets_primitives::{Session, Slash};
use serai_validator_sets_primitives::{Session, SlashReport as SlashReportStruct};
use serai_db::{Get, DbTxn, create_db, db_channel};
use serai_cosign::{Cosign as CosignStruct, SignedCosign};
use messages::sign::{ProcessorMessage, CoordinatorMessage};
create_db! {
@@ -11,16 +13,16 @@ create_db! {
LatestRetiredSession: () -> Session,
ToCleanup: () -> Vec<(Session, Vec<u8>)>,
ToCosign: (session: Session) -> (u64, [u8; 32]),
ToCosign: (session: Session) -> CosignStruct,
}
}
db_channel! {
SignersGlobal {
Cosign: (session: Session) -> ((u64, [u8; 32]), Vec<u8>),
Cosign: (session: Session) -> SignedCosign,
SlashReport: (session: Session) -> Vec<Slash>,
SlashReportSignature: (session: Session) -> Vec<u8>,
SlashReport: (session: Session) -> SlashReportStruct,
SignedSlashReport: (session: Session) -> (SlashReportStruct, [u8; 64]),
/*
TODO: Most of these are pointless? We drop all active signing sessions on reboot. It's


@@ -11,11 +11,13 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::dkg::{ThresholdCore, ThresholdKeys};
use serai_primitives::Signature;
use serai_validator_sets_primitives::{Session, Slash};
use serai_validator_sets_primitives::{Session, SlashReport};
use serai_in_instructions_primitives::SignedBatch;
use serai_db::{DbTxn, Db};
use serai_cosign::{Cosign, SignedCosign};
use messages::sign::{VariantSignId, ProcessorMessage, CoordinatorMessage};
use primitives::task::{Task, TaskHandle, ContinuallyRan};
@@ -59,9 +61,7 @@ pub trait Coordinator: 'static + Send + Sync {
/// Publish a cosign.
fn publish_cosign(
&mut self,
block_number: u64,
block_id: [u8; 32],
signature: Signature,
signed_cosign: SignedCosign,
) -> impl Send + Future<Output = Result<(), Self::EphemeralError>>;
/// Publish a `SignedBatch`.
@@ -74,6 +74,7 @@ pub trait Coordinator: 'static + Send + Sync {
fn publish_slash_report_signature(
&mut self,
session: Session,
slash_report: SlashReport,
signature: Signature,
) -> impl Send + Future<Output = Result<(), Self::EphemeralError>>;
}
@@ -408,19 +409,13 @@ impl<
/// Cosign a block.
///
/// This is a cheap call and able to be done inline from a higher-level loop.
pub fn cosign_block(
&mut self,
mut txn: impl DbTxn,
session: Session,
block_number: u64,
block: [u8; 32],
) {
pub fn cosign_block(&mut self, mut txn: impl DbTxn, session: Session, cosign: &Cosign) {
// Don't cosign blocks with already retired keys
if Some(session.0) <= db::LatestRetiredSession::get(&txn).map(|session| session.0) {
return;
}
db::ToCosign::set(&mut txn, session, &(block_number, block));
db::ToCosign::set(&mut txn, session, cosign);
txn.commit();
if let Some(tasks) = self.tasks.get(&session) {
@@ -435,7 +430,7 @@ impl<
&mut self,
mut txn: impl DbTxn,
session: Session,
slash_report: &Vec<Slash>,
slash_report: &SlashReport,
) {
// Don't sign slash reports with already retired keys
if Some(session.0) <= db::LatestRetiredSession::get(&txn).map(|session| session.0) {


@@ -3,11 +3,8 @@ use core::{marker::PhantomData, future::Future};
use ciphersuite::Ristretto;
use frost::dkg::ThresholdKeys;
use scale::Encode;
use serai_primitives::Signature;
use serai_validator_sets_primitives::{
Session, ValidatorSet, SlashReport as SlashReportStruct, report_slashes_message,
};
use serai_validator_sets_primitives::Session;
use serai_db::{DbTxn, Db};
@@ -20,7 +17,7 @@ use frost_attempt_manager::*;
use crate::{
db::{
SlashReport, SlashReportSignature, CoordinatorToSlashReportSignerMessages,
SlashReport, SignedSlashReport, CoordinatorToSlashReportSignerMessages,
SlashReportSignerToCoordinatorMessages,
},
WrappedSchnorrkelMachine,
@@ -72,12 +69,14 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for SlashReportSignerTask<D, S> {
let mut machines = Vec::with_capacity(self.keys.len());
{
let message = report_slashes_message(
&ValidatorSet { network: S::NETWORK, session: self.session },
&SlashReportStruct(slash_report.try_into().unwrap()),
);
let message = slash_report.report_slashes_message();
for keys in &self.keys {
machines.push(WrappedSchnorrkelMachine::new(keys.clone(), message.clone()));
// TODO: Fetch this constant from somewhere instead of inlining it
machines.push(WrappedSchnorrkelMachine::new(
keys.clone(),
b"substrate",
message.clone(),
));
}
}
let mut txn = self.db.txn();
@@ -105,12 +104,12 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for SlashReportSignerTask<D, S> {
Response::Signature { id, signature } => {
assert_eq!(id, VariantSignId::SlashReport);
// Drain the channel
SlashReport::try_recv(&mut txn, self.session).unwrap();
let slash_report = SlashReport::try_recv(&mut txn, self.session).unwrap();
// Send the signature
SlashReportSignature::send(
SignedSlashReport::send(
&mut txn,
self.session,
&Signature::from(signature).encode(),
&(slash_report, Signature::from(signature).0),
);
}
}


@@ -16,10 +16,10 @@ use frost_schnorrkel::Schnorrkel;
// This wraps a Schnorrkel sign machine into one with a preset message.
#[derive(Clone)]
pub(crate) struct WrappedSchnorrkelMachine(ThresholdKeys<Ristretto>, Vec<u8>);
pub(crate) struct WrappedSchnorrkelMachine(ThresholdKeys<Ristretto>, &'static [u8], Vec<u8>);
impl WrappedSchnorrkelMachine {
pub(crate) fn new(keys: ThresholdKeys<Ristretto>, msg: Vec<u8>) -> Self {
Self(keys, msg)
pub(crate) fn new(keys: ThresholdKeys<Ristretto>, context: &'static [u8], msg: Vec<u8>) -> Self {
Self(keys, context, msg)
}
}
@@ -39,10 +39,10 @@ impl PreprocessMachine for WrappedSchnorrkelMachine {
rng: &mut R,
) -> (Self::SignMachine, Preprocess<Ristretto, <Schnorrkel as Algorithm<Ristretto>>::Addendum>)
{
let WrappedSchnorrkelMachine(keys, batch) = self;
let WrappedSchnorrkelMachine(keys, context, msg) = self;
let (machine, preprocess) =
AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys).preprocess(rng);
(WrappedSchnorrkelSignMachine(machine, batch), preprocess)
AlgorithmMachine::new(Schnorrkel::new(context), keys).preprocess(rng);
(WrappedSchnorrkelSignMachine(machine, msg), preprocess)
}
}


@@ -20,7 +20,7 @@ pub enum Event {
network: NetworkId,
publishing_session: Session,
id: u32,
external_network_block_hash: [u8; 32],
external_network_block_hash: BlockHash,
in_instructions_hash: [u8; 32],
in_instruction_results: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
},


@@ -21,7 +21,7 @@ pub enum Call {
},
report_slashes {
network: NetworkId,
slashes: BoundedVec<(SeraiAddress, u32), ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 / 3 }>>,
slashes: SlashReport,
signature: Signature,
},
allocate {


@@ -12,7 +12,7 @@ pub type CoinsEvent = serai_abi::coins::Event;
#[derive(Clone, Copy)]
pub struct SeraiCoins<'a>(pub(crate) &'a TemporalSerai<'a>);
impl<'a> SeraiCoins<'a> {
impl SeraiCoins<'_> {
pub async fn mint_events(&self) -> Result<Vec<CoinsEvent>, SeraiError> {
self
.0


@@ -9,7 +9,7 @@ const PALLET: &str = "Dex";
#[derive(Clone, Copy)]
pub struct SeraiDex<'a>(pub(crate) &'a TemporalSerai<'a>);
impl<'a> SeraiDex<'a> {
impl SeraiDex<'_> {
pub async fn events(&self) -> Result<Vec<DexEvent>, SeraiError> {
self
.0


@@ -15,7 +15,7 @@ const PALLET: &str = "GenesisLiquidity";
#[derive(Clone, Copy)]
pub struct SeraiGenesisLiquidity<'a>(pub(crate) &'a TemporalSerai<'a>);
impl<'a> SeraiGenesisLiquidity<'a> {
impl SeraiGenesisLiquidity<'_> {
pub async fn events(&self) -> Result<Vec<GenesisLiquidityEvent>, SeraiError> {
self
.0


@@ -9,7 +9,7 @@ const PALLET: &str = "InInstructions";
#[derive(Clone, Copy)]
pub struct SeraiInInstructions<'a>(pub(crate) &'a TemporalSerai<'a>);
impl<'a> SeraiInInstructions<'a> {
impl SeraiInInstructions<'_> {
pub async fn last_batch_for_network(
&self,
network: NetworkId,


@@ -8,7 +8,7 @@ const PALLET: &str = "LiquidityTokens";
#[derive(Clone, Copy)]
pub struct SeraiLiquidityTokens<'a>(pub(crate) &'a TemporalSerai<'a>);
impl<'a> SeraiLiquidityTokens<'a> {
impl SeraiLiquidityTokens<'_> {
pub async fn token_supply(&self, coin: Coin) -> Result<Amount, SeraiError> {
Ok(self.0.storage(PALLET, "Supply", coin).await?.unwrap_or(Amount(0)))
}


@@ -80,7 +80,7 @@ pub struct TemporalSerai<'a> {
block: [u8; 32],
events: RwLock<Option<EventsInBlock>>,
}
impl<'a> Clone for TemporalSerai<'a> {
impl Clone for TemporalSerai<'_> {
fn clone(&self) -> Self {
Self { serai: self.serai, block: self.block, events: RwLock::new(None) }
}
@@ -319,7 +319,7 @@ impl Serai {
}
}
impl<'a> TemporalSerai<'a> {
impl TemporalSerai<'_> {
async fn events<E>(
&self,
filter_map: impl Fn(&Event) -> Option<E>,
@@ -389,27 +389,27 @@ impl<'a> TemporalSerai<'a> {
})
}
pub fn coins(&'a self) -> SeraiCoins<'a> {
pub fn coins(&self) -> SeraiCoins<'_> {
SeraiCoins(self)
}
pub fn dex(&'a self) -> SeraiDex<'a> {
pub fn dex(&self) -> SeraiDex<'_> {
SeraiDex(self)
}
pub fn in_instructions(&'a self) -> SeraiInInstructions<'a> {
pub fn in_instructions(&self) -> SeraiInInstructions<'_> {
SeraiInInstructions(self)
}
pub fn validator_sets(&'a self) -> SeraiValidatorSets<'a> {
pub fn validator_sets(&self) -> SeraiValidatorSets<'_> {
SeraiValidatorSets(self)
}
pub fn genesis_liquidity(&'a self) -> SeraiGenesisLiquidity {
pub fn genesis_liquidity(&self) -> SeraiGenesisLiquidity {
SeraiGenesisLiquidity(self)
}
pub fn liquidity_tokens(&'a self) -> SeraiLiquidityTokens {
pub fn liquidity_tokens(&self) -> SeraiLiquidityTokens {
SeraiLiquidityTokens(self)
}
}

Some files were not shown because too many files have changed in this diff