Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-12 14:09:25 +00:00)

Compare commits: 72 commits, bcd3f14f4f ... undroppabl
Commits:
ce3b90541e cb410cc4e0 6c145a5ec3 a7fef2ba7a 291ebf5e24 5e0e91c85d
b5a6b0693e 3cc2abfedc 0ce9aad9b2 e35aa04afb e7de5125a2 158140c3a7
df9a9adaa8 d854807edd f501d46d44 74106b025f e731b546ab 77d60660d2
3c664ff05f c05b0c9eba 6d5049cab2 1419ba570a 542bf2170a 378d6b90cf
cbe83956aa 091d485fd8 2a3eaf4d7e 23122712cb 47eb793ce9 9b0b5fd1e2
893a24a1cc b101e2211a 201a444e89 9833911e06 465e8498c4 adf20773ac
295c1bd044 dda6e3e899 75a00f2a1a 6cde2bb6ef 20326bba73 ce83b41712
b2bd5d3a44 de2d6568a4 fd9b464b35 376a66b000 2121a9b131 419223c54e
a731c0005d f27e4e3202 f55165e016 d9e9887d34 82e753db30 052388285b
47a4e534ef 257f691277 c6d0fb477c 96518500b1 2b8f481364 479ca0410a
9a5a661d04 3daeea09e6 a64e2004ab f9f6d40695 4836c1676b 985261574c
3f3b0255f8 5fc8500f8d 49c221cca2 906e2fb669 ce676efb1f 0a611cb155
.github/workflows/msrv.yml (vendored, 7 lines changed)
@@ -173,10 +173,13 @@ jobs:
       - name: Run cargo msrv on coordinator
         run: |
-          cargo msrv verify --manifest-path coordinator/tributary/tendermint/Cargo.toml
-          cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/tributary-sdk/tendermint/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/tributary-sdk/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/substrate/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/p2p/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/p2p/libp2p/Cargo.toml
           cargo msrv verify --manifest-path coordinator/Cargo.toml

   msrv-substrate:
.github/workflows/tests.yml (vendored, 5 lines changed)
@@ -60,9 +60,12 @@ jobs:
             -p serai-ethereum-processor \
             -p serai-monero-processor \
             -p tendermint-machine \
-            -p tributary-chain \
+            -p tributary-sdk \
+            -p serai-cosign \
+            -p serai-coordinator-substrate \
+            -p serai-coordinator-tributary \
+            -p serai-coordinator-p2p \
+            -p serai-coordinator-libp2p-p2p \
             -p serai-coordinator \
             -p serai-orchestrator \
             -p serai-docker-tests
Cargo.lock (generated, 343 lines changed)
File diff suppressed because it is too large.
Cargo.toml
@@ -96,10 +96,13 @@ members = [
   "processor/ethereum",
   "processor/monero",

-  "coordinator/tributary/tendermint",
-  "coordinator/tributary",
+  "coordinator/tributary-sdk/tendermint",
+  "coordinator/tributary-sdk",
+  "coordinator/cosign",
+  "coordinator/substrate",
+  "coordinator/tributary",
+  "coordinator/p2p",
+  "coordinator/p2p/libp2p",
   "coordinator",

   "substrate/primitives",
common/db/src/lib.rs
@@ -30,13 +30,53 @@ pub trait Get {
 /// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
 /// randomly, or any other action, at time of write or at time of commit.
 #[must_use]
-pub trait DbTxn: Send + Get {
+pub trait DbTxn: Sized + Send + Get {
   /// Write a value to this key.
   fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
   /// Delete the value from this key.
   fn del(&mut self, key: impl AsRef<[u8]>);
   /// Commit this transaction.
   fn commit(self);
+  /// Close this transaction.
+  ///
+  /// This is equivalent to `Drop` on transactions which can be dropped. This is explicit and works
+  /// with transactions which can't be dropped.
+  fn close(self) {
+    drop(self);
+  }
 }
+
+// Credit for the idea goes to https://jack.wrenn.fyi/blog/undroppable
+pub struct Undroppable<T>(Option<T>);
+impl<T> Drop for Undroppable<T> {
+  fn drop(&mut self) {
+    // Use an assertion at compile time to prevent this code from compiling if generated
+    #[allow(clippy::assertions_on_constants)]
+    const {
+      assert!(false, "Undroppable DbTxn was dropped. Ensure all code paths call commit or close");
+    }
+  }
+}
+impl<T: DbTxn> Get for Undroppable<T> {
+  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
+    self.0.as_ref().unwrap().get(key)
+  }
+}
+impl<T: DbTxn> DbTxn for Undroppable<T> {
+  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
+    self.0.as_mut().unwrap().put(key, value);
+  }
+  fn del(&mut self, key: impl AsRef<[u8]>) {
+    self.0.as_mut().unwrap().del(key);
+  }
+  fn commit(mut self) {
+    self.0.take().unwrap().commit();
+    let _ = core::mem::ManuallyDrop::new(self);
+  }
+  fn close(mut self) {
+    drop(self.0.take().unwrap());
+    let _ = core::mem::ManuallyDrop::new(self);
+  }
+}

 /// A database supporting atomic transaction.
@@ -51,6 +91,10 @@ pub trait Db: 'static + Send + Sync + Clone + Get {
     let dst_len = u8::try_from(item_dst.len()).unwrap();
     [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
   }
-  /// Open a new transaction.
-  fn txn(&mut self) -> Self::Transaction<'_>;
+  /// Open a new transaction which may be dropped.
+  fn unsafe_txn(&mut self) -> Self::Transaction<'_>;
+  /// Open a new transaction which must be committed or closed.
+  fn txn(&mut self) -> Undroppable<Self::Transaction<'_>> {
+    Undroppable(Some(self.unsafe_txn()))
+  }
 }
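A minimal sketch (not part of the diff) of the resulting API, using the `MemDb` backend updated below. Because `Undroppable`'s `Drop` impl contains `const { assert!(false) }`, the program only compiles if the compiler never has to generate the drop glue, i.e. every path ends in `commit` or `close`:

```rust
use serai_db::{Get, DbTxn, Db, MemDb};

fn main() {
  let mut db = MemDb::new();
  // `txn` now yields an `Undroppable<MemDbTxn>`; silently dropping it won't compile
  let mut txn = db.txn();
  let count = txn.get(b"counter").map(|value| value[0]).unwrap_or(0);
  txn.put(b"counter", [count + 1]);
  txn.commit(); // or `txn.close()` to discard the writes
}
```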
common/db/src/mem.rs (MemDb)
@@ -74,7 +74,7 @@ impl Get for MemDb {
 }
 impl Db for MemDb {
   type Transaction<'a> = MemDbTxn<'a>;
-  fn txn(&mut self) -> MemDbTxn<'_> {
+  fn unsafe_txn(&mut self) -> MemDbTxn<'_> {
     MemDbTxn(self, HashMap::new(), HashSet::new())
   }
 }
common/db/src/parity_db.rs (ParityDb)
@@ -37,7 +37,7 @@ impl Get for Arc<ParityDb> {
 }
 impl Db for Arc<ParityDb> {
   type Transaction<'a> = Transaction<'a>;
-  fn txn(&mut self) -> Self::Transaction<'_> {
+  fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
     Transaction(self, vec![])
   }
 }
common/db/src/rocks.rs (RocksDB)
@@ -39,7 +39,7 @@ impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
 }
 impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
   type Transaction<'a> = Transaction<'a, T>;
-  fn txn(&mut self) -> Self::Transaction<'_> {
+  fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
     let mut opts = WriteOptions::default();
     opts.set_sync(true);
     Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
common/task/src/lib.rs
@@ -2,28 +2,36 @@
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]

-use core::{future::Future, time::Duration};
-use std::sync::Arc;
+use core::{
+  fmt::{self, Debug},
+  future::Future,
+  time::Duration,
+};

-use tokio::sync::{mpsc, oneshot, Mutex};
+use tokio::sync::mpsc;

-enum Closed {
-  NotClosed(Option<oneshot::Receiver<()>>),
-  Closed,
-}
+mod type_name;

 /// A handle for a task.
+///
+/// The task will only stop running once all handles for it are dropped.
+//
+// `run_now` isn't infallible if the task may have been closed. `run_now` on a closed task would
+// either need to panic (historic behavior), silently drop the fact the task can't be run, or
+// return an error. Instead of having a potential panic, and instead of modeling the error
+// behavior, this task can't be closed unless all handles are dropped, ensuring calls to `run_now`
+// are infallible.
 #[derive(Clone)]
 pub struct TaskHandle {
   run_now: mpsc::Sender<()>,
+  #[allow(dead_code)] // This is used to track if all handles have been dropped
   close: mpsc::Sender<()>,
-  closed: Arc<Mutex<Closed>>,
 }

 /// A task's internal structures.
 pub struct Task {
   run_now: mpsc::Receiver<()>,
   close: mpsc::Receiver<()>,
-  closed: oneshot::Sender<()>,
 }

 impl Task {
@@ -34,22 +42,15 @@ impl Task {
     let (run_now_send, run_now_recv) = mpsc::channel(1);
     // And any call to close satisfies all calls to close
     let (close_send, close_recv) = mpsc::channel(1);
-    let (closed_send, closed_recv) = oneshot::channel();
     (
-      Self { run_now: run_now_recv, close: close_recv, closed: closed_send },
-      TaskHandle {
-        run_now: run_now_send,
-        close: close_send,
-        closed: Arc::new(Mutex::new(Closed::NotClosed(Some(closed_recv)))),
-      },
+      Self { run_now: run_now_recv, close: close_recv },
+      TaskHandle { run_now: run_now_send, close: close_send },
     )
   }
 }

 impl TaskHandle {
   /// Tell the task to run now (and not whenever its next iteration on a timer is).
-  ///
-  /// Panics if the task has been dropped.
   pub fn run_now(&self) {
     #[allow(clippy::match_same_arms)]
     match self.run_now.try_send(()) {
@@ -57,27 +58,19 @@ impl TaskHandle {
       // NOP on full, as this task will already be ran as soon as possible
       Err(mpsc::error::TrySendError::Full(())) => {}
       Err(mpsc::error::TrySendError::Closed(())) => {
+        // The task should only be closed if all handles are dropped, and this one hasn't been
         panic!("task was unexpectedly closed when calling run_now")
       }
     }
   }
-
-  /// Close the task.
-  ///
-  /// Returns once the task shuts down after it finishes its current iteration (which may be of
-  /// unbounded time).
-  pub async fn close(self) {
-    // If another instance of the handle called this, don't error
-    let _ = self.close.send(()).await;
-    // Wait until we receive the closed message
-    let mut closed = self.closed.lock().await;
-    match &mut *closed {
-      Closed::NotClosed(ref mut recv) => {
-        assert_eq!(recv.take().unwrap().await, Ok(()), "continually ran task dropped itself?");
-        *closed = Closed::Closed;
-      }
-      Closed::Closed => {}
-    }
-  }
 }
+
+/// An enum which can't be constructed, representing that the task does not error.
+pub enum DoesNotError {}
+impl Debug for DoesNotError {
+  fn fmt(&self, _: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+    // This type can't be constructed so we'll never have a `&self` to call this fn with
+    unreachable!()
+  }
+}
@@ -90,11 +83,14 @@ pub trait ContinuallyRan: Sized + Send {
   /// Upon error, the amount of time waited will be linearly increased until this limit.
   const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 120;

+  /// The error potentially yielded upon running an iteration of this task.
+  type Error: Debug;
+
   /// Run an iteration of the task.
   ///
   /// If this returns `true`, all dependents of the task will immediately have a new iteration ran
   /// (without waiting for whatever timer they were already on).
-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>>;
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>>;

   /// Continually run the task.
   fn continually_run(
@@ -136,12 +132,20 @@ pub trait ContinuallyRan: Sized + Send {
           }
         }
         Err(e) => {
-          log::warn!("{}", e);
+          // Get the type name
+          let type_name = type_name::strip_type_name(core::any::type_name::<Self>());
+          // Print the error as a warning, prefixed by the task's type
+          log::warn!("{type_name}: {e:?}");
           increase_sleep_before_next_task(&mut current_sleep_before_next_task);
         }
       }

       // Don't run the task again for another few seconds UNLESS told to run now
+      /*
+        We could replace tokio::mpsc with async_channel, tokio::time::sleep with
+        patchable_async_sleep::sleep, and tokio::select with futures_lite::future::or

+        It isn't worth the effort when patchable_async_sleep::sleep will still resolve to tokio
+      */
       tokio::select! {
         () = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {},
         msg = task.run_now.recv() => {
@@ -152,8 +156,6 @@ pub trait ContinuallyRan: Sized + Send {
           },
         }
       }
-
-      task.closed.send(()).unwrap();
     }
   }
 }
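A hedged sketch (not part of the diff) of a task under the reworked API, using `DoesNotError` for the new associated `Error` type. `HeartbeatTask` is hypothetical, and the `Task::new() -> (Task, TaskHandle)` and `continually_run(self, task, dependent_handles)` signatures are assumed from what this diff shows:

```rust
use core::future::Future;
use serai_task::{DoesNotError, Task, ContinuallyRan};

struct HeartbeatTask; // hypothetical task for illustration

impl ContinuallyRan for HeartbeatTask {
  // This task can't fail, which `DoesNotError` encodes at the type level
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      log::info!("heartbeat");
      // `false`: don't immediately re-run dependents
      Ok(false)
    }
  }
}

#[tokio::main]
async fn main() {
  let (task, handle) = Task::new();
  // Assumed signature: the task plus the handles of dependent tasks to poke on progress
  tokio::spawn(HeartbeatTask.continually_run(task, vec![]));
  handle.run_now(); // trigger an immediate iteration
  // Once every `TaskHandle` is dropped, the task shuts down after its current iteration
}
```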
common/task/src/type_name.rs (new file, 31 lines)
@@ -0,0 +1,31 @@
/// Strip the modules from a type name.
// This may be of the form `a::b::C`, in which case we only want `C`
pub(crate) fn strip_type_name(full_type_name: &'static str) -> String {
  // It also may be `a::b::C<d::e::F>`, in which case, we only attempt to strip `a::b`
  let mut by_generics = full_type_name.split('<');

  // Strip to just `C`
  let full_outer_object_name = by_generics.next().unwrap();
  let mut outer_object_name_parts = full_outer_object_name.split("::");
  let mut last_part_in_outer_object_name = outer_object_name_parts.next().unwrap();
  for part in outer_object_name_parts {
    last_part_in_outer_object_name = part;
  }

  // Push back on the generic terms
  let mut type_name = last_part_in_outer_object_name.to_string();
  for generic in by_generics {
    type_name.push('<');
    type_name.push_str(generic);
  }
  type_name
}

#[test]
fn test_strip_type_name() {
  assert_eq!(strip_type_name("core::option::Option"), "Option");
  assert_eq!(
    strip_type_name("core::option::Option<alloc::string::String>"),
    "Option<alloc::string::String>"
  );
}
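Note the helper only strips the module path off the outermost type; paths inside generic parameters are preserved, as the test shows. Following the same logic, for a doubly-nested parameter (hypothetical type names, not from the file):

```rust
assert_eq!(strip_type_name("x::Y<a::B<c::D>>"), "Y<a::B<c::D>>");
```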
coordinator/Cargo.toml
@@ -18,32 +18,29 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-async-trait = { version = "0.1", default-features = false }
-
 zeroize = { version = "^1.5", default-features = false, features = ["std"] }
 bitvec = { version = "1", default-features = false, features = ["std"] }
 rand_core = { version = "0.6", default-features = false, features = ["std"] }

 blake2 = { version = "0.10", default-features = false, features = ["std"] }
 schnorrkel = { version = "0.11", default-features = false, features = ["std"] }

 transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
 ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
 schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
-frost = { package = "modular-frost", path = "../crypto/frost" }
-frost-schnorrkel = { path = "../crypto/schnorrkel" }

-scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
+scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }

 zalloc = { path = "../common/zalloc" }
 serai-db = { path = "../common/db" }
 serai-env = { path = "../common/env" }
+serai-task = { path = "../common/task", version = "0.1" }

-processor-messages = { package = "serai-processor-messages", path = "../processor/messages" }
+messages = { package = "serai-processor-messages", path = "../processor/messages" }
 message-queue = { package = "serai-message-queue", path = "../message-queue" }
-tributary = { package = "tributary-chain", path = "./tributary" }
+tributary-sdk = { path = "./tributary-sdk" }

 sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
 serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }

 hex = { version = "0.4", default-features = false, features = ["std"] }
@@ -52,16 +49,15 @@ borsh = { version = "1", default-features = false, features = ["std", "derive",
 log = { version = "0.4", default-features = false, features = ["std"] }
 env_logger = { version = "0.10", default-features = false, features = ["humantime"] }

 futures-util = { version = "0.3", default-features = false, features = ["std"] }
-tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
-libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] }
+tokio = { version = "1", default-features = false, features = ["time", "sync", "macros", "rt-multi-thread"] }

 [dev-dependencies]
-tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
-sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
 sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
+serai-cosign = { path = "./cosign" }
+serai-coordinator-substrate = { path = "./substrate" }
+serai-coordinator-tributary = { path = "./tributary" }
+serai-coordinator-p2p = { path = "./p2p" }
+serai-coordinator-libp2p-p2p = { path = "./p2p/libp2p" }

 [features]
-longer-reattempts = []
+longer-reattempts = ["serai-coordinator-tributary/longer-reattempts"]
 parity-db = ["serai-db/parity-db"]
 rocksdb = ["serai-db/rocksdb"]
coordinator/LICENSE
@@ -1,6 +1,6 @@
 AGPL-3.0-only license

-Copyright (c) 2023-2024 Luke Parker
+Copyright (c) 2023-2025 Luke Parker

 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as
coordinator/README.md
@@ -1,19 +1,29 @@
 # Coordinator

-- [`tendermint`](/tributary/tendermint) is an implementation of the Tendermint BFT algorithm.
+- [`tendermint`](/tributary/tendermint) is an implementation of the Tendermint
+  BFT algorithm.

-- [`tributary`](./tributary) is a micro-blockchain framework. Instead of a producing a blockchain
-  daemon like the Polkadot SDK or Cosmos SDK intend to, `tributary` is solely intended to be an
-  embedded asynchronous task within an application.
+- [`tributary-sdk`](./tributary-sdk) is a micro-blockchain framework. Instead
+  of producing a blockchain daemon like the Polkadot SDK or Cosmos SDK intend
+  to, `tributary` is solely intended to be an embedded asynchronous task within
+  an application.

-  The Serai coordinator spawns a tributary for each validator set it's coordinating. This allows
-  the participating validators to communicate in a byzantine-fault-tolerant manner (relying on
-  Tendermint for consensus).
+  The Serai coordinator spawns a tributary for each validator set it's
+  coordinating. This allows the participating validators to communicate in a
+  byzantine-fault-tolerant manner (relying on Tendermint for consensus).

-- [`cosign`](./cosign) contains a library to decide which Substrate blocks should be cosigned and
-  to evaluate cosigns.
+- [`cosign`](./cosign) contains a library to decide which Substrate blocks
+  should be cosigned and to evaluate cosigns.

-- [`substrate`](./substrate) contains a library to index the Substrate blockchain and handle its
-  events.
+- [`substrate`](./substrate) contains a library to index the Substrate
+  blockchain and handle its events.
+
+- [`tributary`](./tributary) is our instantiation of the Tributary SDK for the
+  Serai processor. It includes the `Transaction` definition and deferred
+  execution logic.
+
+- [`p2p`](./p2p) is our abstract P2P API to service the Coordinator.
+
+- [`libp2p`](./p2p/libp2p) is our libp2p-backed implementation of the P2P API.
+
+- [`src`](./src) contains the source code for the Coordinator binary itself.
coordinator/cosign/src/delay.rs
@@ -2,7 +2,7 @@ use core::future::Future;
 use std::time::{Duration, SystemTime};

 use serai_db::*;
-use serai_task::ContinuallyRan;
+use serai_task::{DoesNotError, ContinuallyRan};

 use crate::evaluator::CosignedBlocks;
@@ -24,8 +24,19 @@ pub(crate) struct CosignDelayTask<D: Db> {
   pub(crate) db: D,
 }

+struct AwaitUndroppable<T: DbTxn>(Option<core::mem::ManuallyDrop<Undroppable<T>>>);
+impl<T: DbTxn> Drop for AwaitUndroppable<T> {
+  fn drop(&mut self) {
+    if let Some(mut txn) = self.0.take() {
+      (unsafe { core::mem::ManuallyDrop::take(&mut txn) }).close();
+    }
+  }
+}
+
 impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+  type Error = DoesNotError;
+
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       let mut made_progress = false;
       loop {
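Why the `ManuallyDrop` wrapper: if an `Undroppable` were live across the `.await` in the next hunk, the cancellation path would require drop glue for `Undroppable`, which its `const { assert!(false) }` forbids at compile time. Wrapping it in `ManuallyDrop` removes that drop obligation, while `AwaitUndroppable`'s own `Drop` closes the transaction if the future is cancelled mid-sleep. A condensed, hedged sketch of the pattern (hypothetical one-second sleep):

```rust
// Sketch only; mirrors the hold-across-await pattern used in the hunk below
let txn = db.txn(); // Undroppable<_>
let mut held = AwaitUndroppable(Some(core::mem::ManuallyDrop::new(txn)));
tokio::time::sleep(core::time::Duration::from_secs(1)).await; // cancel-safe: `held` closes the txn
let mut txn = core::mem::ManuallyDrop::into_inner(held.0.take().unwrap());
txn.commit();
```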
@@ -33,14 +44,18 @@ impl<D: Db> ContinuallyRan for CosignDelayTask<D> {

         // Receive the next block to mark as cosigned
         let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
+          txn.close();
           break;
         };

         // Calculate when we should mark it as valid
         let time_valid =
           SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
         // Sleep until then
+        let mut txn = AwaitUndroppable(Some(core::mem::ManuallyDrop::new(txn)));
         tokio::time::sleep(SystemTime::now().duration_since(time_valid).unwrap_or(Duration::ZERO))
           .await;
+        let mut txn = core::mem::ManuallyDrop::into_inner(txn.0.take().unwrap());

         // Set the cosigned block
         LatestCosignedBlockNumber::set(&mut txn, &block_number);
coordinator/cosign/src/evaluator.rs
@@ -80,12 +80,14 @@ pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
 }

 impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+  type Error = String;
+
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       let mut known_cosign = None;
       let mut made_progress = false;
       loop {
-        let mut txn = self.db.txn();
+        let mut txn = self.db.unsafe_txn();
         let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
         else {
           break;
coordinator/cosign/src/intend.rs
@@ -1,5 +1,5 @@
 use core::future::Future;
-use std::collections::HashMap;
+use std::{sync::Arc, collections::HashMap};

 use serai_client::{
   primitives::{SeraiAddress, Amount},
@@ -57,18 +57,20 @@ async fn block_has_events_justifying_a_cosign(
 /// A task to determine which blocks we should intend to cosign.
 pub(crate) struct CosignIntendTask<D: Db> {
   pub(crate) db: D,
-  pub(crate) serai: Serai,
+  pub(crate) serai: Arc<Serai>,
 }

 impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+  type Error = String;
+
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
       let latest_block_number =
         self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();

       for block_number in start_block_number ..= latest_block_number {
-        let mut txn = self.db.txn();
+        let mut txn = self.db.unsafe_txn();

         let (block, mut has_events) =
           block_has_events_justifying_a_cosign(&self.serai, block_number)
@@ -78,7 +80,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
         // Check we are indexing a linear chain
         if (block_number > 1) &&
           (<[u8; 32]>::from(block.header.parent_hash) !=
-            SubstrateBlocks::get(&txn, block_number - 1)
+            SubstrateBlockHash::get(&txn, block_number - 1)
               .expect("indexing a block but haven't indexed its parent"))
         {
           Err(format!(
@@ -86,14 +88,15 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
             block_number - 1
           ))?;
         }
-        SubstrateBlocks::set(&mut txn, block_number, &block.hash());
+        let block_hash = block.hash();
+        SubstrateBlockHash::set(&mut txn, block_number, &block_hash);

         let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);

         // If this is notable, it creates a new global session, which we index into the database
         // now
         if has_events == HasEvents::Notable {
-          let serai = self.serai.as_of(block.hash());
+          let serai = self.serai.as_of(block_hash);
           let sets_and_keys = cosigning_sets(&serai).await?;
           let global_session =
             GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect());
@@ -159,7 +162,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
           &CosignIntent {
             global_session: global_session_for_this_block,
             block_number,
-            block_hash: block.hash(),
+            block_hash,
             notable: has_events == HasEvents::Notable,
           },
         );
coordinator/cosign/src/lib.rs
@@ -3,7 +3,7 @@
 #![deny(missing_docs)]

 use core::{fmt::Debug, future::Future};
-use std::collections::HashMap;
+use std::{sync::Arc, collections::HashMap};

 use blake2::{Digest, Blake2s256};
@@ -29,7 +29,7 @@ pub use delay::BROADCAST_FREQUENCY;
 use delay::LatestCosignedBlockNumber;

 /// The schnorrkel context to use when signing a cosign.
-pub const COSIGN_CONTEXT: &[u8] = b"serai-cosign";
+pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";

 /// A 'global session', defined as all validator sets used for cosigning at a given moment.
 ///
@@ -82,13 +82,13 @@ enum HasEvents {
 #[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
 pub struct CosignIntent {
   /// The global session this cosign is being performed under.
-  global_session: [u8; 32],
+  pub global_session: [u8; 32],
   /// The number of the block to cosign.
-  block_number: u64,
+  pub block_number: u64,
   /// The hash of the block to cosign.
-  block_hash: [u8; 32],
+  pub block_hash: [u8; 32],
   /// If this cosign must be handled before further cosigns are.
-  notable: bool,
+  pub notable: bool,
 }

 /// A cosign.
@@ -127,7 +127,7 @@ create_db! {
   // The following are populated by the intend task and used throughout the library

   // An index of Substrate blocks
-  SubstrateBlocks: (block_number: u64) -> [u8; 32],
+  SubstrateBlockHash: (block_number: u64) -> [u8; 32],
   // A mapping from a global session's ID to its relevant information.
   GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
   // The last block to be cosigned by a global session.
@@ -161,6 +161,11 @@ async fn keys_for_network(
   serai: &TemporalSerai<'_>,
   network: NetworkId,
 ) -> Result<Option<(Session, KeyPair)>, String> {
+  // The Serai network never cosigns so it has no keys for cosigning
+  if network == NetworkId::Serai {
+    return Ok(None);
+  }
+
   let Some(latest_session) =
     serai.validator_sets().session(network).await.map_err(|e| format!("{e:?}"))?
   else {
@@ -223,6 +228,43 @@ pub trait RequestNotableCosigns: 'static + Send {
 #[derive(Debug)]
 pub struct Faulted;

+/// An error incurred while intaking a cosign.
+#[derive(Debug)]
+pub enum IntakeCosignError {
+  /// Cosign is for a not-yet-indexed block
+  NotYetIndexedBlock,
+  /// A later cosign for this cosigner has already been handled
+  StaleCosign,
+  /// The cosign's global session isn't recognized
+  UnrecognizedGlobalSession,
+  /// The cosign is for a block before its global session starts
+  BeforeGlobalSessionStart,
+  /// The cosign is for a block after its global session ends
+  AfterGlobalSessionEnd,
+  /// The cosign's signing network wasn't a participant in this global session
+  NonParticipatingNetwork,
+  /// The cosign had an invalid signature
+  InvalidSignature,
+  /// The cosign is for a global session which has yet to have its declaration block cosigned
+  FutureGlobalSession,
+}
+
+impl IntakeCosignError {
+  /// If this error is temporal to the local view
+  pub fn temporal(&self) -> bool {
+    match self {
+      IntakeCosignError::NotYetIndexedBlock |
+        IntakeCosignError::StaleCosign |
+        IntakeCosignError::UnrecognizedGlobalSession |
+        IntakeCosignError::FutureGlobalSession => true,
+      IntakeCosignError::BeforeGlobalSessionStart |
+        IntakeCosignError::AfterGlobalSessionEnd |
+        IntakeCosignError::NonParticipatingNetwork |
+        IntakeCosignError::InvalidSignature => false,
+    }
+  }
+}
+
 /// The interface to manage cosigning with.
 pub struct Cosigning<D: Db> {
   db: D,
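A hedged sketch (not from the diff) of how a caller might branch on `temporal`; `retry_later` is a hypothetical handler:

```rust
// `cosigning` is a `Cosigning<D>` and `cosign` a `SignedCosign`
match cosigning.intake_cosign(&cosign) {
  Ok(()) => {}
  // Temporal errors reflect our local view (e.g. a block we haven't indexed yet),
  // so the cosign may become valid later and is worth retrying
  Err(e) if e.temporal() => retry_later(cosign),
  // Non-temporal errors (bad signature, out-of-range block) are permanent; drop it
  Err(e) => log::debug!("dropping invalid cosign: {e:?}"),
}
```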
@@ -234,7 +276,7 @@ impl<D: Db> Cosigning<D> {
   /// only used once at any given time.
   pub fn spawn<R: RequestNotableCosigns>(
     db: D,
-    serai: Serai,
+    serai: Arc<Serai>,
     request: R,
     tasks_to_run_upon_cosigning: Vec<TaskHandle>,
   ) -> Self {
@@ -265,14 +307,14 @@ impl<D: Db> Cosigning<D> {
     Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0))
   }

-  /// Fetch an cosigned Substrate block by its block number.
+  /// Fetch a cosigned Substrate block's hash by its block number.
   pub fn cosigned_block(getter: &impl Get, block_number: u64) -> Result<Option<[u8; 32]>, Faulted> {
     if block_number > Self::latest_cosigned_block_number(getter)? {
       return Ok(None);
     }

     Ok(Some(
-      SubstrateBlocks::get(getter, block_number).expect("cosigned block but didn't index it"),
+      SubstrateBlockHash::get(getter, block_number).expect("cosigned block but didn't index it"),
     ))
   }
@@ -280,10 +322,10 @@ impl<D: Db> Cosigning<D> {
   ///
   /// If this global session hasn't produced any notable cosigns, this will return the latest
   /// cosigns for this session.
-  pub fn notable_cosigns(&self, global_session: [u8; 32]) -> Vec<SignedCosign> {
+  pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec<SignedCosign> {
     let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
     for network in serai_client::primitives::NETWORKS {
-      if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
+      if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) {
         cosigns.push(cosign);
       }
     }
@@ -321,27 +363,16 @@ impl<D: Db> Cosigning<D> {
     }
   }

-  /// Intake a cosign from the Serai network.
-  ///
-  /// - Returns Err(_) if there was an error trying to validate the cosign and it should be retried
-  ///   later.
-  /// - Returns Ok(true) if the cosign was successfully handled or could not be handled at this
-  ///   time.
-  /// - Returns Ok(false) if the cosign was invalid.
-  //
-  // We collapse a cosign which shouldn't be handled yet into a valid cosign (`Ok(true)`) as we
-  // assume we'll either explicitly request it if we need it or we'll naturally see it (or a later,
-  // more relevant, cosign) again.
+  /// Intake a cosign.
   //
   // Takes `&mut self` as this should only be called once at any given moment.
-  // TODO: Don't overload bool here
-  pub fn intake_cosign(&mut self, signed_cosign: &SignedCosign) -> Result<bool, String> {
+  pub fn intake_cosign(&mut self, signed_cosign: &SignedCosign) -> Result<(), IntakeCosignError> {
     let cosign = &signed_cosign.cosign;
     let network = cosign.cosigner;

     // Check our indexed blockchain includes a block with this block number
-    let Some(our_block_hash) = SubstrateBlocks::get(&self.db, cosign.block_number) else {
-      return Ok(true);
+    let Some(our_block_hash) = SubstrateBlockHash::get(&self.db, cosign.block_number) else {
+      Err(IntakeCosignError::NotYetIndexedBlock)?
     };
     let faulty = cosign.block_hash != our_block_hash;
@@ -351,20 +382,19 @@ impl<D: Db> Cosigning<D> {
         NetworksLatestCosignedBlock::get(&self.db, cosign.global_session, network)
       {
         if existing.cosign.block_number >= cosign.block_number {
-          return Ok(true);
+          Err(IntakeCosignError::StaleCosign)?;
         }
       }
     }

     let Some(global_session) = GlobalSessions::get(&self.db, cosign.global_session) else {
-      // Unrecognized global session
-      return Ok(true);
+      Err(IntakeCosignError::UnrecognizedGlobalSession)?
     };

     // Check the cosigned block number is in range to the global session
     if cosign.block_number < global_session.start_block_number {
-      // Cosign is for a block predating the global session
-      return Ok(false);
+      Err(IntakeCosignError::BeforeGlobalSessionStart)?;
     }
     if !faulty {
       // This prevents a malicious validator set, on the same chain, from producing a cosign after
@@ -372,7 +402,7 @@ impl<D: Db> Cosigning<D> {
       if let Some(last_block) = GlobalSessionsLastBlock::get(&self.db, cosign.global_session) {
         if cosign.block_number > last_block {
           // Cosign is for a block after the last block this global session should have signed
-          return Ok(false);
+          Err(IntakeCosignError::AfterGlobalSessionEnd)?;
         }
       }
     }
@@ -381,20 +411,20 @@ impl<D: Db> Cosigning<D> {
     {
       let key = Public::from({
         let Some(key) = global_session.keys.get(&network) else {
-          return Ok(false);
+          Err(IntakeCosignError::NonParticipatingNetwork)?
         };
         *key
       });

       if !signed_cosign.verify_signature(key) {
-        return Ok(false);
+        Err(IntakeCosignError::InvalidSignature)?;
       }
     }

     // Since we verified this cosign's signature, and have a chain sufficiently long, handle the
     // cosign

-    let mut txn = self.db.txn();
+    let mut txn = self.db.unsafe_txn();

     if !faulty {
       // If this is for a future global session, we don't acknowledge this cosign at this time
@@ -403,7 +433,7 @@ impl<D: Db> Cosigning<D> {
       // block declaring it was cosigned
       if (global_session.start_block_number - 1) > latest_cosigned_block_number {
         drop(txn);
-        return Ok(true);
+        return Err(IntakeCosignError::FutureGlobalSession);
       }

       // This is safe as it's in-range and newer, as prior checked since it isn't faulty
@@ -417,9 +447,10 @@ impl<D: Db> Cosigning<D> {

       let mut weight_cosigned = 0;
       for fault in &faults {
-        let Some(stake) = global_session.stakes.get(&fault.cosign.cosigner) else {
-          Err("cosigner with recognized key didn't have a stake entry saved".to_string())?
-        };
+        let stake = global_session
+          .stakes
+          .get(&fault.cosign.cosigner)
+          .expect("cosigner with recognized key didn't have a stake entry saved");
         weight_cosigned += stake;
       }
@@ -431,7 +462,7 @@ impl<D: Db> Cosigning<D> {
     }

     txn.commit();
-    Ok(true)
+    Ok(())
   }

   /// Receive intended cosigns to produce for this ValidatorSet.
@@ -449,3 +480,30 @@ impl<D: Db> Cosigning<D> {
     res
   }
 }
+
+mod tests {
+  use super::*;
+
+  struct RNC;
+  impl RequestNotableCosigns for RNC {
+    /// The error type which may be encountered when requesting notable cosigns.
+    type Error = ();
+
+    /// Request the notable cosigns for this global session.
+    fn request_notable_cosigns(
+      &self,
+      global_session: [u8; 32],
+    ) -> impl Send + Future<Output = Result<(), Self::Error>> {
+      async move { Ok(()) }
+    }
+  }
+
+  #[tokio::test]
+  async fn test() {
+    let db: serai_db::MemDb = serai_db::MemDb::new();
+    let serai = unsafe { core::mem::transmute(0u64) };
+    let request = RNC;
+    let tasks = vec![];
+    let _ = Cosigning::spawn(db, serai, request, tasks);
+    core::future::pending().await
+  }
+}
coordinator/p2p/Cargo.toml (new file, 33 lines)
@@ -0,0 +1,33 @@
[package]
name = "serai-coordinator-p2p"
version = "0.1.0"
description = "Serai coordinator's P2P abstraction"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/p2p"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.81"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

serai-db = { path = "../../common/db", version = "0.1" }

serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
serai-cosign = { path = "../cosign" }
tributary-sdk = { path = "../tributary-sdk" }

futures-lite = { version = "2", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["sync", "macros"] }

log = { version = "0.4", default-features = false, features = ["std"] }
serai-task = { path = "../../common/task", version = "0.1" }
coordinator/p2p/LICENSE (new file, 15 lines)
@@ -0,0 +1,15 @@
AGPL-3.0-only license

Copyright (c) 2023-2025 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
coordinator/p2p/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# Serai Coordinator P2P

The P2P abstraction used by Serai's coordinator, and tasks over it.
coordinator/p2p/libp2p/Cargo.toml (new file, 42 lines)
@@ -0,0 +1,42 @@
[package]
name = "serai-coordinator-libp2p-p2p"
version = "0.1.0"
description = "Serai coordinator's libp2p-based P2P backend"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/p2p/libp2p"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.81"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
async-trait = { version = "0.1", default-features = false }

rand_core = { version = "0.6", default-features = false, features = ["std"] }

zeroize = { version = "^1.5", default-features = false, features = ["std"] }
blake2 = { version = "0.10", default-features = false, features = ["std"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }

hex = { version = "0.4", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

serai-client = { path = "../../../substrate/client", default-features = false, features = ["serai", "borsh"] }
serai-cosign = { path = "../../cosign" }
tributary-sdk = { path = "../../tributary-sdk" }

futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["sync"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }

log = { version = "0.4", default-features = false, features = ["std"] }
serai-task = { path = "../../../common/task", version = "0.1" }
serai-coordinator-p2p = { path = "../" }
coordinator/p2p/libp2p/LICENSE (new file, 15 lines)
@@ -0,0 +1,15 @@
AGPL-3.0-only license

Copyright (c) 2023-2025 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
coordinator/p2p/libp2p/README.md (new file, 14 lines)
@@ -0,0 +1,14 @@
# Serai Coordinator libp2p P2P

A libp2p-backed P2P instantiation for Serai's coordinator.

The libp2p swarm is limited to validators from the Serai network. The swarm
does not maintain any of its own peer finding/routing infrastructure, instead
relying on the Serai network's connection information to dial peers. This does
limit the listening peers to those immediately reachable via the same IP
address as their Serai node (despite the two being distinct services) and not
hidden behind a NAT, yet it is also quite simple and gives us full control
over whom we connect to.

Peers are decided via the internal `DialTask`, which aims to maintain a target
amount of peers for each external network. This ensures cosigns are able to
propagate across the external networks which sign them.
coordinator/p2p/libp2p/src/authenticate.rs (new file, 176 lines)
@@ -0,0 +1,176 @@
use core::{pin::Pin, future::Future};
use std::io;

use zeroize::Zeroizing;
use rand_core::{RngCore, OsRng};

use blake2::{Digest, Blake2s256};
use schnorrkel::{Keypair, PublicKey, Signature};

use serai_client::primitives::PublicKey as Public;

use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libp2p::{
  core::UpgradeInfo,
  InboundUpgrade, OutboundUpgrade,
  identity::{self, PeerId},
  noise,
};

use crate::peer_id_from_public;

const PROTOCOL: &str = "/serai/coordinator/validators";

#[derive(Clone)]
pub(crate) struct OnlyValidators {
  pub(crate) serai_key: Zeroizing<Keypair>,
  pub(crate) noise_keypair: identity::Keypair,
}

impl OnlyValidators {
  /// The ephemeral challenge protocol for authentication.
  ///
  /// We use ephemeral challenges to prevent replaying signatures from historic sessions.
  ///
  /// We don't immediately send the challenge. We only send a commitment to it. This prevents our
  /// remote peer from choosing their challenge in response to our challenge, in case there was any
  /// benefit to doing so.
  async fn challenges<S: 'static + Send + Unpin + AsyncRead + AsyncWrite>(
    socket: &mut noise::Output<S>,
  ) -> io::Result<([u8; 32], [u8; 32])> {
    let mut our_challenge = [0; 32];
    OsRng.fill_bytes(&mut our_challenge);

    // Write the hash of our challenge
    socket.write_all(&Blake2s256::digest(our_challenge)).await?;

    // Read the hash of their challenge
    let mut their_challenge_commitment = [0; 32];
    socket.read_exact(&mut their_challenge_commitment).await?;

    // Reveal our challenge
    socket.write_all(&our_challenge).await?;

    // Read their challenge
    let mut their_challenge = [0; 32];
    socket.read_exact(&mut their_challenge).await?;

    // Verify their challenge
    if <[u8; 32]>::from(Blake2s256::digest(their_challenge)) != their_challenge_commitment {
      Err(io::Error::other("challenge didn't match challenge commitment"))?;
    }

    Ok((our_challenge, their_challenge))
  }

  // We sign the two noise peer IDs and the ephemeral challenges.
  //
  // Signing the noise peer IDs ensures we're authenticating this noise connection. The only
  // expectations placed on noise are for it to prevent a MITM from impersonating the other end or
  // modifying any messages sent.
  //
  // Signing the ephemeral challenges prevents any replays. While that should be unnecessary, as
  // noise MAY prevent replays across sessions (even when the same key is used), and noise IDs
  // shouldn't be reused (so it should be fine to reuse an existing signature for these noise IDs),
  // it doesn't hurt.
  async fn authenticate<S: 'static + Send + Unpin + AsyncRead + AsyncWrite>(
    &self,
    socket: &mut noise::Output<S>,
    dialer_peer_id: PeerId,
    dialer_challenge: [u8; 32],
    listener_peer_id: PeerId,
    listener_challenge: [u8; 32],
  ) -> io::Result<PeerId> {
    // Write our public key
    socket.write_all(&self.serai_key.public.to_bytes()).await?;

    let msg = borsh::to_vec(&(
      dialer_peer_id.to_bytes(),
      dialer_challenge,
      listener_peer_id.to_bytes(),
      listener_challenge,
    ))
    .unwrap();
    let signature = self.serai_key.sign_simple(PROTOCOL.as_bytes(), &msg);
    socket.write_all(&signature.to_bytes()).await?;

    let mut public_key_and_sig = [0; 96];
    socket.read_exact(&mut public_key_and_sig).await?;
    let public_key = PublicKey::from_bytes(&public_key_and_sig[.. 32])
      .map_err(|_| io::Error::other("invalid public key"))?;
    let sig = Signature::from_bytes(&public_key_and_sig[32 ..])
      .map_err(|_| io::Error::other("invalid signature serialization"))?;

    public_key
      .verify_simple(PROTOCOL.as_bytes(), &msg, &sig)
      .map_err(|_| io::Error::other("invalid signature"))?;

    Ok(peer_id_from_public(Public::from_raw(public_key.to_bytes())))
  }
}

impl UpgradeInfo for OnlyValidators {
  type Info = <noise::Config as UpgradeInfo>::Info;
  type InfoIter = <noise::Config as UpgradeInfo>::InfoIter;
  fn protocol_info(&self) -> Self::InfoIter {
    // A keypair only causes an error if its sign operation fails, which is only possible with RSA,
    // which isn't used within this codebase
    noise::Config::new(&self.noise_keypair).unwrap().protocol_info()
  }
}

impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundUpgrade<S> for OnlyValidators {
  type Output = (PeerId, noise::Output<S>);
  type Error = io::Error;
  type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;

  fn upgrade_inbound(self, socket: S, info: Self::Info) -> Self::Future {
    Box::pin(async move {
      let (dialer_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
        .unwrap()
        .upgrade_inbound(socket, info)
        .await
        .map_err(io::Error::other)?;

      let (our_challenge, dialer_challenge) = OnlyValidators::challenges(&mut socket).await?;
      let dialer_serai_validator = self
        .authenticate(
          &mut socket,
          dialer_noise_peer_id,
          dialer_challenge,
          PeerId::from_public_key(&self.noise_keypair.public()),
          our_challenge,
        )
        .await?;
      Ok((dialer_serai_validator, socket))
    })
  }
}

impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundUpgrade<S> for OnlyValidators {
  type Output = (PeerId, noise::Output<S>);
  type Error = io::Error;
  type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;

  fn upgrade_outbound(self, socket: S, info: Self::Info) -> Self::Future {
    Box::pin(async move {
      let (listener_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
        .unwrap()
        .upgrade_outbound(socket, info)
        .await
        .map_err(io::Error::other)?;

      let (our_challenge, listener_challenge) = OnlyValidators::challenges(&mut socket).await?;
      let listener_serai_validator = self
        .authenticate(
          &mut socket,
          PeerId::from_public_key(&self.noise_keypair.public()),
          our_challenge,
          listener_noise_peer_id,
          listener_challenge,
        )
        .await?;
      Ok((listener_serai_validator, socket))
    })
  }
}
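A condensed, hedged sketch (not from the file) of the commit-reveal idea `challenges` implements, using the same `Blake2s256` commitment; in-memory values stand in for the socket reads and writes:

```rust
use blake2::{Digest, Blake2s256};
use rand_core::{RngCore, OsRng};

fn main() {
  // Each side samples a random challenge and first sends only its hash
  let mut challenge = [0u8; 32];
  OsRng.fill_bytes(&mut challenge);
  let commitment: [u8; 32] = Blake2s256::digest(challenge).into();

  // ... both commitments are exchanged before either challenge is revealed ...

  // On reveal, the counterparty checks the challenge against the commitment,
  // so neither side could have picked its challenge after seeing the other's
  assert_eq!(<[u8; 32]>::from(Blake2s256::digest(challenge)), commitment);
}
```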
coordinator/p2p/libp2p/src/dial.rs (new file, 127 lines)
@@ -0,0 +1,127 @@
use core::future::Future;
use std::{sync::Arc, collections::HashSet};

use rand_core::{RngCore, OsRng};

use tokio::sync::mpsc;

use serai_client::{SeraiError, Serai};

use libp2p::{
  core::multiaddr::{Protocol, Multiaddr},
  swarm::dial_opts::DialOpts,
};

use serai_task::ContinuallyRan;

use crate::{PORT, Peers, validators::Validators};

const TARGET_PEERS_PER_NETWORK: usize = 5;
/*
  If we only tracked the target amount of peers per network, we'd risk being eclipsed by an
  adversary who immediately connects to us with their array of validators upon our boot. Their
  array would satisfy our target amount of peers, so we'd never seek more, enabling the adversary
  to be the only entity we peered with.

  We solve this by additionally requiring an explicit amount of peers we dialed. That means we
  randomly chose to connect to these peers.
*/
// TODO const TARGET_DIALED_PEERS_PER_NETWORK: usize = 3;

pub(crate) struct DialTask {
  serai: Arc<Serai>,
  validators: Validators,
  peers: Peers,
  to_dial: mpsc::UnboundedSender<DialOpts>,
}

impl DialTask {
  pub(crate) fn new(
    serai: Arc<Serai>,
    peers: Peers,
    to_dial: mpsc::UnboundedSender<DialOpts>,
  ) -> Self {
    DialTask { serai: serai.clone(), validators: Validators::new(serai).0, peers, to_dial }
  }
}

impl ContinuallyRan for DialTask {
  // Only run every five minutes, not the default of every five seconds
  const DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
  const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 10 * 60;

  type Error = SeraiError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      self.validators.update().await?;

      // If any of our peers is lacking, try to connect to more
      let mut dialed = false;
      let peer_counts = self
        .peers
        .peers
        .read()
        .await
        .iter()
        .map(|(network, peers)| (*network, peers.len()))
        .collect::<Vec<_>>();
      for (network, peer_count) in peer_counts {
        /*
          If we don't have the target amount of peers, and we don't have all the validators in the
          set but one, attempt to connect to more validators within this set.

          The latter clause is so if there's a set with only 3 validators, we don't infinitely try
          to connect to the target amount of peers for this network as we never will. Instead, we
          only try to connect to most of the validators actually present.
        */
        if (peer_count < TARGET_PEERS_PER_NETWORK) &&
          (peer_count <
            self
              .validators
              .by_network()
              .get(&network)
              .map(HashSet::len)
              .unwrap_or(0)
              .saturating_sub(1))
        {
          let mut potential_peers = self.serai.p2p_validators(network).await?;
          for _ in 0 .. (TARGET_PEERS_PER_NETWORK - peer_count) {
            if potential_peers.is_empty() {
              break;
            }
            let index_to_dial =
              usize::try_from(OsRng.next_u64() % u64::try_from(potential_peers.len()).unwrap())
                .unwrap();
            let randomly_selected_peer = potential_peers.swap_remove(index_to_dial);

            log::info!("found peer from substrate: {randomly_selected_peer}");

            // Map the peer from a Substrate P2P network peer to a Coordinator P2P network peer
            let mapped_peer = randomly_selected_peer
              .into_iter()
              .filter_map(|protocol| match protocol {
                // Drop PeerIds from the Substrate P2p network
                Protocol::P2p(_) => None,
                // Use our own TCP port
                Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
                // Pass-through any other specifications (IPv4, IPv6, etc)
                other => Some(other),
              })
              .collect::<Multiaddr>();

            log::debug!("mapped found peer: {mapped_peer}");

            self
              .to_dial
              .send(DialOpts::unknown_peer_id().address(mapped_peer).build())
              .expect("dial receiver closed?");
            dialed = true;
          }
        }
      }

      Ok(dialed)
    }
  }
}
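A small hedged sketch (not from the file) of the address mapping `DialTask` performs; the `PORT` value here is hypothetical, standing in for the crate's `PORT` constant. Substrate's advertised multiaddr has its `/p2p/…` suffix dropped and its TCP port rewritten to the coordinator's:

```rust
use libp2p::core::multiaddr::{Protocol, Multiaddr};

const PORT: u16 = 30563; // hypothetical; stands in for the crate's PORT constant

fn map_peer(addr: Multiaddr) -> Multiaddr {
  addr
    .into_iter()
    .filter_map(|protocol| match protocol {
      // Drop PeerIds from the Substrate P2P network
      Protocol::P2p(_) => None,
      // Use our own TCP port
      Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
      // Pass-through any other specifications (IPv4, IPv6, etc)
      other => Some(other),
    })
    .collect::<Multiaddr>()
}

fn main() {
  let addr: Multiaddr = "/ip4/192.0.2.1/tcp/30333".parse().unwrap();
  assert_eq!(map_peer(addr).to_string(), format!("/ip4/192.0.2.1/tcp/{PORT}"));
}
```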
coordinator/p2p/libp2p/src/gossip.rs (new file, 75 lines)
@@ -0,0 +1,75 @@
use core::time::Duration;

use blake2::{Digest, Blake2s256};

use borsh::{BorshSerialize, BorshDeserialize};

use libp2p::gossipsub::{
  IdentTopic, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder, IdentityTransform,
  AllowAllSubscriptionFilter, Behaviour,
};
pub use libp2p::gossipsub::Event;

use serai_cosign::SignedCosign;

// Block size limit + 16 KB of space for signatures/metadata
pub(crate) const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary_sdk::BLOCK_SIZE_LIMIT + 16384;

const LIBP2P_PROTOCOL: &str = "/serai/coordinator/gossip/1.0.0";
const BASE_TOPIC: &str = "/";

fn topic_for_tributary(tributary: [u8; 32]) -> IdentTopic {
  IdentTopic::new(format!("/tributary/{}", hex::encode(tributary)))
}

#[derive(Clone, BorshSerialize, BorshDeserialize)]
pub(crate) enum Message {
  Tributary { tributary: [u8; 32], message: Vec<u8> },
  Cosign(SignedCosign),
}

impl Message {
  pub(crate) fn topic(&self) -> IdentTopic {
    match self {
      Message::Tributary { tributary, .. } => topic_for_tributary(*tributary),
      Message::Cosign(_) => IdentTopic::new(BASE_TOPIC),
    }
  }
}

pub(crate) type Behavior = Behaviour<IdentityTransform, AllowAllSubscriptionFilter>;

pub(crate) fn new_behavior() -> Behavior {
  // The latency used by the Tendermint protocol, used here as the gossip epoch duration
  // libp2p-rs defaults to 1 second, whereas ours will be ~2
  let heartbeat_interval = tributary_sdk::tendermint::LATENCY_TIME;
  // The amount of heartbeats which will occur within a single Tributary block
  let heartbeats_per_block =
    tributary_sdk::tendermint::TARGET_BLOCK_TIME.div_ceil(heartbeat_interval);
  // libp2p-rs defaults to 5, whereas ours will be ~8
  let heartbeats_to_keep = 2 * heartbeats_per_block;
  // libp2p-rs defaults to 3, whereas ours will be ~4
  let heartbeats_to_gossip = heartbeats_per_block;

  let config = ConfigBuilder::default()
    .protocol_id_prefix(LIBP2P_PROTOCOL)
    .history_length(usize::try_from(heartbeats_to_keep).unwrap())
    .history_gossip(usize::try_from(heartbeats_to_gossip).unwrap())
    .heartbeat_interval(Duration::from_millis(heartbeat_interval.into()))
    .max_transmit_size(MAX_LIBP2P_GOSSIP_MESSAGE_SIZE)
    .duplicate_cache_time(Duration::from_millis((heartbeats_to_keep * heartbeat_interval).into()))
    .validation_mode(ValidationMode::Anonymous)
    // Uses a content-based message ID to avoid duplicates as much as possible
    .message_id_fn(|msg| {
      MessageId::new(&Blake2s256::digest([msg.topic.as_str().as_bytes(), &msg.data].concat()))
    })
    .build();

  let mut gossip = Behavior::new(MessageAuthenticity::Anonymous, config.unwrap()).unwrap();

  // Subscribe to the base topic
  let topic = IdentTopic::new(BASE_TOPIC);
  let _ = gossip.subscribe(&topic);

  gossip
}
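// Illustrative sketch (not part of the diff): the content-based message ID above means two
// identical payloads published to the same topic hash to the same ID, letting gossipsub's
// duplicate cache suppress redundant copies regardless of which peer relayed them.
fn example_message_id(topic: &str, data: &[u8]) -> Vec<u8> {
  use blake2::{Digest, Blake2s256};
  Blake2s256::digest([topic.as_bytes(), data].concat()).to_vec()
}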
433
coordinator/p2p/libp2p/src/lib.rs
Normal file
@@ -0,0 +1,433 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::{future::Future, time::Duration};
use std::{
  sync::Arc,
  collections::{HashSet, HashMap},
};

use rand_core::{RngCore, OsRng};

use zeroize::Zeroizing;
use schnorrkel::Keypair;

use serai_client::{
  primitives::{NetworkId, PublicKey},
  validator_sets::primitives::ValidatorSet,
  Serai,
};

use tokio::sync::{mpsc, oneshot, Mutex, RwLock};

use serai_task::{Task, ContinuallyRan};

use serai_cosign::SignedCosign;

use libp2p::{
  multihash::Multihash,
  identity::{self, PeerId},
  tcp::Config as TcpConfig,
  yamux, allow_block_list,
  connection_limits::{self, ConnectionLimits},
  swarm::NetworkBehaviour,
  SwarmBuilder,
};

use serai_coordinator_p2p::{Heartbeat, TributaryBlockWithCommit};

/// A struct to sync the validators from the Serai node in order to keep track of them.
mod validators;
use validators::UpdateValidatorsTask;

/// The authentication protocol upgrade to limit the P2P network to active validators.
mod authenticate;
use authenticate::OnlyValidators;

/// The ping behavior, used to ensure connection latency is below the limit
mod ping;

/// The request-response messages and behavior
mod reqres;
use reqres::{RequestId, Request, Response};

/// The gossip messages and behavior
mod gossip;
use gossip::Message;

/// The swarm task, running it and dispatching to/from it
mod swarm;
use swarm::SwarmTask;

/// The dial task, to find new peers to connect to
mod dial;
use dial::DialTask;

const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')

// usize::max, manually implemented, as max isn't a const fn
const MAX_LIBP2P_MESSAGE_SIZE: usize =
  if gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE > reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE {
    gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE
  } else {
    reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE
  };

fn peer_id_from_public(public: PublicKey) -> PeerId {
  // 0 represents the identity Multihash, that no hash was performed
  // It's an internal constant so we can't refer to the constant inside libp2p
  PeerId::from_multihash(Multihash::wrap(0, &public.0).unwrap()).unwrap()
}
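// Illustrative sketch (not part of the diff): since code 0 is the identity multihash, the
// mapping above is reversible. This hypothetical helper assumes the `code`/`digest` accessors on
// `multihash::Multihash`.
fn public_key_bytes_from_multihash(multihash: &Multihash<64>) -> Option<[u8; 32]> {
  // A non-identity code means the key was hashed and can't be recovered
  if multihash.code() != 0 {
    return None;
  }
  <[u8; 32]>::try_from(multihash.digest()).ok()
}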
/// The representation of a peer.
pub struct Peer<'a> {
  outbound_requests: &'a mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender<Response>)>,
  id: PeerId,
}
impl serai_coordinator_p2p::Peer<'_> for Peer<'_> {
  fn send_heartbeat(
    &self,
    heartbeat: Heartbeat,
  ) -> impl Send + Future<Output = Option<Vec<TributaryBlockWithCommit>>> {
    async move {
      const HEARTBEAT_TIMEOUT: Duration = Duration::from_secs(5);

      let request = Request::Heartbeat(heartbeat);
      let (sender, receiver) = oneshot::channel();
      self
        .outbound_requests
        .send((self.id, request, sender))
        .expect("outbound requests recv channel was dropped?");
      if let Ok(Ok(Response::Blocks(blocks))) =
        tokio::time::timeout(HEARTBEAT_TIMEOUT, receiver).await
      {
        Some(blocks)
      } else {
        None
      }
    }
  }
}

#[derive(Clone)]
struct Peers {
  peers: Arc<RwLock<HashMap<NetworkId, HashSet<PeerId>>>>,
}

// Consider adding identify/kad/autonat/rendezvous/(relay + dcutr). While we currently use the
// Serai network for peers, we could use it solely for bootstrapping/as a fallback.
#[derive(NetworkBehaviour)]
struct Behavior {
  // Used to only allow Serai validators as peers
  allow_list: allow_block_list::Behaviour<allow_block_list::AllowedPeers>,
  // Used to limit each peer to a single connection
  connection_limits: connection_limits::Behaviour,
  // Used to ensure connection latency is within tolerances
  ping: ping::Behavior,
  // Used to request data from specific peers
  reqres: reqres::Behavior,
  // Used to broadcast messages to all other peers subscribed to a topic
  gossip: gossip::Behavior,
}

#[allow(clippy::type_complexity)]
struct Libp2pInner {
  peers: Peers,

  gossip: mpsc::UnboundedSender<Message>,
  outbound_requests: mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender<Response>)>,

  tributary_gossip: Mutex<mpsc::UnboundedReceiver<([u8; 32], Vec<u8>)>>,

  signed_cosigns: Mutex<mpsc::UnboundedReceiver<SignedCosign>>,
  signed_cosigns_send: mpsc::UnboundedSender<SignedCosign>,

  heartbeat_requests: Mutex<mpsc::UnboundedReceiver<(RequestId, ValidatorSet, [u8; 32])>>,
  notable_cosign_requests: Mutex<mpsc::UnboundedReceiver<(RequestId, [u8; 32])>>,
  inbound_request_responses: mpsc::UnboundedSender<(RequestId, Response)>,
}

/// The libp2p-backed P2P implementation.
///
/// The P2p trait implementation does not support backpressure and is expected to be fully
/// utilized. Failure to poll the entire API will cause unbounded memory growth.
#[derive(Clone)]
pub struct Libp2p(Arc<Libp2pInner>);

impl Libp2p {
  /// Create a new libp2p-backed P2P instance.
  ///
  /// This will spawn all of the internal tasks necessary for functioning.
  pub fn new(serai_key: &Zeroizing<Keypair>, serai: Arc<Serai>) -> Libp2p {
    // Define the object we track peers with
    let peers = Peers { peers: Arc::new(RwLock::new(HashMap::new())) };

    // Define the dial task
    let (dial_task_def, dial_task) = Task::new();
    let (to_dial_send, to_dial_recv) = mpsc::unbounded_channel();
    tokio::spawn(
      DialTask::new(serai.clone(), peers.clone(), to_dial_send)
        .continually_run(dial_task_def, vec![]),
    );

    let swarm = {
      let new_only_validators = |noise_keypair: &identity::Keypair| -> Result<_, ()> {
        Ok(OnlyValidators { serai_key: serai_key.clone(), noise_keypair: noise_keypair.clone() })
      };

      let new_yamux = || {
        let mut config = yamux::Config::default();
        // 1 MiB default + max message size
        config.set_max_buffer_size((1024 * 1024) + MAX_LIBP2P_MESSAGE_SIZE);
        // 256 KiB default + max message size
        config
          .set_receive_window_size(((256 * 1024) + MAX_LIBP2P_MESSAGE_SIZE).try_into().unwrap());
        config
      };

      let mut swarm = SwarmBuilder::with_existing_identity(identity::Keypair::generate_ed25519())
        .with_tokio()
        .with_tcp(TcpConfig::default().nodelay(true), new_only_validators, new_yamux)
        .unwrap()
        .with_behaviour(|_| Behavior {
          allow_list: allow_block_list::Behaviour::default(),
          // Limit each peer to a single connection
          connection_limits: connection_limits::Behaviour::new(
            ConnectionLimits::default().with_max_established_per_peer(Some(1)),
          ),
          ping: ping::new_behavior(),
          reqres: reqres::new_behavior(),
          gossip: gossip::new_behavior(),
        })
        .unwrap()
        .with_swarm_config(|config| {
          config
            .with_idle_connection_timeout(ping::INTERVAL + ping::TIMEOUT + Duration::from_secs(5))
        })
        .build();
      swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap();
      swarm.listen_on(format!("/ip6/::/tcp/{PORT}").parse().unwrap()).unwrap();
      swarm
    };

    let (swarm_validators, validator_changes) = UpdateValidatorsTask::spawn(serai);

    let (gossip_send, gossip_recv) = mpsc::unbounded_channel();
    let (signed_cosigns_send, signed_cosigns_recv) = mpsc::unbounded_channel();
    let (tributary_gossip_send, tributary_gossip_recv) = mpsc::unbounded_channel();

    let (outbound_requests_send, outbound_requests_recv) = mpsc::unbounded_channel();

    let (heartbeat_requests_send, heartbeat_requests_recv) = mpsc::unbounded_channel();
    let (notable_cosign_requests_send, notable_cosign_requests_recv) = mpsc::unbounded_channel();
    let (inbound_request_responses_send, inbound_request_responses_recv) =
      mpsc::unbounded_channel();

    // Create the swarm task
    SwarmTask::spawn(
      dial_task,
      to_dial_recv,
      swarm_validators,
      validator_changes,
      peers.clone(),
      swarm,
      gossip_recv,
      signed_cosigns_send.clone(),
      tributary_gossip_send,
      outbound_requests_recv,
      heartbeat_requests_send,
      notable_cosign_requests_send,
      inbound_request_responses_recv,
    );

    Libp2p(Arc::new(Libp2pInner {
      peers,

      gossip: gossip_send,
      outbound_requests: outbound_requests_send,

      tributary_gossip: Mutex::new(tributary_gossip_recv),

      signed_cosigns: Mutex::new(signed_cosigns_recv),
      signed_cosigns_send,

      heartbeat_requests: Mutex::new(heartbeat_requests_recv),
      notable_cosign_requests: Mutex::new(notable_cosign_requests_recv),
      inbound_request_responses: inbound_request_responses_send,
    }))
  }
}
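// Illustrative sketch (not part of the diff), assuming a `Serai` client handle is already in
// hand. The in-place keypair generation is hypothetical; a production deployment would load a
// persistent validator key instead.
fn example_setup(serai: Arc<Serai>) -> Libp2p {
  let serai_key = Zeroizing::new(Keypair::generate());
  Libp2p::new(&serai_key, serai)
}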
impl tributary_sdk::P2p for Libp2p {
  fn broadcast(&self, tributary: [u8; 32], message: Vec<u8>) -> impl Send + Future<Output = ()> {
    async move {
      self
        .0
        .gossip
        .send(Message::Tributary { tributary, message })
        .expect("gossip recv channel was dropped?");
    }
  }
}

impl serai_cosign::RequestNotableCosigns for Libp2p {
  type Error = ();

  fn request_notable_cosigns(
    &self,
    global_session: [u8; 32],
  ) -> impl Send + Future<Output = Result<(), Self::Error>> {
    async move {
      const AMOUNT_OF_PEERS_TO_REQUEST_FROM: usize = 3;
      const NOTABLE_COSIGNS_TIMEOUT: Duration = Duration::from_secs(5);

      let request = Request::NotableCosigns { global_session };

      let peers = self.0.peers.peers.read().await.clone();
      // HashSet of all peers
      let peers = peers.into_values().flat_map(<_>::into_iter).collect::<HashSet<_>>();
      // Vec of all peers
      let mut peers = peers.into_iter().collect::<Vec<_>>();

      let mut channels = Vec::with_capacity(AMOUNT_OF_PEERS_TO_REQUEST_FROM);
      for _ in 0 .. AMOUNT_OF_PEERS_TO_REQUEST_FROM {
        if peers.is_empty() {
          break;
        }
        let i = usize::try_from(OsRng.next_u64() % u64::try_from(peers.len()).unwrap()).unwrap();
        let peer = peers.swap_remove(i);

        let (sender, receiver) = oneshot::channel();
        self
          .0
          .outbound_requests
          .send((peer, request, sender))
          .expect("outbound requests recv channel was dropped?");
        channels.push(receiver);
      }

      // We could reduce our latency by using FuturesUnordered here but the latency isn't a concern
      for channel in channels {
        if let Ok(Ok(Response::NotableCosigns(cosigns))) =
          tokio::time::timeout(NOTABLE_COSIGNS_TIMEOUT, channel).await
        {
          for cosign in cosigns {
            self
              .0
              .signed_cosigns_send
              .send(cosign)
              .expect("signed_cosigns recv in this object was dropped?");
          }
        }
      }

      Ok(())
    }
  }
}

impl serai_coordinator_p2p::P2p for Libp2p {
  type Peer<'a> = Peer<'a>;

  fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
    async move {
      let Some(peer_ids) = self.0.peers.peers.read().await.get(&network).cloned() else {
        return vec![];
      };
      let mut res = vec![];
      for id in peer_ids {
        res.push(Peer { outbound_requests: &self.0.outbound_requests, id });
      }
      res
    }
  }

  fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()> {
    async move {
      self.0.gossip.send(Message::Cosign(cosign)).expect("gossip recv channel was dropped?");
    }
  }

  fn heartbeat(
    &self,
  ) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)> {
    async move {
      let (request_id, set, latest_block_hash) = self
        .0
        .heartbeat_requests
        .lock()
        .await
        .recv()
        .await
        .expect("heartbeat_requests_send was dropped?");
      let (sender, receiver) = oneshot::channel();
      tokio::spawn({
        let respond = self.0.inbound_request_responses.clone();
        async move {
          // The swarm task expects us to respond to every request. If the caller drops this
          // channel, we'll receive `Err` and respond with `vec![]`, safely satisfying that bound
          // without requiring the caller send a value down this channel
          let response = if let Ok(blocks) = receiver.await {
            Response::Blocks(blocks)
          } else {
            Response::Blocks(vec![])
          };
          respond
            .send((request_id, response))
            .expect("inbound_request_responses_recv was dropped?");
        }
      });
      (Heartbeat { set, latest_block_hash }, sender)
    }
  }

  fn notable_cosigns_request(
    &self,
  ) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)> {
    async move {
      let (request_id, global_session) = self
        .0
        .notable_cosign_requests
        .lock()
        .await
        .recv()
        .await
        .expect("notable_cosign_requests_send was dropped?");
      let (sender, receiver) = oneshot::channel();
      tokio::spawn({
        let respond = self.0.inbound_request_responses.clone();
        async move {
          let response = if let Ok(notable_cosigns) = receiver.await {
            Response::NotableCosigns(notable_cosigns)
          } else {
            Response::NotableCosigns(vec![])
          };
          respond
            .send((request_id, response))
            .expect("inbound_request_responses_recv was dropped?");
        }
      });
      (global_session, sender)
    }
  }

  fn tributary_message(&self) -> impl Send + Future<Output = ([u8; 32], Vec<u8>)> {
    async move {
      self.0.tributary_gossip.lock().await.recv().await.expect("tributary_gossip send was dropped?")
    }
  }

  fn cosign(&self) -> impl Send + Future<Output = SignedCosign> {
    async move {
      self
        .0
        .signed_cosigns
        .lock()
        .await
        .recv()
        .await
        .expect("signed_cosigns couldn't recv despite send in same object?")
    }
  }
}
17
coordinator/p2p/libp2p/src/ping.rs
Normal file
@@ -0,0 +1,17 @@
use core::time::Duration;

use tributary_sdk::tendermint::LATENCY_TIME;

use libp2p::ping::{self, Config, Behaviour};
pub use ping::Event;

pub(crate) const INTERVAL: Duration = Duration::from_secs(30);
// LATENCY_TIME represents the maximum latency for message delivery. Sending the ping, and
// receiving the pong, each have to occur within this time bound to validate the connection. We
// enforce that, as best we can, by requiring the round-trip be within twice the allowed latency.
pub(crate) const TIMEOUT: Duration = Duration::from_millis((2 * LATENCY_TIME) as u64);

pub(crate) type Behavior = Behaviour;
pub(crate) fn new_behavior() -> Behavior {
  Behavior::new(Config::default().with_interval(INTERVAL).with_timeout(TIMEOUT))
}
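// Illustrative arithmetic (not part of the diff): if LATENCY_TIME were 2_000 ms, the value the
// gossip comments suggest, TIMEOUT would be 4 s against a 30 s INTERVAL, so each connection is
// probed twice a minute and closed the first time a round trip exceeds double the per-message
// latency bound.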
135
coordinator/p2p/libp2p/src/reqres.rs
Normal file
@@ -0,0 +1,135 @@
use core::{fmt, time::Duration};
use std::io;

use async_trait::async_trait;

use borsh::{BorshSerialize, BorshDeserialize};

use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};

use libp2p::request_response::{
  self, Codec as CodecTrait, Event as GenericEvent, Config, Behaviour, ProtocolSupport,
};
pub use request_response::{RequestId, Message};

use serai_cosign::SignedCosign;

use serai_coordinator_p2p::{Heartbeat, TributaryBlockWithCommit};

/// The maximum message size for the request-response protocol
// This is derived from the heartbeat message size, as it's our largest message
pub(crate) const MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize =
  1024 + serai_coordinator_p2p::heartbeat::BATCH_SIZE_LIMIT;

const PROTOCOL: &str = "/serai/coordinator/reqres/1.0.0";

/// Requests which can be made via the request-response protocol.
#[derive(Clone, Copy, Debug, BorshSerialize, BorshDeserialize)]
pub(crate) enum Request {
  /// A heartbeat informing our peers of our latest block, for the specified blockchain, on
  /// regular intervals.
  ///
  /// If our peers have more blocks than us, they're expected to respond with those blocks.
  Heartbeat(Heartbeat),
  /// A request for the notable cosigns for a global session.
  NotableCosigns { global_session: [u8; 32] },
}

/// Responses which can be received via the request-response protocol.
#[derive(Clone, BorshSerialize, BorshDeserialize)]
pub(crate) enum Response {
  None,
  Blocks(Vec<TributaryBlockWithCommit>),
  NotableCosigns(Vec<SignedCosign>),
}
impl fmt::Debug for Response {
  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    match self {
      Response::None => fmt.debug_struct("Response::None").finish(),
      Response::Blocks(_) => fmt.debug_struct("Response::Blocks").finish_non_exhaustive(),
      Response::NotableCosigns(_) => {
        fmt.debug_struct("Response::NotableCosigns").finish_non_exhaustive()
      }
    }
  }
}

/// The codec used for the request-response protocol.
///
/// We don't use CBOR or JSON, but use borsh to create `Vec<u8>`s we then length-prefix. While,
/// ideally, we'd use borsh directly with the `io` traits defined here, they're async and there
/// isn't an amenable API within borsh for incremental deserialization.
#[derive(Default, Clone, Copy, Debug)]
pub(crate) struct Codec;
impl Codec {
  async fn read<M: BorshDeserialize>(io: &mut (impl Unpin + AsyncRead)) -> io::Result<M> {
    let mut len = [0; 4];
    io.read_exact(&mut len).await?;
    let len = usize::try_from(u32::from_le_bytes(len)).expect("not at least a 32-bit platform?");
    if len > MAX_LIBP2P_REQRES_MESSAGE_SIZE {
      Err(io::Error::other("request length exceeded MAX_LIBP2P_REQRES_MESSAGE_SIZE"))?;
    }
    // This may be a non-trivial allocation easily causable
    // While we could chunk the read, meaning we only perform the allocation as bandwidth is used,
    // the max message size should be sufficiently sane
    let mut buf = vec![0; len];
    io.read_exact(&mut buf).await?;
    let mut buf = buf.as_slice();
    let res = M::deserialize(&mut buf)?;
    if !buf.is_empty() {
      Err(io::Error::other("p2p message had extra data appended to it"))?;
    }
    Ok(res)
  }
  async fn write(io: &mut (impl Unpin + AsyncWrite), msg: &impl BorshSerialize) -> io::Result<()> {
    let msg = borsh::to_vec(msg).unwrap();
    io.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await?;
    io.write_all(&msg).await
  }
}
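// Illustrative arithmetic (not part of the diff): the framing above is a 4-byte little-endian
// length prefix followed by the borsh bytes. As borsh tags enum variants with a single byte,
// writing `Request::NotableCosigns { global_session: [0; 32] }` yields a 33-byte payload and
// 4 + 1 + 32 = 37 bytes on the wire.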
#[async_trait]
impl CodecTrait for Codec {
  type Protocol = &'static str;
  type Request = Request;
  type Response = Response;

  async fn read_request<R: Send + Unpin + AsyncRead>(
    &mut self,
    _: &Self::Protocol,
    io: &mut R,
  ) -> io::Result<Request> {
    Self::read(io).await
  }
  async fn read_response<R: Send + Unpin + AsyncRead>(
    &mut self,
    _: &Self::Protocol,
    io: &mut R,
  ) -> io::Result<Response> {
    Self::read(io).await
  }
  async fn write_request<W: Send + Unpin + AsyncWrite>(
    &mut self,
    _: &Self::Protocol,
    io: &mut W,
    req: Request,
  ) -> io::Result<()> {
    Self::write(io, &req).await
  }
  async fn write_response<W: Send + Unpin + AsyncWrite>(
    &mut self,
    _: &Self::Protocol,
    io: &mut W,
    res: Response,
  ) -> io::Result<()> {
    Self::write(io, &res).await
  }
}

pub(crate) type Event = GenericEvent<Request, Response>;

pub(crate) type Behavior = Behaviour<Codec>;
pub(crate) fn new_behavior() -> Behavior {
  let mut config = Config::default();
  config.set_request_timeout(Duration::from_secs(5));
  Behavior::new([(PROTOCOL, ProtocolSupport::Full)], config)
}
356
coordinator/p2p/libp2p/src/swarm.rs
Normal file
@@ -0,0 +1,356 @@
use std::{
  sync::Arc,
  collections::{HashSet, HashMap},
  time::{Duration, Instant},
};

use borsh::BorshDeserialize;

use serai_client::validator_sets::primitives::ValidatorSet;

use tokio::sync::{mpsc, oneshot, RwLock};

use serai_task::TaskHandle;

use serai_cosign::SignedCosign;

use futures_util::StreamExt;
use libp2p::{
  identity::PeerId,
  request_response::{RequestId, ResponseChannel},
  swarm::{dial_opts::DialOpts, SwarmEvent, Swarm},
};

use serai_coordinator_p2p::Heartbeat;

use crate::{
  Peers, BehaviorEvent, Behavior,
  validators::{self, Validators},
  ping,
  reqres::{self, Request, Response},
  gossip,
};

const TIME_BETWEEN_REBUILD_PEERS: Duration = Duration::from_secs(10 * 60);

/*
  `SwarmTask` handles everything we need the `Swarm` object for. The goal is to minimize the
  contention on this task. Unfortunately, the `Swarm` object itself is needed for a variety of
  purposes, making this a rather large task.

  Responsibilities include:
  - Actually dialing new peers (the selection process occurs in another task)
  - Maintaining the peers structure (as we need the Swarm object to see who our peers are)
  - Gossiping messages
  - Dispatching gossiped messages
  - Sending requests
  - Dispatching responses to requests
  - Dispatching received requests
  - Sending responses
*/
pub(crate) struct SwarmTask {
  dial_task: TaskHandle,
  to_dial: mpsc::UnboundedReceiver<DialOpts>,
  last_dial_task_run: Instant,

  validators: Arc<RwLock<Validators>>,
  validator_changes: mpsc::UnboundedReceiver<validators::Changes>,
  peers: Peers,
  rebuild_peers_at: Instant,

  swarm: Swarm<Behavior>,

  gossip: mpsc::UnboundedReceiver<gossip::Message>,
  signed_cosigns: mpsc::UnboundedSender<SignedCosign>,
  tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec<u8>)>,

  outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
  outbound_request_responses: HashMap<RequestId, oneshot::Sender<Response>>,

  inbound_request_response_channels: HashMap<RequestId, ResponseChannel<Response>>,
  heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
  notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
  inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
}

impl SwarmTask {
  fn handle_gossip(&mut self, event: gossip::Event) {
    match event {
      gossip::Event::Message { message, .. } => {
        let Ok(message) = gossip::Message::deserialize(&mut message.data.as_slice()) else {
          // TODO: Penalize the PeerId which created this message, which requires authenticating
          // each message OR moving to explicit acknowledgement before re-gossiping
          return;
        };
        match message {
          gossip::Message::Tributary { tributary, message } => {
            let _: Result<_, _> = self.tributary_gossip.send((tributary, message));
          }
          gossip::Message::Cosign(signed_cosign) => {
            let _: Result<_, _> = self.signed_cosigns.send(signed_cosign);
          }
        }
      }
      gossip::Event::Subscribed { .. } | gossip::Event::Unsubscribed { .. } => {}
      gossip::Event::GossipsubNotSupported { peer_id } => {
        let _: Result<_, _> = self.swarm.disconnect_peer_id(peer_id);
      }
    }
  }

  fn handle_reqres(&mut self, event: reqres::Event) {
    match event {
      reqres::Event::Message { message, .. } => match message {
        reqres::Message::Request { request_id, request, channel } => match request {
          reqres::Request::Heartbeat(Heartbeat { set, latest_block_hash }) => {
            self.inbound_request_response_channels.insert(request_id, channel);
            let _: Result<_, _> =
              self.heartbeat_requests.send((request_id, set, latest_block_hash));
          }
          reqres::Request::NotableCosigns { global_session } => {
            self.inbound_request_response_channels.insert(request_id, channel);
            let _: Result<_, _> = self.notable_cosign_requests.send((request_id, global_session));
          }
        },
        reqres::Message::Response { request_id, response } => {
          if let Some(channel) = self.outbound_request_responses.remove(&request_id) {
            let _: Result<_, _> = channel.send(response);
          }
        }
      },
      reqres::Event::OutboundFailure { request_id, .. } => {
        // Send None as the response for the request
        if let Some(channel) = self.outbound_request_responses.remove(&request_id) {
          let _: Result<_, _> = channel.send(Response::None);
        }
      }
      reqres::Event::InboundFailure { .. } | reqres::Event::ResponseSent { .. } => {}
    }
  }

  async fn run(mut self) {
    loop {
      let time_till_rebuild_peers = self.rebuild_peers_at.saturating_duration_since(Instant::now());

      tokio::select! {
        // If the validators have changed, update the allow list
        validator_changes = self.validator_changes.recv() => {
          let validator_changes = validator_changes.expect("validators update task shut down?");
          let behavior = &mut self.swarm.behaviour_mut().allow_list;
          for removed in validator_changes.removed {
            behavior.disallow_peer(removed);
          }
          for added in validator_changes.added {
            behavior.allow_peer(added);
          }
        }

        // Dial peers we're instructed to
        dial_opts = self.to_dial.recv() => {
          let dial_opts = dial_opts.expect("DialTask was closed?");
          let _: Result<_, _> = self.swarm.dial(dial_opts);
        }

        /*
          Rebuild the peers every 10 minutes.

          This protects against any race conditions/edge cases we have in our logic to track
          peers, along with unrepresented behavior such as when a peer changes the networks
          they're active in. This lets the peer tracking logic simply be 'good enough' to not
          become horribly corrupt over the span of `TIME_BETWEEN_REBUILD_PEERS`.

          We also use this to disconnect all peers who are no longer active in any network.
        */
        () = tokio::time::sleep(time_till_rebuild_peers) => {
          let validators_by_network = self.validators.read().await.by_network().clone();
          let connected_peers = self.swarm.connected_peers().copied().collect::<HashSet<_>>();

          // Build the new peers object
          let mut peers = HashMap::new();
          for (network, validators) in validators_by_network {
            peers.insert(network, validators.intersection(&connected_peers).copied().collect());
          }

          // Write the new peers object
          *self.peers.peers.write().await = peers;
          self.rebuild_peers_at = Instant::now() + TIME_BETWEEN_REBUILD_PEERS;
        }

        // Handle swarm events
        event = self.swarm.next() => {
          // `Swarm::next` will never return `Poll::Ready(None)`
          // https://docs.rs/
          //   libp2p/0.54.1/libp2p/struct.Swarm.html#impl-Stream-for-Swarm%3CTBehaviour%3E
          let event = event.unwrap();
          match event {
            // New connection, so update peers
            SwarmEvent::ConnectionEstablished { peer_id, .. } => {
              let Some(networks) =
                self.validators.read().await.networks(&peer_id).cloned() else { continue };
              let mut peers = self.peers.peers.write().await;
              for network in networks {
                peers.entry(network).or_insert_with(HashSet::new).insert(peer_id);
              }
            }

            // Connection closed, so update peers
            SwarmEvent::ConnectionClosed { peer_id, .. } => {
              let Some(networks) =
                self.validators.read().await.networks(&peer_id).cloned() else { continue };
              let mut peers = self.peers.peers.write().await;
              for network in networks {
                peers.entry(network).or_insert_with(HashSet::new).remove(&peer_id);
              }

              /*
                We want to re-run the dial task, since we lost a peer, in case we should find new
                peers. This opens a DoS where a validator repeatedly opens/closes connections to
                force iterations of the dial task. We prevent this by setting a minimum distance
                since the last explicit iteration.

                This is suboptimal. If we have several disconnects in immediate proximity, we'll
                trigger the dial task upon the first (where we may still have enough peers we
                shouldn't dial more) but not the last (where we may have so few peers left we
                should dial more). This is accepted as the dial task will eventually run on its
                natural timer.
              */
              const MINIMUM_TIME_SINCE_LAST_EXPLICIT_DIAL: Duration = Duration::from_secs(60);
              let now = Instant::now();
              if (self.last_dial_task_run + MINIMUM_TIME_SINCE_LAST_EXPLICIT_DIAL) < now {
                self.dial_task.run_now();
                self.last_dial_task_run = now;
              }
            }

            SwarmEvent::Behaviour(
              BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event)
            ) => {
              // This *is* an exhaustive match, as these events are empty enums
              match event {}
            }
            SwarmEvent::Behaviour(
              BehaviorEvent::Ping(ping::Event { peer: _, connection, result })
            ) => {
              if result.is_err() {
                self.swarm.close_connection(connection);
              }
            }
            SwarmEvent::Behaviour(BehaviorEvent::Reqres(event)) => {
              self.handle_reqres(event)
            }
            SwarmEvent::Behaviour(BehaviorEvent::Gossip(event)) => {
              self.handle_gossip(event)
            }

            // We don't handle any of these
            SwarmEvent::IncomingConnection { .. } |
            SwarmEvent::IncomingConnectionError { .. } |
            SwarmEvent::OutgoingConnectionError { .. } |
            SwarmEvent::NewListenAddr { .. } |
            SwarmEvent::ExpiredListenAddr { .. } |
            SwarmEvent::ListenerClosed { .. } |
            SwarmEvent::ListenerError { .. } |
            SwarmEvent::Dialing { .. } => {}
          }
        }

        message = self.gossip.recv() => {
          let message = message.expect("channel for messages to gossip was closed?");
          let topic = message.topic();
          let message = borsh::to_vec(&message).unwrap();

          /*
            If we're sending a message for this topic, it's because this topic is relevant to us.
            Subscribe to it.

            We create topics roughly weekly, one per validator set/session. Once present in a
            topic, we're interested in all messages for it until the validator set/session
            retires. Then there should no longer be any messages for the topic, as we should drop
            the Tributary which creates the messages.

            We use this as an argument to not bother implementing unsubscribing from topics.
            They're incredibly infrequently created, and old topics shouldn't still have messages
            published to them. Having the coordinator reboot being our method of unsubscribing is
            fine.

            Alternatively, we could route an API to determine when a topic is retired, or retire
            any topics we haven't sent messages on in the past hour.
          */
          let behavior = self.swarm.behaviour_mut();
          let _: Result<_, _> = behavior.gossip.subscribe(&topic);
          /*
            This may be an error of `InsufficientPeers`. If so, we could ask DialTask to dial more
            peers for this network. We don't, as we assume DialTask will detect the lack of peers
            for this network and already successfully handle this.
          */
          let _: Result<_, _> = behavior.gossip.publish(topic.hash(), message);
        }

        request = self.outbound_requests.recv() => {
          let (peer, request, response_channel) =
            request.expect("channel for requests was closed?");
          let request_id = self.swarm.behaviour_mut().reqres.send_request(&peer, request);
          self.outbound_request_responses.insert(request_id, response_channel);
        }

        response = self.inbound_request_responses.recv() => {
          let (request_id, response) =
            response.expect("channel for inbound request responses was closed?");
          if let Some(channel) = self.inbound_request_response_channels.remove(&request_id) {
            let _: Result<_, _> =
              self.swarm.behaviour_mut().reqres.send_response(channel, response);
          }
        }
      }
    }
  }

  #[allow(clippy::too_many_arguments)]
  pub(crate) fn spawn(
    dial_task: TaskHandle,
    to_dial: mpsc::UnboundedReceiver<DialOpts>,

    validators: Arc<RwLock<Validators>>,
    validator_changes: mpsc::UnboundedReceiver<validators::Changes>,
    peers: Peers,

    swarm: Swarm<Behavior>,

    gossip: mpsc::UnboundedReceiver<gossip::Message>,
    signed_cosigns: mpsc::UnboundedSender<SignedCosign>,
    tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec<u8>)>,

    outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,

    heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
    notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
    inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
  ) {
    tokio::spawn(
      SwarmTask {
        dial_task,
        to_dial,
        last_dial_task_run: Instant::now(),

        validators,
        validator_changes,
        peers,
        rebuild_peers_at: Instant::now() + TIME_BETWEEN_REBUILD_PEERS,

        swarm,

        gossip,
        signed_cosigns,
        tributary_gossip,

        outbound_requests,
        outbound_request_responses: HashMap::new(),

        inbound_request_response_channels: HashMap::new(),
        heartbeat_requests,
        notable_cosign_requests,
        inbound_request_responses,
      }
      .run(),
    );
  }
}
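// Illustrative sketch (not part of the diff): the periodic rebuild above is a per-network set
// intersection, mirrored here as a hypothetical standalone helper (assuming the same
// `NetworkId`/`PeerId` imports as the surrounding module).
fn rebuild_peers(
  validators_by_network: &HashMap<NetworkId, HashSet<PeerId>>,
  connected: &HashSet<PeerId>,
) -> HashMap<NetworkId, HashSet<PeerId>> {
  validators_by_network
    .iter()
    .map(|(network, validators)| {
      (*network, validators.intersection(connected).copied().collect::<HashSet<_>>())
    })
    .collect()
}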
214
coordinator/p2p/libp2p/src/validators.rs
Normal file
@@ -0,0 +1,214 @@
use core::{borrow::Borrow, future::Future};
use std::{
  sync::Arc,
  collections::{HashSet, HashMap},
};

use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, SeraiError, Serai};

use serai_task::{Task, ContinuallyRan};

use libp2p::PeerId;

use futures_util::stream::{StreamExt, FuturesUnordered};
use tokio::sync::{mpsc, RwLock};

use crate::peer_id_from_public;

pub(crate) struct Changes {
  pub(crate) removed: HashSet<PeerId>,
  pub(crate) added: HashSet<PeerId>,
}

pub(crate) struct Validators {
  serai: Arc<Serai>,

  // A cache for which session we're populated with the validators of
  sessions: HashMap<NetworkId, Session>,
  // The validators by network
  by_network: HashMap<NetworkId, HashSet<PeerId>>,
  // The validators and their networks
  validators: HashMap<PeerId, HashSet<NetworkId>>,

  // The channel to send the changes down
  changes: mpsc::UnboundedSender<Changes>,
}

impl Validators {
  pub(crate) fn new(serai: Arc<Serai>) -> (Self, mpsc::UnboundedReceiver<Changes>) {
    let (send, recv) = mpsc::unbounded_channel();
    let validators = Validators {
      serai,
      sessions: HashMap::new(),
      by_network: HashMap::new(),
      validators: HashMap::new(),
      changes: send,
    };
    (validators, recv)
  }

  async fn session_changes(
    serai: impl Borrow<Serai>,
    sessions: impl Borrow<HashMap<NetworkId, Session>>,
  ) -> Result<Vec<(NetworkId, Session, HashSet<PeerId>)>, SeraiError> {
    let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
    let temporal_serai = temporal_serai.validator_sets();

    let mut session_changes = vec![];
    {
      // FuturesUnordered can be bad practice, as it'll cause timeouts if infrequently polled, but
      // we poll it till it yields all futures with the most minimal processing possible
      let mut futures = FuturesUnordered::new();
      for network in serai_client::primitives::NETWORKS {
        if network == NetworkId::Serai {
          continue;
        }
        let sessions = sessions.borrow();
        futures.push(async move {
          let session = match temporal_serai.session(network).await {
            Ok(Some(session)) => session,
            Ok(None) => return Ok(None),
            Err(e) => return Err(e),
          };

          if sessions.get(&network) == Some(&session) {
            Ok(None)
          } else {
            match temporal_serai.active_network_validators(network).await {
              Ok(validators) => Ok(Some((
                network,
                session,
                validators.into_iter().map(peer_id_from_public).collect(),
              ))),
              Err(e) => Err(e),
            }
          }
        });
      }
      while let Some(session_change) = futures.next().await {
        if let Some(session_change) = session_change? {
          session_changes.push(session_change);
        }
      }
    }

    Ok(session_changes)
  }

  fn incorporate_session_changes(
    &mut self,
    session_changes: Vec<(NetworkId, Session, HashSet<PeerId>)>,
  ) {
    let mut removed = HashSet::new();
    let mut added = HashSet::new();

    for (network, session, validators) in session_changes {
      // Remove the existing validators
      for validator in self.by_network.remove(&network).unwrap_or_else(HashSet::new) {
        // Get all networks this validator is in
        let mut networks = self.validators.remove(&validator).unwrap();
        // Remove this one
        networks.remove(&network);
        if !networks.is_empty() {
          // Insert the networks back if the validator was present in other networks
          self.validators.insert(validator, networks);
        } else {
          // Because this validator is no longer present in any network, mark them as removed
          /*
            This isn't accurate. The validator isn't present in the latest session for this
            network. The validator was present in the prior session, which has yet to retire. Our
            lack of explicit inclusion for both the prior session and the current session causes
            only the validators mutually present in both sessions to be responsible for all
            actions still ongoing as the prior validator set retires.

            TODO: Fix this
          */
          removed.insert(validator);
        }
      }

      // Add the new validators
      for validator in validators.iter().copied() {
        self.validators.entry(validator).or_insert_with(HashSet::new).insert(network);
        added.insert(validator);
      }
      self.by_network.insert(network, validators);

      // Update the session we have populated
      self.sessions.insert(network, session);
    }

    // Only flag validators for removal if they weren't simultaneously added by these changes
    removed.retain(|validator| !added.contains(validator));
    // Send the changes, dropping the error
    // This lets the caller opt out of change notifications by dropping the receiver
    let _: Result<_, _> = self.changes.send(Changes { removed, added });
  }

  /// Update the view of the validators.
  pub(crate) async fn update(&mut self) -> Result<(), SeraiError> {
    let session_changes = Self::session_changes(&*self.serai, &self.sessions).await?;
    self.incorporate_session_changes(session_changes);
    Ok(())
  }

  pub(crate) fn by_network(&self) -> &HashMap<NetworkId, HashSet<PeerId>> {
    &self.by_network
  }

  pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<NetworkId>> {
    self.validators.get(peer_id)
  }
}

/// A task which updates a set of validators.
///
/// The validators managed by this task will have their exclusive lock held for a minimal amount
/// of time while the update occurs, to minimize the disruption to the services relying on it.
pub(crate) struct UpdateValidatorsTask {
  validators: Arc<RwLock<Validators>>,
}

impl UpdateValidatorsTask {
  /// Spawn a new instance of the UpdateValidatorsTask.
  ///
  /// This returns a reference to the Validators it updates after spawning itself.
  pub(crate) fn spawn(
    serai: Arc<Serai>,
  ) -> (Arc<RwLock<Validators>>, mpsc::UnboundedReceiver<Changes>) {
    // The validators which will be updated
    let (validators, changes) = Validators::new(serai);
    let validators = Arc::new(RwLock::new(validators));

    // Define the task
    let (update_validators_task, update_validators_task_handle) = Task::new();
    // Forget the handle, as dropping the handle would stop the task
    core::mem::forget(update_validators_task_handle);
    // Spawn the task
    tokio::spawn(
      (Self { validators: validators.clone() }).continually_run(update_validators_task, vec![]),
    );

    // Return the validators
    (validators, changes)
  }
}

impl ContinuallyRan for UpdateValidatorsTask {
  // Only run every minute, not the default of every five seconds
  const DELAY_BETWEEN_ITERATIONS: u64 = 60;
  const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;

  type Error = SeraiError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let session_changes = {
        let validators = self.validators.read().await;
        Validators::session_changes(validators.serai.clone(), validators.sessions.clone()).await?
      };
      self.validators.write().await.incorporate_session_changes(session_changes);
      Ok(true)
    }
  }
}
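// Illustrative sketch (not part of the diff): the final `retain` above is a set difference,
// `removed = removed \ added`, so a validator present in both the outgoing and incoming sessions
// produces no net change to the allow list.
fn net_removed(mut removed: HashSet<PeerId>, added: &HashSet<PeerId>) -> HashSet<PeerId> {
  removed.retain(|validator| !added.contains(validator));
  removed
}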
151
coordinator/p2p/src/heartbeat.rs
Normal file
@@ -0,0 +1,151 @@
use core::future::Future;
use std::time::{Duration, SystemTime};

use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet};

use futures_lite::FutureExt;

use tributary_sdk::{ReadWrite, TransactionTrait, Block, Tributary, TributaryReader};

use serai_db::*;
use serai_task::ContinuallyRan;

use crate::{Heartbeat, Peer, P2p};

// Amount of blocks in a minute
const BLOCKS_PER_MINUTE: usize =
  (60 / (tributary_sdk::tendermint::TARGET_BLOCK_TIME / 1000)) as usize;

/// The minimum amount of blocks to include within a batch, assuming there's blocks to include in
/// the batch.
///
/// This decides the size limit of the Batch (the Block size limit multiplied by the minimum
/// amount of blocks we'll send). The actual amount of blocks sent will be the amount which fits
/// within the size limit.
pub const MIN_BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;

/// The size limit for a batch of blocks sent in response to a Heartbeat.
///
/// This estimates the size of a commit as `32 + (MAX_VALIDATORS * 128)`. At the time of writing,
/// a commit is `8 + (validators * 32) + (32 + (validators * 32))` (for the time, list of
/// validators, and aggregate signature). Accordingly, this should be a safe over-estimate.
pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH *
  (tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((MAX_KEY_SHARES_PER_SET as usize) * 128));
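// Illustrative arithmetic (not part of the diff), with purely hypothetical values: a 6_000 ms
// TARGET_BLOCK_TIME gives BLOCKS_PER_MINUTE = 60 / 6 = 10 and MIN_BLOCKS_PER_BATCH = 11; if
// BLOCK_SIZE_LIMIT were 350_000 and MAX_KEY_SHARES_PER_SET were 600, BATCH_SIZE_LIMIT would be
// 11 * (350_000 + 32 + (600 * 128)) = 11 * 426_832 = 4_695_152 bytes.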
/// Sends a heartbeat to other validators on regular intervals, informing them of our Tributary's
/// tip.
///
/// If the other validator has more blocks than we do, they're expected to inform us. This forms
/// the sync protocol for our Tributaries.
pub(crate) struct HeartbeatTask<TD: Db, Tx: TransactionTrait, P: P2p> {
  pub(crate) set: ValidatorSet,
  pub(crate) tributary: Tributary<TD, Tx, P>,
  pub(crate) reader: TributaryReader<TD, Tx>,
  pub(crate) p2p: P,
}

impl<TD: Db, Tx: TransactionTrait, P: P2p> ContinuallyRan for HeartbeatTask<TD, Tx, P> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      // If our blockchain hasn't had a block in the past minute, trigger the heartbeat protocol
      const TIME_TO_TRIGGER_SYNCING: Duration = Duration::from_secs(60);

      let mut tip = self.reader.tip();
      let time_since = {
        let block_time = if let Some(time_of_block) = self.reader.time_of_block(&tip) {
          SystemTime::UNIX_EPOCH + Duration::from_secs(time_of_block)
        } else {
          // If we couldn't fetch this block's time, assume it's old
          // We don't want to declare its unix time as 0 and claim it's 50+ years old though
          log::warn!(
            "heartbeat task couldn't fetch the time of a block, flagging it as a minute old"
          );
          SystemTime::now() - TIME_TO_TRIGGER_SYNCING
        };
        SystemTime::now().duration_since(block_time).unwrap_or(Duration::ZERO)
      };
      let mut tip_is_stale = false;

      let mut synced_block = false;
      if TIME_TO_TRIGGER_SYNCING <= time_since {
        log::warn!(
          "last known tributary block for {:?} was {} seconds ago",
          self.set,
          time_since.as_secs()
        );

        // This requests all peers for this network, without differentiating by session
        // This should be fine, as most validators should overlap across sessions
        'peer: for peer in self.p2p.peers(self.set.network).await {
          loop {
            // Create the request for blocks
            if tip_is_stale {
              tip = self.reader.tip();
              tip_is_stale = false;
            }
            // Necessary due to https://github.com/rust-lang/rust/issues/100013
            let Some(blocks) = peer
              .send_heartbeat(Heartbeat { set: self.set, latest_block_hash: tip })
              .boxed()
              .await
            else {
              continue 'peer;
            };

            // This is the final batch if it has less than the maximum amount of blocks
            // (signifying there weren't more blocks after this to fill the batch with)
            let final_batch = blocks.len() < MIN_BLOCKS_PER_BATCH;

            // Sync each block
            for block_with_commit in blocks {
              let Ok(block) = Block::read(&mut block_with_commit.block.as_slice()) else {
                // TODO: Disconnect/slash this peer
                log::warn!("received invalid Block inside response to heartbeat");
                continue 'peer;
              };

              // Attempt to sync the block
              if !self.tributary.sync_block(block, block_with_commit.commit).await {
                // The block may be invalid, or stale if we added a block elsewhere
                if (!tip_is_stale) && (tip != self.reader.tip()) {
                  // Since the Tributary's tip advanced on its own, return
                  return Ok(false);
                }

                // Since this block was invalid or stale in a way non-trivial to detect, try to
                // sync with the next peer
                continue 'peer;
              }

              // Because we synced a block, flag the tip as stale
              tip_is_stale = true;
              // And that we did sync a block
              synced_block = true;
            }

            // If this was the final batch, move on from this peer
            // We could assume they were honest and we are done syncing the chain, but this is a
            // bit more robust
            if final_batch {
              continue 'peer;
            }
          }
        }

        // This will cause the task to be run less and less often, ensuring we aren't spamming
        // the network if we legitimately aren't making progress
        if !synced_block {
          Err(format!(
            "tried to sync blocks for {:?} since we haven't seen one in {} seconds but didn't",
            self.set,
            time_since.as_secs(),
          ))?;
        }
      }

      Ok(synced_block)
    }
  }
}
204
coordinator/p2p/src/lib.rs
Normal file
@@ -0,0 +1,204 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::future::Future;
use std::collections::HashMap;

use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};

use serai_db::Db;
use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader};
use serai_cosign::{SignedCosign, Cosigning};

use tokio::sync::{mpsc, oneshot};

use serai_task::{Task, ContinuallyRan};

/// The heartbeat task, effecting sync of Tributaries
pub mod heartbeat;
use crate::heartbeat::HeartbeatTask;

/// A heartbeat for a Tributary.
#[derive(Clone, Copy, BorshSerialize, BorshDeserialize, Debug)]
pub struct Heartbeat {
  /// The Tributary this is the heartbeat of.
  pub set: ValidatorSet,
  /// The hash of the latest block added to the Tributary.
  pub latest_block_hash: [u8; 32],
}

/// A tributary block and its commit.
#[derive(Clone, BorshSerialize, BorshDeserialize)]
pub struct TributaryBlockWithCommit {
  /// The serialized block.
  pub block: Vec<u8>,
  /// The serialized commit.
  pub commit: Vec<u8>,
}

/// A representation of a peer.
pub trait Peer<'a>: Send {
  /// Send a heartbeat to this peer.
  fn send_heartbeat(
    &self,
    heartbeat: Heartbeat,
  ) -> impl Send + Future<Output = Option<Vec<TributaryBlockWithCommit>>>;
}

/// The representation of the P2P network.
pub trait P2p:
  Send + Sync + Clone + tributary_sdk::P2p + serai_cosign::RequestNotableCosigns
{
  /// The representation of a peer.
  type Peer<'a>: Peer<'a>;

  /// Fetch the peers for this network.
  fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;

  /// Broadcast a cosign.
  fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()>;

  /// A cancel-safe future for the next heartbeat received over the P2P network.
  ///
  /// Yields the validator set it's for, the latest block hash observed, and a channel to return
  /// the descending blocks. This channel MUST NOT and will not have its receiver dropped before
  /// a message is sent.
  fn heartbeat(
    &self,
  ) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)>;

  /// A cancel-safe future for the next request for the notable cosigns of a global session.
  ///
  /// Yields the global session the request is for and a channel to return the notable cosigns.
  /// This channel MUST NOT and will not have its receiver dropped before a message is sent.
  fn notable_cosigns_request(
    &self,
  ) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)>;

  /// A cancel-safe future for the next message regarding a Tributary.
  ///
  /// Yields the message's Tributary's genesis block hash and the message.
  fn tributary_message(&self) -> impl Send + Future<Output = ([u8; 32], Vec<u8>)>;

  /// A cancel-safe future for the next cosign received.
  fn cosign(&self) -> impl Send + Future<Output = SignedCosign>;
}
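// Illustrative sketch (not part of the diff): because the receiving futures are cancel-safe, a
// hypothetical caller can multiplex them in a `tokio::select!` loop without losing messages on
// the branches which don't complete.
async fn pump<P: P2p>(p2p: P) {
  loop {
    tokio::select! {
      (genesis, message) = p2p.tributary_message() => {
        // Route the message to the Tributary with this genesis block hash
        let _ = (genesis, message);
      }
      cosign = p2p.cosign() => {
        // Hand the cosign to the cosigning evaluator
        let _ = cosign;
      }
    }
  }
}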
fn handle_notable_cosigns_request<D: Db>(
  db: &D,
  global_session: [u8; 32],
  channel: oneshot::Sender<Vec<SignedCosign>>,
) {
  let cosigns = Cosigning::<D>::notable_cosigns(db, global_session);
  channel.send(cosigns).expect("channel listening for cosign oneshot response was dropped?");
}

fn handle_heartbeat<D: Db, T: TransactionTrait>(
  reader: &TributaryReader<D, T>,
  mut latest_block_hash: [u8; 32],
  channel: oneshot::Sender<Vec<TributaryBlockWithCommit>>,
) {
  let mut res_size = 8;
  let mut res = vec![];
  // The former condition should be covered by the latter condition
  while (res.len() < heartbeat::MIN_BLOCKS_PER_BATCH) || (res_size < heartbeat::BATCH_SIZE_LIMIT) {
    let Some(block_after) = reader.block_after(&latest_block_hash) else { break };

    // These `break` conditions should only occur under edge cases, such as if we're actively
    // deleting this Tributary due to being done with it
    let Some(block) = reader.block(&block_after) else { break };
    let block = block.serialize();
    let Some(commit) = reader.commit(&block_after) else { break };
    res_size += 8 + block.len() + 8 + commit.len();
    res.push(TributaryBlockWithCommit { block, commit });

    latest_block_hash = block_after;
  }
  channel
    .send(res)
    .map_err(|_| ())
    .expect("channel listening for heartbeat oneshot response was dropped?");
}
|
||||
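
// Illustrative sketch, not part of the original commit: the batching rule above,
// replayed with hypothetical constants and fixed-size blocks. A batch keeps
// growing until it holds the minimum block count and reaches the size limit
// (or the reader runs out of blocks).
#[cfg(test)]
mod heartbeat_batching_example {
  #[test]
  fn batch_bounds() {
    // Hypothetical stand-ins for heartbeat::MIN_BLOCKS_PER_BATCH and
    // heartbeat::BATCH_SIZE_LIMIT
    const MIN_BLOCKS_PER_BATCH: usize = 4;
    const BATCH_SIZE_LIMIT: usize = 1024;

    let (block_len, commit_len) = (300, 100);
    let (mut blocks, mut res_size) = (0, 8);
    while (blocks < MIN_BLOCKS_PER_BATCH) || (res_size < BATCH_SIZE_LIMIT) {
      res_size += 8 + block_len + 8 + commit_len;
      blocks += 1;
    }
    // The resulting batch satisfies both bounds
    assert!(blocks >= MIN_BLOCKS_PER_BATCH);
    assert!(res_size >= BATCH_SIZE_LIMIT);
  }
}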

/// Run the P2P instance.
///
/// `add_tributary`'s and `retire_tributary`'s senders, along with `send_cosigns`'s receiver, must
/// never be dropped. `retire_tributary` is not required to only be instructed with added
/// Tributaries.
pub async fn run<TD: Db, Tx: TransactionTrait, P: P2p>(
  db: impl Db,
  p2p: P,
  mut add_tributary: mpsc::UnboundedReceiver<(ValidatorSet, Tributary<TD, Tx, P>)>,
  mut retire_tributary: mpsc::UnboundedReceiver<ValidatorSet>,
  send_cosigns: mpsc::UnboundedSender<SignedCosign>,
) {
  let mut readers = HashMap::<ValidatorSet, TributaryReader<TD, Tx>>::new();
  let mut tributaries = HashMap::<[u8; 32], mpsc::UnboundedSender<Vec<u8>>>::new();
  let mut heartbeat_tasks = HashMap::<ValidatorSet, _>::new();

  loop {
    tokio::select! {
      tributary = add_tributary.recv() => {
        let (set, tributary) = tributary.expect("add_tributary send was dropped");
        let reader = tributary.reader();
        readers.insert(set, reader.clone());

        let (heartbeat_task_def, heartbeat_task) = Task::new();
        tokio::spawn(
          (HeartbeatTask {
            set,
            tributary: tributary.clone(),
            reader: reader.clone(),
            p2p: p2p.clone(),
          }).continually_run(heartbeat_task_def, vec![])
        );
        heartbeat_tasks.insert(set, heartbeat_task);

        let (tributary_message_send, mut tributary_message_recv) = mpsc::unbounded_channel();
        tributaries.insert(tributary.genesis(), tributary_message_send);
        // For as long as this sender exists, handle the messages from it on a dedicated task
        tokio::spawn(async move {
          while let Some(message) = tributary_message_recv.recv().await {
            tributary.handle_message(&message).await;
          }
        });
      }
      set = retire_tributary.recv() => {
        let set = set.expect("retire_tributary send was dropped");
        let Some(reader) = readers.remove(&set) else { continue };
        tributaries.remove(&reader.genesis()).expect("tributary reader but no tributary");
        heartbeat_tasks.remove(&set).expect("tributary but no heartbeat task");
      }

      (heartbeat, channel) = p2p.heartbeat() => {
        if let Some(reader) = readers.get(&heartbeat.set) {
          let reader = reader.clone(); // This is a cheap clone
          // We spawn this on a task due to the DB reads needed
          tokio::spawn(async move {
            handle_heartbeat(&reader, heartbeat.latest_block_hash, channel)
          });
        }
      }
      (global_session, channel) = p2p.notable_cosigns_request() => {
        tokio::spawn({
          let db = db.clone();
          async move { handle_notable_cosigns_request(&db, global_session, channel) }
        });
      }
      (tributary, message) = p2p.tributary_message() => {
        if let Some(tributary) = tributaries.get(&tributary) {
          tributary.send(message).expect("tributary message recv was dropped?");
        }
      }
      cosign = p2p.cosign() => {
        // We don't call `Cosigning::intake_cosign` here as that can only be called from a single
        // location. We also need to intake the cosigns we produce, which means we need to merge
        // these streams (signing, network) somehow. That's done with this mpsc channel
        send_cosigns.send(cosign).expect("channel receiving cosigns was dropped");
      }
    }
  }
}

coordinator/src/db.rs (new file, 113 lines)
@@ -0,0 +1,113 @@
use std::{path::Path, fs};

pub(crate) use serai_db::{Get, DbTxn, Db as DbTrait};
use serai_db::{create_db, db_channel};

use serai_client::{
  primitives::NetworkId,
  validator_sets::primitives::{Session, ValidatorSet},
};

use serai_cosign::SignedCosign;
use serai_coordinator_substrate::NewSetInformation;
use serai_coordinator_tributary::Transaction;

#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
pub(crate) type Db = serai_db::ParityDb;
#[cfg(feature = "rocksdb")]
pub(crate) type Db = serai_db::RocksDB;

#[allow(unused_variables, unreachable_code)]
fn db(path: &str) -> Db {
  {
    let path: &Path = path.as_ref();
    // This may error if this path already exists, which we shouldn't propagate/panic on. If this
    // is a problem (such as we don't have the necessary permissions to write to this path), we
    // expect the following DB opening to error.
    let _: Result<_, _> = fs::create_dir_all(path.parent().unwrap());
  }

  #[cfg(all(feature = "parity-db", feature = "rocksdb"))]
  panic!("built with parity-db and rocksdb");
  #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
  let db = serai_db::new_parity_db(path);
  #[cfg(feature = "rocksdb")]
  let db = serai_db::new_rocksdb(path);
  db
}

pub(crate) fn coordinator_db() -> Db {
  let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
  db(&format!("{root_path}/coordinator/db"))
}

fn tributary_db_folder(set: ValidatorSet) -> String {
  let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
  let network = match set.network {
    NetworkId::Serai => panic!("creating Tributary for the Serai network"),
    NetworkId::Bitcoin => "Bitcoin",
    NetworkId::Ethereum => "Ethereum",
    NetworkId::Monero => "Monero",
  };
  format!("{root_path}/tributary-{network}-{}", set.session.0)
}

pub(crate) fn tributary_db(set: ValidatorSet) -> Db {
  db(&format!("{}/db", tributary_db_folder(set)))
}

pub(crate) fn prune_tributary_db(set: ValidatorSet) {
  log::info!("pruning data directory for tributary {set:?}");
  let db = tributary_db_folder(set);
  if fs::exists(&db).expect("couldn't check if tributary DB exists") {
    fs::remove_dir_all(db).unwrap();
  }
}

create_db! {
  Coordinator {
    // The currently active Tributaries
    ActiveTributaries: () -> Vec<NewSetInformation>,
    // The latest Tributary to have been retired for a network
    // Since Tributaries are retired sequentially, this is informative as to whether any Tributary
    // has been retired
    RetiredTributary: (network: NetworkId) -> Session,
    // The last handled message from a Processor
    LastProcessorMessage: (network: NetworkId) -> u64,
    // Cosigns we produced and tried to intake yet incurred an error while doing so
    ErroneousCosigns: () -> Vec<SignedCosign>,
  }
}

db_channel! {
  Coordinator {
    // Cosigns we produced
    SignedCosigns: () -> SignedCosign,
    // Tributaries to clean up upon reboot
    TributaryCleanup: () -> ValidatorSet,
  }
}

mod _internal_db {
  use super::*;

  db_channel! {
    Coordinator {
      // Tributary transactions to publish
      TributaryTransactions: (set: ValidatorSet) -> Transaction,
    }
  }
}

pub(crate) struct TributaryTransactions;
impl TributaryTransactions {
  pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, tx: &Transaction) {
    // If this set has yet to be retired, send this transaction
    if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
      _internal_db::TributaryTransactions::send(txn, set, tx);
    }
  }
  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<Transaction> {
    _internal_db::TributaryTransactions::try_recv(txn, set)
  }
}
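
// Illustrative sketch, not part of the original commit: the retire check above
// relies on `Option`'s derived ordering, where `None` sorts before any `Some`.
// A network which has never retired a Tributary accordingly treats every
// session as not-yet-retired.
#[cfg(test)]
mod retired_ordering_example {
  #[test]
  fn option_ordering() {
    assert!(None::<u32> < Some(0)); // nothing retired yet: session 0 is live
    assert!(Some(0) < Some(1)); // session 0 retired: session 1 is still live
    assert!(!(Some(1) < Some(1))); // session 1 retired: session 1 is not live
  }
}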

coordinator/src/main.rs
@@ -1,5 +1,430 @@
use core::{ops::Deref, time::Duration};
use std::{sync::Arc, collections::HashMap, time::Instant};

use zeroize::{Zeroize, Zeroizing};
use rand_core::{RngCore, OsRng};

use ciphersuite::{
  group::{ff::PrimeField, GroupEncoding},
  Ciphersuite, Ristretto,
};

use borsh::BorshDeserialize;

use tokio::sync::mpsc;

use serai_client::{
  primitives::{NetworkId, PublicKey},
  validator_sets::primitives::ValidatorSet,
  Serai,
};
use message_queue::{Service, client::MessageQueue};

use serai_task::{Task, TaskHandle, ContinuallyRan};

use serai_cosign::{Faulted, SignedCosign, Cosigning};
use serai_coordinator_substrate::{CanonicalEventStream, EphemeralEventStream, SignSlashReport};
use serai_coordinator_tributary::{Signed, Transaction, SubstrateBlockPlans};

mod db;
use db::*;

mod tributary;

-fn main() {
-  todo!("TODO")
-}
mod substrate;
use substrate::SubstrateTask;

mod p2p {
  pub use serai_coordinator_p2p::*;
  pub use serai_coordinator_libp2p_p2p::Libp2p;
}

// Use a zeroizing allocator for this entire application
// While secrets should already be zeroized, the presence of secret keys in a networked application
// (at increased risk of OOB reads) justifies the performance hit in case any secrets weren't
// already
#[global_allocator]
static ALLOCATOR: zalloc::ZeroizingAlloc<std::alloc::System> =
  zalloc::ZeroizingAlloc(std::alloc::System);

async fn serai() -> Arc<Serai> {
  const SERAI_CONNECTION_DELAY: Duration = Duration::from_secs(10);
  const MAX_SERAI_CONNECTION_DELAY: Duration = Duration::from_secs(300);

  let mut delay = SERAI_CONNECTION_DELAY;
  loop {
    let Ok(serai) = Serai::new(format!(
      "http://{}:9944",
      serai_env::var("SERAI_HOSTNAME").expect("Serai hostname wasn't provided")
    ))
    .await
    else {
      log::error!("couldn't connect to the Serai node");
      tokio::time::sleep(delay).await;
      delay = (delay + SERAI_CONNECTION_DELAY).min(MAX_SERAI_CONNECTION_DELAY);
      continue;
    };
    log::info!("made initial connection to Serai node");
    return Arc::new(serai);
  }
}
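
// Illustrative sketch, not part of the original commit: the reconnection delay
// above grows linearly, ten seconds per consecutive failure, and saturates at
// the five-minute maximum.
#[cfg(test)]
mod serai_connection_backoff_example {
  use core::time::Duration;

  #[test]
  fn delay_saturates() {
    let step = Duration::from_secs(10);
    let max = Duration::from_secs(300);
    let mut delay = step;
    let mut delays = vec![];
    for _ in 0 .. 35 {
      delays.push(delay);
      delay = (delay + step).min(max);
    }
    assert_eq!(delays[0], Duration::from_secs(10));
    assert_eq!(delays[1], Duration::from_secs(20));
    // From the 30th failure on, the delay stays pinned at the maximum
    assert!(delays[29 ..].iter().all(|delay| *delay == max));
  }
}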

fn spawn_cosigning<D: serai_db::Db>(
  mut db: D,
  serai: Arc<Serai>,
  p2p: impl p2p::P2p,
  tasks_to_run_upon_cosigning: Vec<TaskHandle>,
  mut p2p_cosigns: mpsc::UnboundedReceiver<SignedCosign>,
) {
  let mut cosigning = Cosigning::spawn(db.clone(), serai, p2p.clone(), tasks_to_run_upon_cosigning);
  tokio::spawn(async move {
    const COSIGN_LOOP_INTERVAL: Duration = Duration::from_secs(5);

    let mut last_cosign_rebroadcast = Instant::now();
    loop {
      // Intake our own cosigns
      match Cosigning::<D>::latest_cosigned_block_number(&db) {
        Ok(latest_cosigned_block_number) => {
          let mut txn = db.txn();
          // The cosigns we previously tried to intake yet failed to
          let mut cosigns = ErroneousCosigns::get(&txn).unwrap_or(vec![]);
          // The cosigns we have yet to intake
          while let Some(cosign) = SignedCosigns::try_recv(&mut txn) {
            cosigns.push(cosign);
          }

          let mut erroneous = vec![];
          for cosign in cosigns {
            // If this cosign is stale, move on
            if cosign.cosign.block_number <= latest_cosigned_block_number {
              continue;
            }

            match cosigning.intake_cosign(&cosign) {
              // Publish this cosign
              Ok(()) => p2p.publish_cosign(cosign).await,
              Err(e) => {
                assert!(e.temporal(), "signed an invalid cosign: {e:?}");
                // Since this had a temporal error, queue it to try again later
                erroneous.push(cosign);
              }
            };
          }

          // Save the cosigns with temporal errors to the database
          ErroneousCosigns::set(&mut txn, &erroneous);

          txn.commit();
        }
        Err(Faulted) => {
          // We don't panic here as the following code rebroadcasts our cosigns which is
          // necessary to inform other coordinators of the faulty cosigns
          log::error!("cosigning faulted");
        }
      }

      let time_till_cosign_rebroadcast = (last_cosign_rebroadcast +
        serai_cosign::BROADCAST_FREQUENCY)
        .saturating_duration_since(Instant::now());
      tokio::select! {
        () = tokio::time::sleep(time_till_cosign_rebroadcast) => {
          last_cosign_rebroadcast = Instant::now();
          for cosign in cosigning.cosigns_to_rebroadcast() {
            p2p.publish_cosign(cosign).await;
          }
        }
        cosign = p2p_cosigns.recv() => {
          let cosign = cosign.expect("p2p cosigns channel was dropped?");
          if cosigning.intake_cosign(&cosign).is_ok() {
            p2p.publish_cosign(cosign).await;
          }
        }
        // Make sure this loop runs at least this often
        () = tokio::time::sleep(COSIGN_LOOP_INTERVAL) => {}
      }
    }
  });
}

async fn handle_processor_messages(
  mut db: impl serai_db::Db,
  message_queue: Arc<MessageQueue>,
  network: NetworkId,
) {
  loop {
    let (msg_id, msg) = {
      let msg = message_queue.next(Service::Processor(network)).await;
      // Check this message's sender is as expected
      assert_eq!(msg.from, Service::Processor(network));

      // Check this message's ID is as expected
      let last = LastProcessorMessage::get(&db, network);
      let next = last.map(|id| id + 1).unwrap_or(0);
      // This should either be the last message's ID, if we committed but didn't send our ACK, or
      // the expected next message's ID
      assert!((Some(msg.id) == last) || (msg.id == next));

      // TODO: Check msg.sig

      // If this is the message we already handled, and just failed to ACK, ACK it now and move on
      if Some(msg.id) == last {
        message_queue.ack(Service::Processor(network), msg.id).await;
        continue;
      }

      (msg.id, messages::ProcessorMessage::deserialize(&mut msg.msg.as_slice()).unwrap())
    };

    let mut txn = db.txn();

    match msg {
      messages::ProcessorMessage::KeyGen(msg) => match msg {
        messages::key_gen::ProcessorMessage::Participation { session, participation } => {
          let set = ValidatorSet { network, session };
          TributaryTransactions::send(
            &mut txn,
            set,
            &Transaction::DkgParticipation { participation, signed: Signed::default() },
          );
        }
        messages::key_gen::ProcessorMessage::GeneratedKeyPair {
          session,
          substrate_key,
          network_key,
        } => todo!("TODO Transaction::DkgConfirmationPreprocess"),
        messages::key_gen::ProcessorMessage::Blame { session, participant } => {
          let set = ValidatorSet { network, session };
          TributaryTransactions::send(
            &mut txn,
            set,
            &Transaction::RemoveParticipant {
              participant: todo!("TODO"),
              signed: Signed::default(),
            },
          );
        }
      },
      messages::ProcessorMessage::Sign(msg) => match msg {
        messages::sign::ProcessorMessage::InvalidParticipant { session, participant } => {
          let set = ValidatorSet { network, session };
          TributaryTransactions::send(
            &mut txn,
            set,
            &Transaction::RemoveParticipant {
              participant: todo!("TODO"),
              signed: Signed::default(),
            },
          );
        }
        messages::sign::ProcessorMessage::Preprocesses { id, preprocesses } => {
          todo!("TODO Transaction::Batch + Transaction::Sign")
        }
        messages::sign::ProcessorMessage::Shares { id, shares } => todo!("TODO Transaction::Sign"),
      },
      messages::ProcessorMessage::Coordinator(msg) => match msg {
        messages::coordinator::ProcessorMessage::CosignedBlock { cosign } => {
          SignedCosigns::send(&mut txn, &cosign);
        }
        messages::coordinator::ProcessorMessage::SignedBatch { batch } => {
          todo!("TODO PublishBatchTask")
        }
        messages::coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
          todo!("TODO PublishSlashReportTask")
        }
      },
      messages::ProcessorMessage::Substrate(msg) => match msg {
        messages::substrate::ProcessorMessage::SubstrateBlockAck { block, plans } => {
          let mut by_session = HashMap::new();
          for plan in plans {
            by_session
              .entry(plan.session)
              .or_insert_with(|| Vec::with_capacity(1))
              .push(plan.transaction_plan_id);
          }
          for (session, plans) in by_session {
            let set = ValidatorSet { network, session };
            SubstrateBlockPlans::set(&mut txn, set, block, &plans);
            TributaryTransactions::send(
              &mut txn,
              set,
              &Transaction::SubstrateBlock { hash: block },
            );
          }
        }
      },
    }

    // Mark this as the last handled message
    LastProcessorMessage::set(&mut txn, network, &msg_id);
    // Commit the txn
    txn.commit();
    // Now that we won't handle this message again, acknowledge it so we won't see it again
    message_queue.ack(Service::Processor(network), msg_id).await;
  }
}
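
// Illustrative sketch, not part of the original commit: the expected-ID logic
// above starts at 0 when no message has been handled, expects last + 1
// afterwards, and tolerates a redundant delivery of the last-handled ID (which
// is simply re-ACKed).
#[cfg(test)]
mod processor_message_id_example {
  #[test]
  fn expected_ids() {
    fn next(last: Option<u64>) -> u64 {
      last.map(|id| id + 1).unwrap_or(0)
    }
    assert_eq!(next(None), 0);
    assert_eq!(next(Some(4)), 5);

    let last = Some(4);
    let accepted = |id| (Some(id) == last) || (id == next(last));
    assert!(accepted(4)); // handled yet unACKed, so re-ACK
    assert!(accepted(5)); // the next message
    assert!(!accepted(6)); // a gap, which is asserted against
  }
}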

#[tokio::main]
async fn main() {
  // Override the panic handler with one which will panic if any tokio task panics
  {
    let existing = std::panic::take_hook();
    std::panic::set_hook(Box::new(move |panic| {
      existing(panic);
      const MSG: &str = "exiting the process due to a task panicking";
      println!("{MSG}");
      log::error!("{MSG}");
      std::process::exit(1);
    }));
  }

  // Initialize the logger
  if std::env::var("RUST_LOG").is_err() {
    std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string()));
  }
  env_logger::init();
  log::info!("starting coordinator service...");

  // Read the Serai key from the env
  let serai_key = {
    let mut key_hex = serai_env::var("SERAI_KEY").expect("Serai key wasn't provided");
    let mut key_vec = hex::decode(&key_hex).map_err(|_| ()).expect("Serai key wasn't hex-encoded");
    key_hex.zeroize();
    if key_vec.len() != 32 {
      key_vec.zeroize();
      panic!("Serai key had an invalid length");
    }
    let mut key_bytes = [0; 32];
    key_bytes.copy_from_slice(&key_vec);
    key_vec.zeroize();
    let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::from_repr(key_bytes).unwrap());
    key_bytes.zeroize();
    key
  };

  // Open the database
  let mut db = coordinator_db();

  let existing_tributaries_at_boot = {
    let mut txn = db.txn();

    // Cleanup all historic Tributaries
    while let Some(to_cleanup) = TributaryCleanup::try_recv(&mut txn) {
      prune_tributary_db(to_cleanup);
      // Drain the cosign intents created for this set
      while !Cosigning::<Db>::intended_cosigns(&mut txn, to_cleanup).is_empty() {}
      // Drain the transactions to publish for this set
      while TributaryTransactions::try_recv(&mut txn, to_cleanup).is_some() {}
      // Remove the SignSlashReport notification
      SignSlashReport::try_recv(&mut txn, to_cleanup);
    }

    // Remove retired Tributaries from ActiveTributaries
    let mut active_tributaries = ActiveTributaries::get(&txn).unwrap_or(vec![]);
    active_tributaries.retain(|tributary| {
      RetiredTributary::get(&txn, tributary.set.network).map(|session| session.0) <
        Some(tributary.set.session.0)
    });
    ActiveTributaries::set(&mut txn, &active_tributaries);

    txn.commit();

    active_tributaries
  };

  // Connect to the message-queue
  let message_queue = Arc::new(MessageQueue::from_env(Service::Coordinator));

  // Connect to the Serai node
  let serai = serai().await;

  let (p2p_add_tributary_send, p2p_add_tributary_recv) = mpsc::unbounded_channel();
  let (p2p_retire_tributary_send, p2p_retire_tributary_recv) = mpsc::unbounded_channel();
  let (p2p_cosigns_send, p2p_cosigns_recv) = mpsc::unbounded_channel();

  // Spawn the P2P network
  let p2p = {
    let serai_keypair = {
      let mut key_bytes = serai_key.to_bytes();
      // Schnorrkel SecretKey is the key followed by 32 bytes of entropy for nonces
      let mut expanded_key = Zeroizing::new([0; 64]);
      expanded_key.as_mut_slice()[.. 32].copy_from_slice(&key_bytes);
      OsRng.fill_bytes(&mut expanded_key.as_mut_slice()[32 ..]);
      key_bytes.zeroize();
      Zeroizing::new(
        schnorrkel::SecretKey::from_bytes(expanded_key.as_slice()).unwrap().to_keypair(),
      )
    };
    let p2p = p2p::Libp2p::new(&serai_keypair, serai.clone());
    tokio::spawn(p2p::run::<Db, Transaction, _>(
      db.clone(),
      p2p.clone(),
      p2p_add_tributary_recv,
      p2p_retire_tributary_recv,
      p2p_cosigns_send,
    ));
    p2p
  };

  // Spawn the Substrate scanners
  let (substrate_task_def, substrate_task) = Task::new();
  let (substrate_canonical_task_def, substrate_canonical_task) = Task::new();
  tokio::spawn(
    CanonicalEventStream::new(db.clone(), serai.clone())
      .continually_run(substrate_canonical_task_def, vec![substrate_task.clone()]),
  );
  let (substrate_ephemeral_task_def, substrate_ephemeral_task) = Task::new();
  tokio::spawn(
    EphemeralEventStream::new(
      db.clone(),
      serai.clone(),
      PublicKey::from_raw((<Ristretto as Ciphersuite>::generator() * serai_key.deref()).to_bytes()),
    )
    .continually_run(substrate_ephemeral_task_def, vec![substrate_task]),
  );

  // Spawn the cosign handler
  spawn_cosigning(
    db.clone(),
    serai.clone(),
    p2p.clone(),
    // Run the Substrate scanners once we cosign new blocks
    vec![substrate_canonical_task, substrate_ephemeral_task],
    p2p_cosigns_recv,
  );

  // Spawn all Tributaries on-disk
  for tributary in existing_tributaries_at_boot {
    crate::tributary::spawn_tributary(
      db.clone(),
      message_queue.clone(),
      p2p.clone(),
      &p2p_add_tributary_send,
      tributary,
      serai_key.clone(),
    )
    .await;
  }

  // Handle the events from the Substrate scanner
  tokio::spawn(
    (SubstrateTask {
      serai_key: serai_key.clone(),
      db: db.clone(),
      message_queue: message_queue.clone(),
      p2p: p2p.clone(),
      p2p_add_tributary: p2p_add_tributary_send.clone(),
      p2p_retire_tributary: p2p_retire_tributary_send.clone(),
    })
    .continually_run(substrate_task_def, vec![]),
  );

  // Handle all of the Processors' messages
  for network in serai_client::primitives::NETWORKS {
    if network == NetworkId::Serai {
      continue;
    }
    tokio::spawn(handle_processor_messages(db.clone(), message_queue.clone(), network));
  }

  // Run the spawned tasks ad-infinitum
  core::future::pending().await
}

coordinator/src/substrate.rs (new file, 159 lines)
@@ -0,0 +1,159 @@
use core::future::Future;
use std::sync::Arc;

use zeroize::Zeroizing;

use ciphersuite::{Ciphersuite, Ristretto};

use tokio::sync::mpsc;

use serai_db::{DbTxn, Db as DbTrait};

use serai_client::validator_sets::primitives::{Session, ValidatorSet};
use message_queue::{Service, Metadata, client::MessageQueue};

use tributary_sdk::Tributary;

use serai_task::ContinuallyRan;

use serai_coordinator_tributary::Transaction;
use serai_coordinator_p2p::P2p;

use crate::Db;

pub(crate) struct SubstrateTask<P: P2p> {
  pub(crate) serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  pub(crate) db: Db,
  pub(crate) message_queue: Arc<MessageQueue>,
  pub(crate) p2p: P,
  pub(crate) p2p_add_tributary:
    mpsc::UnboundedSender<(ValidatorSet, Tributary<Db, Transaction, P>)>,
  pub(crate) p2p_retire_tributary: mpsc::UnboundedSender<ValidatorSet>,
}

impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
  type Error = String; // TODO
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;

      // Handle the Canonical events
      for network in serai_client::primitives::NETWORKS {
        loop {
          let mut txn = self.db.txn();
          let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network)
          else {
            break;
          };

          match msg {
            // TODO: Stop trying to confirm the DKG
            messages::substrate::CoordinatorMessage::SetKeys { .. } => todo!("TODO"),
            messages::substrate::CoordinatorMessage::SlashesReported { session } => {
              let prior_retired = crate::db::RetiredTributary::get(&txn, network);
              let next_to_be_retired =
                prior_retired.map(|session| Session(session.0 + 1)).unwrap_or(Session(0));
              assert_eq!(session, next_to_be_retired);
              crate::db::RetiredTributary::set(&mut txn, network, &session);
              self
                .p2p_retire_tributary
                .send(ValidatorSet { network, session })
                .expect("p2p retire_tributary channel dropped?");
            }
            messages::substrate::CoordinatorMessage::Block { .. } => {}
          }

          let msg = messages::CoordinatorMessage::from(msg);
          let metadata = Metadata {
            from: Service::Coordinator,
            to: Service::Processor(network),
            intent: msg.intent(),
          };
          let msg = borsh::to_vec(&msg).unwrap();
          self.message_queue.queue(metadata, msg).await?;
          txn.commit();
          made_progress = true;
        }
      }

      // Handle the NewSet events
      loop {
        let mut txn = self.db.txn();
        let Some(new_set) = serai_coordinator_substrate::NewSet::try_recv(&mut txn) else { break };

        if let Some(historic_session) = new_set.set.session.0.checked_sub(2) {
          // We should have retired this session if we're here
          if crate::db::RetiredTributary::get(&txn, new_set.set.network).map(|session| session.0) <
            Some(historic_session)
          {
            /*
              If we haven't, it's because we're processing the NewSet event before the retirement
              event from the Canonical event stream. This happens if the Canonical event, and
              then the NewSet event, is fired while we're already iterating over NewSet events.

              We break, dropping the txn, restoring this NewSet to the database, so we'll only
              handle it once a future iteration of this loop handles the retirement event.
            */
            break;
          }

          /*
            Queue this historical Tributary for deletion.

            We explicitly don't queue this upon Tributary retire, instead here, to give time to
            investigate retired Tributaries if questions are raised post-retirement. This gives a
            week (the duration of the following session) after the Tributary has been retired to
            make a backup of the data directory for any investigations.
          */
          crate::db::TributaryCleanup::send(
            &mut txn,
            &ValidatorSet { network: new_set.set.network, session: Session(historic_session) },
          );
        }

        // Save this Tributary as active to the database
        {
          let mut active_tributaries =
            crate::db::ActiveTributaries::get(&txn).unwrap_or(Vec::with_capacity(1));
          active_tributaries.push(new_set.clone());
          crate::db::ActiveTributaries::set(&mut txn, &active_tributaries);
        }

        // Send GenerateKey to the processor
        let msg = messages::key_gen::CoordinatorMessage::GenerateKey {
          session: new_set.set.session,
          threshold: new_set.threshold,
          evrf_public_keys: new_set.evrf_public_keys.clone(),
        };
        let msg = messages::CoordinatorMessage::from(msg);
        let metadata = Metadata {
          from: Service::Coordinator,
          to: Service::Processor(new_set.set.network),
          intent: msg.intent(),
        };
        let msg = borsh::to_vec(&msg).unwrap();
        self.message_queue.queue(metadata, msg).await?;

        // Commit the transaction for all of this
        txn.commit();

        // Now spawn the Tributary
        // If we reboot after committing the txn, but before this is called, this will be called
        // on boot
        crate::tributary::spawn_tributary(
          self.db.clone(),
          self.message_queue.clone(),
          self.p2p.clone(),
          &self.p2p_add_tributary,
          new_set,
          self.serai_key.clone(),
        )
        .await;

        made_progress = true;
      }

      Ok(made_progress)
    }
  }
}
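
// Illustrative sketch, not part of the original commit: when the set for
// session n is declared, the Tributary of session n - 2 is queued for cleanup,
// leaving the entire duration of session n - 1 to back up its data directory.
#[cfg(test)]
mod tributary_cleanup_window_example {
  #[test]
  fn historic_session() {
    // Sessions 0 and 1 have no session two prior, so nothing is cleaned up
    assert_eq!(0u32.checked_sub(2), None);
    assert_eq!(1u32.checked_sub(2), None);
    // Declaring session 5 queues the session-3 Tributary for cleanup
    assert_eq!(5u32.checked_sub(2), Some(3));
  }
}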

coordinator/src/tributary.rs (new file, 453 lines)
@@ -0,0 +1,453 @@
use core::{future::Future, time::Duration};
use std::sync::Arc;

use zeroize::Zeroizing;
use rand_core::OsRng;
use blake2::{digest::typenum::U32, Digest, Blake2s};
use ciphersuite::{Ciphersuite, Ristretto};

use tokio::sync::mpsc;

use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};

use scale::Encode;
use serai_client::validator_sets::primitives::ValidatorSet;

use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary};

use serai_task::{Task, TaskHandle, DoesNotError, ContinuallyRan};

use message_queue::{Service, Metadata, client::MessageQueue};

use serai_cosign::{Faulted, CosignIntent, Cosigning};
use serai_coordinator_substrate::{NewSetInformation, SignSlashReport};
use serai_coordinator_tributary::{Transaction, ProcessorMessages, CosignIntents, ScanTributaryTask};
use serai_coordinator_p2p::P2p;

use crate::{Db, TributaryTransactions};

db_channel! {
  Coordinator {
    PendingCosigns: (set: ValidatorSet) -> CosignIntent,
  }
}

/// Provide a Provided Transaction to the Tributary.
///
/// This is not a well-designed function. This is specific to the context in which it's called,
/// within this file. It should only be considered an internal helper for this domain alone.
async fn provide_transaction<TD: DbTrait, P: P2p>(
  set: ValidatorSet,
  tributary: &Tributary<TD, Transaction, P>,
  tx: Transaction,
) {
  match tributary.provide_transaction(tx.clone()).await {
    // The Tributary uses its own DB, so we may provide this multiple times if we reboot before
    // committing the txn which provoked this
    Ok(()) | Err(ProvidedError::AlreadyProvided) => {}
    Err(ProvidedError::NotProvided) => {
      panic!("providing a Transaction which wasn't a Provided transaction: {tx:?}");
    }
    Err(ProvidedError::InvalidProvided(e)) => {
      panic!("providing an invalid Provided transaction, tx: {tx:?}, error: {e:?}")
    }
    // The Tributary's scan task won't advance if we don't have the Provided transactions
    // present on-chain, and this enters an infinite loop to block the calling task from
    // advancing
    Err(ProvidedError::LocalMismatchesOnChain) => loop {
      log::error!(
        "Tributary {:?} was supposed to provide {:?} but peers disagree, halting Tributary",
        set,
        tx,
      );
      // Print this every five minutes as this does need to be handled
      tokio::time::sleep(Duration::from_secs(5 * 60)).await;
    },
  }
}

/// Provides Cosign/Cosigned Transactions onto the Tributary.
pub(crate) struct ProvideCosignCosignedTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> {
  db: CD,
  tributary_db: TD,
  set: NewSetInformation,
  tributary: Tributary<TD, Transaction, P>,
}
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan
  for ProvideCosignCosignedTransactionsTask<CD, TD, P>
{
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;

      // Check if we produced any cosigns we were supposed to
      let mut pending_notable_cosign = false;
      loop {
        let mut txn = self.db.txn();

        // Fetch the next cosign this tributary should handle
        let Some(cosign) = PendingCosigns::try_recv(&mut txn, self.set.set) else { break };
        pending_notable_cosign = cosign.notable;

        // If we (Serai) haven't cosigned this block, break as this is still pending
        let latest = match Cosigning::<CD>::latest_cosigned_block_number(&txn) {
          Ok(latest) => latest,
          Err(Faulted) => {
            log::error!("cosigning faulted");
            Err("cosigning faulted")?
          }
        };
        if latest < cosign.block_number {
          break;
        }

        // Because we've cosigned it, provide the TX for that
        {
          let mut txn = self.tributary_db.txn();
          CosignIntents::provide(&mut txn, self.set.set, &cosign);
          txn.commit();
        }
        provide_transaction(
          self.set.set,
          &self.tributary,
          Transaction::Cosigned { substrate_block_hash: cosign.block_hash },
        )
        .await;
        // Clear pending_notable_cosign since this cosign isn't pending
        pending_notable_cosign = false;

        // Commit the txn to clear this from PendingCosigns
        txn.commit();
        made_progress = true;
      }

      // If we don't have any notable cosigns pending, provide the next set of cosign intents
      if !pending_notable_cosign {
        let mut txn = self.db.txn();
        // intended_cosigns will only yield up to and including the next notable cosign
        for cosign in Cosigning::<CD>::intended_cosigns(&mut txn, self.set.set) {
          // Flag this cosign as pending
          PendingCosigns::send(&mut txn, self.set.set, &cosign);
          // Provide the transaction to queue it for work
          provide_transaction(
            self.set.set,
            &self.tributary,
            Transaction::Cosign { substrate_block_hash: cosign.block_hash },
          )
          .await;
        }
        txn.commit();
        made_progress = true;
      }

      Ok(made_progress)
    }
  }
}

/// Adds all of the transactions sent via `TributaryTransactions`.
pub(crate) struct AddTributaryTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> {
  db: CD,
  tributary_db: TD,
  tributary: Tributary<TD, Transaction, P>,
  set: ValidatorSet,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
}
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactionsTask<CD, TD, P> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
      loop {
        let mut txn = self.db.txn();
        let Some(mut tx) = TributaryTransactions::try_recv(&mut txn, self.set) else { break };

        let kind = tx.kind();
        match kind {
          TransactionKind::Provided(_) => provide_transaction(self.set, &self.tributary, tx).await,
          TransactionKind::Unsigned | TransactionKind::Signed(_, _) => {
            // If this is a signed transaction, sign it
            if matches!(kind, TransactionKind::Signed(_, _)) {
              tx.sign(&mut OsRng, self.tributary.genesis(), &self.key);
            }

            // Actually add the transaction
            // TODO: If this is a preprocess, make sure the topic has been recognized
            let res = self.tributary.add_transaction(tx.clone()).await;
            match &res {
              // Fresh publication, already published
              Ok(true | false) => {}
              Err(
                TransactionError::TooLargeTransaction |
                TransactionError::InvalidSigner |
                TransactionError::InvalidNonce |
                TransactionError::InvalidSignature |
                TransactionError::InvalidContent,
              ) => {
                panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}");
              }
              // We've published too many transactions recently
              // Drop this txn to try to publish it again later on a future iteration
              Err(TransactionError::TooManyInMempool) => {
                drop(txn);
                break;
              }
              // This isn't a Provided transaction so this should never be hit
              Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
            }
          }
        }

        made_progress = true;
        txn.commit();
      }
      Ok(made_progress)
    }
  }
}

/// Takes the messages from ScanTributaryTask and publishes them to the message-queue.
pub(crate) struct TributaryProcessorMessagesTask<TD: DbTrait> {
  tributary_db: TD,
  set: ValidatorSet,
  message_queue: Arc<MessageQueue>,
}
impl<TD: DbTrait> ContinuallyRan for TributaryProcessorMessagesTask<TD> {
  type Error = String; // TODO

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
      loop {
        let mut txn = self.tributary_db.txn();
        let Some(msg) = ProcessorMessages::try_recv(&mut txn, self.set) else { break };
        let metadata = Metadata {
          from: Service::Coordinator,
          to: Service::Processor(self.set.network),
          intent: msg.intent(),
        };
        let msg = borsh::to_vec(&msg).unwrap();
        self.message_queue.queue(metadata, msg).await?;
        txn.commit();
        made_progress = true;
      }
      Ok(made_progress)
    }
  }
}

/// Checks for the notification to sign a slash report and does so if present.
pub(crate) struct SignSlashReportTask<CD: DbTrait, TD: DbTrait, P: P2p> {
  db: CD,
  tributary_db: TD,
  tributary: Tributary<TD, Transaction, P>,
  set: NewSetInformation,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
}
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for SignSlashReportTask<CD, TD, P> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut txn = self.db.txn();
      let Some(()) = SignSlashReport::try_recv(&mut txn, self.set.set) else { return Ok(false) };

      // Fetch the slash report for this Tributary
      let mut tx =
        serai_coordinator_tributary::slash_report_transaction(&self.tributary_db, &self.set);
      tx.sign(&mut OsRng, self.tributary.genesis(), &self.key);

      let res = self.tributary.add_transaction(tx.clone()).await;
      match &res {
        // Fresh publication, already published
        Ok(true | false) => {}
        Err(
          TransactionError::TooLargeTransaction |
          TransactionError::InvalidSigner |
          TransactionError::InvalidNonce |
          TransactionError::InvalidSignature |
          TransactionError::InvalidContent,
        ) => {
          panic!("created an invalid SlashReport transaction, tx: {tx:?}, err: {res:?}");
        }
        // We've published too many transactions recently
        // Drop this txn to try to publish it again later on a future iteration
        Err(TransactionError::TooManyInMempool) => {
          drop(txn);
          return Ok(false);
        }
        // This isn't a Provided transaction so this should never be hit
        Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
      }

      txn.commit();
      Ok(true)
    }
  }
}

/// Run the scan task whenever the Tributary adds a new block.
async fn scan_on_new_block<CD: DbTrait, TD: DbTrait, P: P2p>(
  db: CD,
  set: ValidatorSet,
  tributary: Tributary<TD, Transaction, P>,
  scan_tributary_task: TaskHandle,
  tasks_to_keep_alive: Vec<TaskHandle>,
) {
  loop {
    // Break once this Tributary is retired
    if crate::RetiredTributary::get(&db, set.network).map(|session| session.0) >=
      Some(set.session.0)
    {
      drop(tasks_to_keep_alive);
      break;
    }

    // Have the tributary scanner run as soon as there's a new block
    match tributary.next_block_notification().await.await {
      Ok(()) => scan_tributary_task.run_now(),
      // unreachable since this owns the tributary object and doesn't drop it
      Err(_) => panic!("tributary was dropped causing notification to error"),
    }
  }
}

/// Spawn a Tributary.
///
/// This will:
/// - Spawn the Tributary
/// - Inform the P2P network of the Tributary
/// - Spawn the ScanTributaryTask
/// - Spawn the ProvideCosignCosignedTransactionsTask
/// - Spawn the TributaryProcessorMessagesTask
/// - Spawn the SignSlashReportTask
/// - Iterate the scan task whenever a new block occurs (not just on the standard interval)
pub(crate) async fn spawn_tributary<P: P2p>(
  db: Db,
  message_queue: Arc<MessageQueue>,
  p2p: P,
  p2p_add_tributary: &mpsc::UnboundedSender<(ValidatorSet, Tributary<Db, Transaction, P>)>,
  set: NewSetInformation,
  serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
) {
  // Don't spawn retired Tributaries
  if crate::db::RetiredTributary::get(&db, set.set.network).map(|session| session.0) >=
    Some(set.set.session.0)
  {
    return;
  }

  let genesis = <[u8; 32]>::from(Blake2s::<U32>::digest((set.serai_block, set.set).encode()));

  // Since the Serai block will be finalized, then cosigned, before we handle this, this time will
  // be a couple of minutes stale. While the Tributary will still function with a start time in the
  // past, the Tributary will immediately incur round timeouts. We reduce these by adding a
  // constant delay of a couple of minutes.
  const TRIBUTARY_START_TIME_DELAY: u64 = 120;
  let start_time = set.declaration_time + TRIBUTARY_START_TIME_DELAY;

  let mut tributary_validators = Vec::with_capacity(set.validators.len());
  for (validator, weight) in set.validators.iter().copied() {
    let validator_key = <Ristretto as Ciphersuite>::read_G(&mut validator.0.as_slice())
      .expect("Serai validator had an invalid public key");
    let weight = u64::from(weight);
    tributary_validators.push((validator_key, weight));
  }

  // Spawn the Tributary
  let tributary_db = crate::db::tributary_db(set.set);
  let tributary = Tributary::new(
    tributary_db.clone(),
    genesis,
    start_time,
    serai_key.clone(),
    tributary_validators,
    p2p,
  )
  .await
  .unwrap();
  let reader = tributary.reader();

  // Inform the P2P network
  p2p_add_tributary
    .send((set.set, tributary.clone()))
    .expect("p2p's add_tributary channel was closed?");

  // Spawn the task to provide Cosign/Cosigned transactions onto the Tributary
  let (provide_cosign_cosigned_transactions_task_def, provide_cosign_cosigned_transactions_task) =
    Task::new();
  tokio::spawn(
    (ProvideCosignCosignedTransactionsTask {
      db: db.clone(),
      tributary_db: tributary_db.clone(),
      set: set.clone(),
      tributary: tributary.clone(),
    })
    .continually_run(provide_cosign_cosigned_transactions_task_def, vec![]),
  );

  // Spawn the task to send all messages from the Tributary scanner to the message-queue
  let (scan_tributary_messages_task_def, scan_tributary_messages_task) = Task::new();
  tokio::spawn(
    (TributaryProcessorMessagesTask {
      tributary_db: tributary_db.clone(),
      set: set.set,
      message_queue,
    })
    .continually_run(scan_tributary_messages_task_def, vec![]),
  );

  // Spawn the scan task
  let (scan_tributary_task_def, scan_tributary_task) = Task::new();
  tokio::spawn(
    ScanTributaryTask::<_, P>::new(tributary_db.clone(), &set, reader)
      // This is the only handle for this TributaryProcessorMessagesTask, so when this task is
      // dropped, it will be too
      .continually_run(scan_tributary_task_def, vec![scan_tributary_messages_task]),
  );

  // Spawn the sign slash report task
  let (sign_slash_report_task_def, sign_slash_report_task) = Task::new();
  tokio::spawn(
    (SignSlashReportTask {
      db: db.clone(),
      tributary_db: tributary_db.clone(),
      tributary: tributary.clone(),
      set: set.clone(),
      key: serai_key.clone(),
    })
    .continually_run(sign_slash_report_task_def, vec![]),
  );

  // Spawn the add transactions task
  let (add_tributary_transactions_task_def, add_tributary_transactions_task) = Task::new();
  tokio::spawn(
    (AddTributaryTransactionsTask {
      db: db.clone(),
      tributary_db,
      tributary: tributary.clone(),
      set: set.set,
      key: serai_key,
    })
    .continually_run(add_tributary_transactions_task_def, vec![]),
  );

  // Whenever a new block occurs, immediately run the scan task
  // This function also preserves the ProvideCosignCosignedTransactionsTask handle until the
  // Tributary is retired, ensuring it isn't dropped prematurely and that the tasks don't run ad
  // infinitum
  tokio::spawn(scan_on_new_block(
    db,
    set.set,
    tributary,
    scan_tributary_task,
    vec![
      provide_cosign_cosigned_transactions_task,
      sign_slash_report_task,
      add_tributary_transactions_task,
    ],
  ));
}
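
// Illustrative sketch, not part of the original commit: the Tributary genesis
// is the Blake2s-256 hash of the SCALE encoding of the declaring Serai block
// and the set, so every validator derives the same genesis with no further
// communication. The tuple below stands in for `(set.serai_block, set.set)`.
#[cfg(test)]
mod genesis_derivation_example {
  use blake2::{digest::typenum::U32, Digest, Blake2s};
  use scale::Encode;

  #[test]
  fn deterministic_genesis() {
    let serai_block = [1u8; 32];
    let set = (0u32, 7u16); // hypothetical stand-in for a ValidatorSet
    let genesis = |block: [u8; 32], set: (u32, u16)| {
      <[u8; 32]>::from(Blake2s::<U32>::digest((block, set).encode()))
    };
    // The same inputs always derive the same genesis
    assert_eq!(genesis(serai_block, set), genesis(serai_block, set));
    // Differing inputs (such as another session's set) derive differing geneses
    assert_ne!(genesis(serai_block, set), genesis(serai_block, (0u32, 8u16)));
  }
}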
@@ -1,6 +0,0 @@
|
||||
mod transaction;
|
||||
pub use transaction::Transaction;
|
||||
|
||||
mod db;
|
||||
|
||||
mod scan;
|
||||
@@ -1,203 +0,0 @@
|
||||
use core::future::Future;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use ciphersuite::group::GroupEncoding;
|
||||
|
||||
use serai_client::{primitives::SeraiAddress, validator_sets::primitives::ValidatorSet};
|
||||
|
||||
use tributary::{
|
||||
Signed as TributarySigned, TransactionError, TransactionKind, TransactionTrait,
|
||||
Transaction as TributaryTransaction, Block, TributaryReader,
|
||||
tendermint::{
|
||||
tx::{TendermintTx, Evidence, decode_signed_message},
|
||||
TendermintNetwork,
|
||||
},
|
||||
};
|
||||
|
||||
use serai_db::*;
|
||||
use serai_task::ContinuallyRan;
|
||||
|
||||
use crate::tributary::{
|
||||
db::*,
|
||||
transaction::{Signed, Transaction},
|
||||
};
|
||||
|
||||
struct ScanBlock<'a, D: DbTxn, TD: Db> {
|
||||
txn: &'a mut D,
|
||||
set: ValidatorSet,
|
||||
validators: &'a [SeraiAddress],
|
||||
total_weight: u64,
|
||||
validator_weights: &'a HashMap<SeraiAddress, u64>,
|
||||
tributary: &'a TributaryReader<TD, Transaction>,
|
||||
}
|
||||
impl<'a, D: DbTxn, TD: Db> ScanBlock<'a, D, TD> {
|
||||
fn handle_application_tx(&mut self, block_number: u64, tx: Transaction) {
|
||||
let signer = |signed: Signed| SeraiAddress(signed.signer.to_bytes());
|
||||
|
||||
if let TransactionKind::Signed(_, TributarySigned { signer, .. }) = tx.kind() {
|
||||
// Don't handle transactions from those fatally slashed
|
||||
// TODO: The fact they can publish these TXs makes this a notable spam vector
|
||||
if TributaryDb::is_fatally_slashed(self.txn, self.set, SeraiAddress(signer.to_bytes())) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
match tx {
|
||||
Transaction::RemoveParticipant { participant, signed } => {
|
||||
// Accumulate this vote and fatally slash the participant if past the threshold
|
||||
let signer = signer(signed);
|
||||
match TributaryDb::accumulate(
|
||||
self.txn,
|
||||
self.set,
|
||||
self.validators,
|
||||
self.total_weight,
|
||||
block_number,
|
||||
Topic::RemoveParticipant { participant },
|
||||
signer,
|
||||
self.validator_weights[&signer],
|
||||
&(),
|
||||
) {
|
||||
DataSet::None => {}
|
||||
DataSet::Participating(_) => {
|
||||
TributaryDb::fatal_slash(self.txn, self.set, participant, "voted to remove")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Transaction::DkgParticipation { participation, signed } => {
|
||||
// Send the participation to the processor
|
||||
todo!("TODO")
|
||||
}
|
||||
Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed } => {
|
||||
// Accumulate the preprocesses into our own FROST attempt manager
|
||||
todo!("TODO")
|
||||
}
|
||||
Transaction::DkgConfirmationShare { attempt, share, signed } => {
|
||||
// Accumulate the shares into our own FROST attempt manager
|
||||
todo!("TODO")
|
||||
}
|
||||
|
||||
Transaction::Cosign { substrate_block_hash } => {
|
||||
// Update the latest intended-to-be-cosigned Substrate block
|
||||
todo!("TODO")
|
||||
}
|
||||
Transaction::Cosigned { substrate_block_hash } => {
|
||||
// Start cosigning the latest intended-to-be-cosigned block
|
||||
todo!("TODO")
|
||||
}
|
||||
Transaction::SubstrateBlock { hash } => {
|
||||
// Whitelist all of the IDs this Substrate block causes to be signed
|
||||
todo!("TODO")
|
||||
}
|
||||
Transaction::Batch { hash } => {
|
||||
// Whitelist the signing of this batch, publishing our own preprocess
|
||||
todo!("TODO")
|
||||
}
|
||||
|
||||
Transaction::SlashReport { slash_points, signed } => {
|
||||
// Accumulate, and if past the threshold, calculate *the* slash report and start signing it
|
||||
todo!("TODO")
|
||||
}
|
||||
|
||||
Transaction::Sign { id, attempt, label, data, signed } => todo!("TODO"),
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_block(mut self, block_number: u64, block: Block<Transaction>) {
|
||||
TributaryDb::start_of_block(self.txn, self.set, block_number);
|
||||
|
||||
for tx in block.transactions {
|
||||
match tx {
|
||||
TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => {
|
||||
// Since the evidence is on the chain, it will have already been validated
|
||||
// We can just punish the signer
|
||||
let data = match ev {
|
||||
Evidence::ConflictingMessages(first, second) => (first, Some(second)),
|
||||
Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None),
|
||||
};
|
||||
/* TODO
|
||||
let msgs = (
|
||||
decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.0).unwrap(),
|
||||
if data.1.is_some() {
|
||||
Some(
|
||||
decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.1.unwrap())
|
||||
.unwrap(),
|
||||
)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
);
|
||||
|
||||
// Since anything with evidence is fundamentally faulty behavior, not just temporal
|
||||
// errors, mark the node as fatally slashed
|
||||
TributaryDb::fatal_slash(
|
||||
self.txn, msgs.0.msg.sender, &format!("invalid tendermint messages: {msgs:?}"));
|
||||
*/
|
||||
todo!("TODO")
|
||||
}
|
||||
TributaryTransaction::Application(tx) => {
|
||||
self.handle_application_tx(block_number, tx);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct ScanTributaryTask<D: Db, TD: Db> {
|
||||
db: D,
|
||||
set: ValidatorSet,
|
||||
validators: Vec<SeraiAddress>,
|
||||
total_weight: u64,
|
||||
validator_weights: HashMap<SeraiAddress, u64>,
|
||||
tributary: TributaryReader<TD, Transaction>,
|
||||
}
|
||||
impl<D: Db, TD: Db> ContinuallyRan for ScanTributaryTask<D, TD> {
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||
async move {
|
||||
let (mut last_block_number, mut last_block_hash) =
|
||||
TributaryDb::last_handled_tributary_block(&self.db, self.set)
|
||||
.unwrap_or((0, self.tributary.genesis()));
|
||||
|
||||
let mut made_progess = false;
|
||||
while let Some(next) = self.tributary.block_after(&last_block_hash) {
|
||||
let block = self.tributary.block(&next).unwrap();
|
||||
let block_number = last_block_number + 1;
|
||||
let block_hash = block.hash();
|
||||
|
||||
// Make sure we have all of the provided transactions for this block
|
||||
for tx in &block.transactions {
|
||||
let TransactionKind::Provided(order) = tx.kind() else {
|
||||
continue;
|
||||
};
|
||||
|
||||
// make sure we have all the provided txs in this block locally
|
||||
if !self.tributary.locally_provided_txs_in_block(&block_hash, order) {
|
||||
return Err(format!(
|
||||
"didn't have the provided Transactions on-chain for set (ephemeral error): {:?}",
|
||||
self.set
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
let mut txn = self.db.txn();
|
||||
(ScanBlock {
|
||||
txn: &mut txn,
|
||||
set: self.set,
|
||||
validators: &self.validators,
|
||||
total_weight: self.total_weight,
|
||||
validator_weights: &self.validator_weights,
|
||||
tributary: &self.tributary,
|
||||
})
|
||||
.handle_block(block_number, block);
|
||||
TributaryDb::set_last_handled_tributary_block(&mut txn, self.set, block_number, block_hash);
|
||||
last_block_number = block_number;
|
||||
last_block_hash = block_hash;
|
||||
txn.commit();
|
||||
|
||||
made_progess = true;
|
||||
}
|
||||
|
||||
Ok(made_progess)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,308 +0,0 @@
use core::{ops::Deref, fmt::Debug};
use std::io;

use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng};

use blake2::{digest::typenum::U32, Digest, Blake2b};
use ciphersuite::{
  group::{ff::Field, GroupEncoding},
  Ciphersuite, Ristretto,
};
use schnorr::SchnorrSignature;

use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::primitives::SeraiAddress;

use processor_messages::sign::VariantSignId;

use tributary::{
  ReadWrite,
  transaction::{
    Signed as TributarySigned, TransactionError, TransactionKind, Transaction as TransactionTrait,
  },
};

/// The label for data from a signing protocol.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
pub enum SigningProtocolRound {
  /// A preprocess.
  Preprocess,
  /// A signature share.
  Share,
}

impl SigningProtocolRound {
  fn nonce(&self) -> u32 {
    match self {
      SigningProtocolRound::Preprocess => 0,
      SigningProtocolRound::Share => 1,
    }
  }
}

/// `tributary::Signed` without the nonce.
///
/// All of our nonces are deterministic to the type of transaction and fields within.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Signed {
  pub signer: <Ristretto as Ciphersuite>::G,
  pub signature: SchnorrSignature<Ristretto>,
}

impl BorshSerialize for Signed {
  fn serialize<W: io::Write>(&self, writer: &mut W) -> Result<(), io::Error> {
    writer.write_all(self.signer.to_bytes().as_ref())?;
    self.signature.write(writer)
  }
}
impl BorshDeserialize for Signed {
  fn deserialize_reader<R: io::Read>(reader: &mut R) -> Result<Self, io::Error> {
    let signer = Ristretto::read_G(reader)?;
    let signature = SchnorrSignature::read(reader)?;
    Ok(Self { signer, signature })
  }
}

impl Signed {
  /// Provide a nonce to convert a `Signed` into a `tributary::Signed`.
  fn nonce(&self, nonce: u32) -> TributarySigned {
    TributarySigned { signer: self.signer, nonce, signature: self.signature }
  }
}

/// The Tributary transaction definition used by Serai
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub enum Transaction {
  /// A vote to remove a participant for invalid behavior
  RemoveParticipant {
    /// The participant to remove
    participant: SeraiAddress,
    /// The transaction's signer and signature
    signed: Signed,
  },

  /// A participation in the DKG
  DkgParticipation {
    participation: Vec<u8>,
    /// The transaction's signer and signature
    signed: Signed,
  },
  /// The preprocess to confirm the DKG results on-chain
  DkgConfirmationPreprocess {
    /// The attempt number of this signing protocol
    attempt: u32,
    // The preprocess
    preprocess: [u8; 64],
    /// The transaction's signer and signature
    signed: Signed,
  },
  /// The signature share to confirm the DKG results on-chain
  DkgConfirmationShare {
    /// The attempt number of this signing protocol
    attempt: u32,
    // The signature share
    share: [u8; 32],
    /// The transaction's signer and signature
    signed: Signed,
  },

  /// Intend to co-sign a finalized Substrate block
  ///
  /// When the time comes to start a new co-signing protocol, the most recent Substrate block will
  /// be the one selected to be cosigned.
  Cosign {
    /// The hash of the Substrate block to sign
    substrate_block_hash: [u8; 32],
  },

  /// The cosign for a Substrate block
  ///
  /// After producing this cosign, we need to start work on the latest intended-to-be cosigned
  /// block. That requires agreement on when this cosign was produced, which we solve by embedding
  /// this cosign on chain.
  ///
  /// We ideally don't have this transaction at all. The coordinator, without access to any of the
  /// key shares, could observe the FROST signing session and determine a successful completion.
  /// Unfortunately, that functionality is not present in modular-frost, so we do need to support
  /// *some* asynchronous flow (where the processor or P2P network informs us of the successful
  /// completion).
  ///
  /// If we use a `Provided` transaction, that requires everyone observe this cosign.
  ///
  /// If we use an `Unsigned` transaction, we can't verify the cosign signature inside
  /// `Transaction::verify` unless we embedded the full `SignedCosign` on-chain. The issue is since
  /// a Tributary is stateless with regards to the on-chain logic, including `Transaction::verify`,
  /// we can't verify the signature against the group's public key unless we also include that (but
  /// then we open a DoS where arbitrary group keys are specified to cause inclusion of arbitrary
  /// blobs on chain).
  ///
  /// If we use a `Signed` transaction, we mitigate the DoS risk by having someone to fatally
  /// slash. We have horrible performance though as for 100 validators, all 100 will publish this
  /// transaction.
  ///
  /// We could use a signed `Unsigned` transaction, where it includes a signer and signature but
  /// isn't technically a Signed transaction. This lets us de-duplicate the transaction premised on
  /// its contents.
  ///
  /// The optimal choice is likely to use a `Provided` transaction. We don't actually need to
  /// observe the produced cosign (which is ephemeral). As long as it's agreed the cosign in
  /// question no longer needs to be produced, which would mean the cosigning protocol at-large
  /// cosigning the block in question, it'd be safe to provide this and move on to the next cosign.
  Cosigned { substrate_block_hash: [u8; 32] },

  /// Acknowledge a Substrate block
  ///
  /// This is provided after the block has been cosigned.
  ///
  /// With the acknowledgement of a Substrate block, we can whitelist all the `VariantSignId`s
  /// resulting from its handling.
  SubstrateBlock {
    /// The hash of the Substrate block
    hash: [u8; 32],
  },

  /// Acknowledge a Batch
  ///
  /// Once everyone has acknowledged the Batch, we can begin signing it.
  Batch {
    /// The hash of the Batch's serialization.
    ///
    /// Generally, we refer to a Batch by its ID/the hash of its instructions. Here, we want to
    /// ensure consensus on the Batch, and achieving consensus on its hash is the most effective
    /// way to do that.
    hash: [u8; 32],
  },

  /// Data from a signing protocol.
  Sign {
    /// The ID of the object being signed
    id: VariantSignId,
    /// The attempt number of this signing protocol
    attempt: u32,
    /// The label for this data within the signing protocol
    label: SigningProtocolRound,
    /// The data itself
    ///
    /// There will be `n` blobs of data where `n` is the amount of key shares the validator sending
    /// this transaction has.
    data: Vec<Vec<u8>>,
    /// The transaction's signer and signature
    signed: Signed,
  },

  /// The local view of slashes observed by the transaction's sender
  SlashReport {
    /// The slash points accrued by each validator
    slash_points: Vec<u32>,
    /// The transaction's signer and signature
    signed: Signed,
  },
}

impl ReadWrite for Transaction {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    borsh::from_reader(reader)
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    borsh::to_writer(writer, self)
  }
}

impl TransactionTrait for Transaction {
  fn kind(&self) -> TransactionKind {
    match self {
      Transaction::RemoveParticipant { participant, signed } => {
        TransactionKind::Signed((b"RemoveParticipant", participant).encode(), signed.nonce(0))
      }

      Transaction::DkgParticipation { signed, .. } => {
        TransactionKind::Signed(b"DkgParticipation".encode(), signed.nonce(0))
      }
      Transaction::DkgConfirmationPreprocess { attempt, signed, .. } => {
        TransactionKind::Signed((b"DkgConfirmation", attempt).encode(), signed.nonce(0))
      }
      Transaction::DkgConfirmationShare { attempt, signed, .. } => {
        TransactionKind::Signed((b"DkgConfirmation", attempt).encode(), signed.nonce(1))
      }

      Transaction::Cosign { .. } => TransactionKind::Provided("CosignSubstrateBlock"),
      Transaction::Cosigned { .. } => TransactionKind::Provided("Cosigned"),
      Transaction::SubstrateBlock { .. } => TransactionKind::Provided("SubstrateBlock"),
      Transaction::Batch { .. } => TransactionKind::Provided("Batch"),

      Transaction::Sign { id, attempt, label, signed, .. } => {
        TransactionKind::Signed((b"Sign", id, attempt).encode(), signed.nonce(label.nonce()))
      }

      Transaction::SlashReport { signed, .. } => {
        TransactionKind::Signed(b"SlashReport".encode(), signed.nonce(0))
      }
    }
  }

  fn hash(&self) -> [u8; 32] {
    let mut tx = ReadWrite::serialize(self);
    if let TransactionKind::Signed(_, signed) = self.kind() {
      // Make sure the part we're cutting off is the signature
      assert_eq!(tx.drain((tx.len() - 64) ..).collect::<Vec<_>>(), signed.signature.serialize());
    }
    Blake2b::<U32>::digest(&tx).into()
  }

  // We don't have any verification logic embedded into the transaction. We just slash anyone who
  // publishes an invalid transaction.
  fn verify(&self) -> Result<(), TransactionError> {
    Ok(())
  }
}

impl Transaction {
  // Sign a transaction
  //
  // Panics if signing a transaction type which isn't `TransactionKind::Signed`
  pub fn sign<R: RngCore + CryptoRng>(
    &mut self,
    rng: &mut R,
    genesis: [u8; 32],
    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  ) {
    fn signed(tx: &mut Transaction) -> &mut Signed {
      #[allow(clippy::match_same_arms)] // This doesn't make semantic sense here
      match tx {
        Transaction::RemoveParticipant { ref mut signed, .. } |
        Transaction::DkgParticipation { ref mut signed, .. } |
        Transaction::DkgConfirmationPreprocess { ref mut signed, .. } => signed,
        Transaction::DkgConfirmationShare { ref mut signed, .. } => signed,

        Transaction::Cosign { .. } => panic!("signing CosignSubstrateBlock"),
        Transaction::Cosigned { .. } => panic!("signing Cosigned"),
        Transaction::SubstrateBlock { .. } => panic!("signing SubstrateBlock"),
        Transaction::Batch { .. } => panic!("signing Batch"),

        Transaction::Sign { ref mut signed, .. } => signed,

        Transaction::SlashReport { ref mut signed, .. } => signed,
      }
    }

    // Decide the nonce to sign with
    let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));

    {
      // Set the signer and the nonce
      let signed = signed(self);
      signed.signer = Ristretto::generator() * key.deref();
      signed.signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();
    }

    // Get the signature hash (which now includes `R || A` making it valid as the challenge)
    let sig_hash = self.sig_hash(genesis);

    // Sign the signature
    signed(self).signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);
  }
}
@@ -18,7 +18,9 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true

[dependencies]
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
bitvec = { version = "1", default-features = false, features = ["std"] }

scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }
@@ -1,6 +1,6 @@
# Serai Coordinate Substrate Scanner
# Serai Coordinator Substrate

This is the scanner of the Serai blockchain for the purposes of Serai's coordinator.
This crate manages the Serai coordinator's interactions with Serai's Substrate blockchain.

Two event streams are defined:
@@ -12,3 +12,9 @@ Two event streams are defined:
The canonical event stream is available without provision of a validator's public key. The ephemeral
event stream requires provision of a validator's public key. Both are ordered within themselves, yet
there are no ordering guarantees across the two.

Additionally, a collection of tasks are defined to publish data onto Serai (a usage sketch follows
the list):

- `SetKeysTask`, which sets the keys generated via DKGs onto Serai.
- `PublishBatchTask`, which publishes `Batch`s onto Serai.
- `PublishSlashReportTask`, which publishes `SlashReport`s onto Serai.
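As a rough sketch of driving one of these tasks, where the constructors and `run_iteration` are as
defined later in this diff, while the crate path `serai_coordinator_substrate`, the choice of
`NetworkId::Bitcoin`, and the manual polling loop with its sleep interval are all assumptions for
illustration:

  // A minimal sketch, assuming the APIs shown in this diff; the real coordinator
  // may drive its tasks through a different runner.
  use std::{sync::Arc, time::Duration};

  use serai_task::ContinuallyRan;
  use serai_client::{primitives::NetworkId, Serai};
  use serai_coordinator_substrate::PublishBatchTask; // assumed crate path

  async fn drive_publish_batch<D: serai_db::Db>(db: D, serai: Arc<Serai>) {
    // `new` returns None for `NetworkId::Serai`, which has no external batches
    let mut task =
      PublishBatchTask::new(db, serai, NetworkId::Bitcoin).expect("external network");
    loop {
      match task.run_iteration().await {
        // Made progress, so poll again immediately
        Ok(true) => {}
        // Nothing to do, so sleep before polling again (interval is illustrative)
        Ok(false) => tokio::time::sleep(Duration::from_secs(6)).await,
        Err(e) => log::warn!("PublishBatchTask errored: {e:?}"),
      }
    }
  }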
@@ -1,4 +1,5 @@
use std::future::Future;
use core::future::Future;
use std::sync::Arc;

use futures::stream::{StreamExt, FuturesOrdered};

@@ -20,20 +21,22 @@ create_db!(
/// The event stream for canonical events.
pub struct CanonicalEventStream<D: Db> {
  db: D,
  serai: Serai,
  serai: Arc<Serai>,
}

impl<D: Db> CanonicalEventStream<D> {
  /// Create a new canonical event stream.
  ///
  /// Only one of these may exist over the provided database.
  pub fn new(db: D, serai: Serai) -> Self {
  pub fn new(db: D, serai: Arc<Serai>) -> Self {
    Self { db, serai }
  }
}

impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let next_block = NextBlock::get(&self.db).unwrap_or(0);
      let latest_finalized_block =

@@ -107,6 +110,9 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {

      // Sync the next set of upcoming blocks all at once to minimize latency
      const BLOCKS_TO_SYNC_AT_ONCE: u64 = 10;
      // FuturesOrdered can be bad practice due to potentially causing timeouts if it isn't
      // sufficiently polled. Considering our processing loop is minimal and it does poll this,
      // it's fine.
      let mut set = FuturesOrdered::new();
      for block_number in
        next_block ..= latest_finalized_block.min(next_block + BLOCKS_TO_SYNC_AT_ONCE)
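To make the trade-off in that comment concrete, here is a self-contained illustration of
`FuturesOrdered` (from the `futures` crate imported above; `push_back` per recent `futures` 0.3
releases) preserving submission order:

  // Illustrative only: FuturesOrdered yields results in push order, which is why
  // the scanner can fetch several blocks concurrently yet process them in order.
  use futures::stream::{FuturesOrdered, StreamExt};

  async fn fetch(block_number: u64) -> u64 {
    // Stand-in for an RPC call fetching a block
    block_number
  }

  async fn demo() {
    let mut set = FuturesOrdered::new();
    for block_number in 0 .. 5 {
      set.push_back(fetch(block_number));
    }
    // Yields 0, 1, 2, 3, 4 in submission order, regardless of completion order
    while let Some(block_number) = set.next().await {
      println!("processing block {block_number}");
    }
  }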
@@ -1,4 +1,5 @@
use std::future::Future;
use core::future::Future;
use std::sync::Arc;

use futures::stream::{StreamExt, FuturesOrdered};

@@ -24,7 +25,7 @@ create_db!(
/// The event stream for ephemeral events.
pub struct EphemeralEventStream<D: Db> {
  db: D,
  serai: Serai,
  serai: Arc<Serai>,
  validator: PublicKey,
}

@@ -32,13 +33,15 @@ impl<D: Db> EphemeralEventStream<D> {
  /// Create a new ephemeral event stream.
  ///
  /// Only one of these may exist over the provided database.
  pub fn new(db: D, serai: Serai, validator: PublicKey) -> Self {
  pub fn new(db: D, serai: Arc<Serai>, validator: PublicKey) -> Self {
    Self { db, serai, validator }
  }
}

impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let next_block = NextBlock::get(&self.db).unwrap_or(0);
      let latest_finalized_block =

@@ -100,6 +103,11 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {

      // Sync the next set of upcoming blocks all at once to minimize latency
      const BLOCKS_TO_SYNC_AT_ONCE: u64 = 50;
      // FuturesOrdered can be bad practice due to potentially causing timeouts if it isn't
      // sufficiently polled. Our processing loop isn't minimal, itself making multiple requests,
      // but the loop body should only be executed a few times a week. It's better to get through
      // most blocks with this optimization, and have timeouts a few times a week, than not have
      // this at all.
      let mut set = FuturesOrdered::new();
      for block_number in
        next_block ..= latest_finalized_block.min(next_block + BLOCKS_TO_SYNC_AT_ONCE)

@@ -151,8 +159,9 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
          Err("validator's weight exceeded u16::MAX".to_string())?
        };

        // Do the summation in u32 so we don't risk a u16 overflow
        let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::<u32>();
        if total_weight > MAX_KEY_SHARES_PER_SET {
        if total_weight > u32::from(MAX_KEY_SHARES_PER_SET) {
          Err(format!(
            "{set:?} has {total_weight} key shares when the max is {MAX_KEY_SHARES_PER_SET}"
          ))?;

@@ -211,7 +220,7 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
            &NewSetInformation {
              set: *set,
              serai_block: block.block_hash,
              start_time: block.time,
              declaration_time: block.time,
              // TODO: Why do we have this as an explicit field here?
              // Shouldn't this be inlined into the Processor's key gen code, where it's used?
              threshold: ((total_weight * 2) / 3) + 1,

@@ -228,7 +237,7 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
        else {
          panic!("AcceptedHandover event wasn't an AcceptedHandover event: {accepted_handover:?}");
        };
        crate::SignSlashReport::send(&mut txn, set);
        crate::SignSlashReport::send(&mut txn, *set);
      }

      txn.commit();
@@ -6,14 +6,25 @@ use scale::{Encode, Decode};
use borsh::{io, BorshSerialize, BorshDeserialize};

use serai_client::{
  primitives::{PublicKey, NetworkId},
  validator_sets::primitives::ValidatorSet,
  primitives::{NetworkId, PublicKey, Signature, SeraiAddress},
  validator_sets::primitives::{Session, ValidatorSet, KeyPair},
  in_instructions::primitives::SignedBatch,
  Transaction,
};

use serai_db::*;

mod canonical;
pub use canonical::CanonicalEventStream;
mod ephemeral;
pub use ephemeral::EphemeralEventStream;

mod set_keys;
pub use set_keys::SetKeysTask;
mod publish_batch;
pub use publish_batch::PublishBatchTask;
mod publish_slash_report;
pub use publish_slash_report::PublishSlashReportTask;

fn borsh_serialize_validators<W: io::Write>(
  validators: &Vec<(PublicKey, u16)>,

@@ -30,26 +41,28 @@ fn borsh_deserialize_validators<R: io::Read>(
}

/// The information for a new set.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct NewSetInformation {
  set: ValidatorSet,
  serai_block: [u8; 32],
  start_time: u64,
  threshold: u16,
  /// The set.
  pub set: ValidatorSet,
  /// The Serai block which declared it.
  pub serai_block: [u8; 32],
  /// The time of the block which declared it, in seconds.
  pub declaration_time: u64,
  /// The threshold to use.
  pub threshold: u16,
  /// The validators, with the amount of key shares they have.
  #[borsh(
    serialize_with = "borsh_serialize_validators",
    deserialize_with = "borsh_deserialize_validators"
  )]
  validators: Vec<(PublicKey, u16)>,
  evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
  pub validators: Vec<(PublicKey, u16)>,
  /// The eVRF public keys.
  pub evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
}

mod _public_db {
  use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};

  use serai_db::*;

  use crate::NewSetInformation;
  use super::*;

  db_channel!(
    CoordinatorSubstrate {

@@ -58,8 +71,20 @@ mod _public_db {

      // Relevant new set, from an ephemeral event stream
      NewSet: () -> NewSetInformation,
      // Relevant sign slash report, from an ephemeral event stream
      SignSlashReport: () -> ValidatorSet,
      // Potentially relevant sign slash report, from an ephemeral event stream
      SignSlashReport: (set: ValidatorSet) -> (),

      // Signed batches to publish onto the Serai network
      SignedBatches: (network: NetworkId) -> SignedBatch,
    }
  );

  create_db!(
    CoordinatorSubstrate {
      // Keys to set on the Serai network
      Keys: (network: NetworkId) -> (Session, Vec<u8>),
      // Slash reports to publish onto the Serai network
      SlashReports: (network: NetworkId) -> (Session, Vec<u8>),
    }
  );
}

@@ -101,12 +126,103 @@ impl NewSet {
/// notifications for all relevant validator sets will be included.
pub struct SignSlashReport;
impl SignSlashReport {
  pub(crate) fn send(txn: &mut impl DbTxn, set: &ValidatorSet) {
    _public_db::SignSlashReport::send(txn, set);
  pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet) {
    _public_db::SignSlashReport::send(txn, set, &());
  }
  /// Try to receive a notification to sign a slash report, returning `None` if there is none to
  /// receive.
  pub fn try_recv(txn: &mut impl DbTxn) -> Option<ValidatorSet> {
    _public_db::SignSlashReport::try_recv(txn)
  pub fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<()> {
    _public_db::SignSlashReport::try_recv(txn, set)
  }
}

/// The keys to set on Serai.
pub struct Keys;
impl Keys {
  /// Set the keys to report for a validator set.
  ///
  /// This only saves the most recent keys as only a single session is eligible to have its keys
  /// reported at once.
  pub fn set(
    txn: &mut impl DbTxn,
    set: ValidatorSet,
    key_pair: KeyPair,
    signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
    signature: Signature,
  ) {
    // If we have a more recent pair of keys, don't write this historic one
    if let Some((existing_session, _)) = _public_db::Keys::get(txn, set.network) {
      if existing_session.0 >= set.session.0 {
        return;
      }
    }

    let tx = serai_client::validator_sets::SeraiValidatorSets::set_keys(
      set.network,
      key_pair,
      signature_participants,
      signature,
    );
    _public_db::Keys::set(txn, set.network, &(set.session, tx.encode()));
  }
  pub(crate) fn take(txn: &mut impl DbTxn, network: NetworkId) -> Option<(Session, Transaction)> {
    let (session, tx) = _public_db::Keys::take(txn, network)?;
    Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
  }
}

/// The signed batches to publish onto Serai.
pub struct SignedBatches;
impl SignedBatches {
  /// Send a `SignedBatch` to publish onto Serai.
  ///
  /// These will be published sequentially. Out-of-order sending risks hanging the task.
  pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) {
    _public_db::SignedBatches::send(txn, batch.batch.network, batch);
  }
  pub(crate) fn try_recv(txn: &mut impl DbTxn, network: NetworkId) -> Option<SignedBatch> {
    _public_db::SignedBatches::try_recv(txn, network)
  }
}

/// The slash report was invalid.
#[derive(Debug)]
pub struct InvalidSlashReport;

/// The slash reports to publish onto Serai.
pub struct SlashReports;
impl SlashReports {
  /// Set the slashes to report for a validator set.
  ///
  /// This only saves the most recent slashes as only a single session is eligible to have its
  /// slashes reported at once.
  ///
  /// Returns Err if the slashes are invalid. Returns Ok if the slashes weren't detected as
  /// invalid. Slashes may be considered invalid by the Serai blockchain later even if not detected
  /// as invalid here.
  pub fn set(
    txn: &mut impl DbTxn,
    set: ValidatorSet,
    slashes: Vec<(SeraiAddress, u32)>,
    signature: Signature,
  ) -> Result<(), InvalidSlashReport> {
    // If we have a more recent slash report, don't write this historic one
    if let Some((existing_session, _)) = _public_db::SlashReports::get(txn, set.network) {
      if existing_session.0 >= set.session.0 {
        return Ok(());
      }
    }

    let tx = serai_client::validator_sets::SeraiValidatorSets::report_slashes(
      set.network,
      slashes.try_into().map_err(|_| InvalidSlashReport)?,
      signature,
    );
    _public_db::SlashReports::set(txn, set.network, &(set.session, tx.encode()));
    Ok(())
  }
  pub(crate) fn take(txn: &mut impl DbTxn, network: NetworkId) -> Option<(Session, Transaction)> {
    let (session, tx) = _public_db::SlashReports::take(txn, network)?;
    Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
  }
}
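For example, the new set-keyed form of `SignSlashReport` is drained per set; a minimal consumer
sketch, where the crate path `serai_coordinator_substrate` and the surrounding plumbing are
assumptions for illustration:

  use serai_db::{Db, DbTxn};
  use serai_client::validator_sets::primitives::ValidatorSet;
  use serai_coordinator_substrate::SignSlashReport; // assumed crate path

  fn check_sign_slash_report(db: &mut impl Db, set: ValidatorSet) -> bool {
    let mut txn = db.txn();
    // The payload is (), so receipt of the message itself is the notification
    let notified = SignSlashReport::try_recv(&mut txn, set).is_some();
    txn.commit();
    notified
  }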
66
coordinator/substrate/src/publish_batch.rs
Normal file
@@ -0,0 +1,66 @@
use core::future::Future;
use std::sync::Arc;

use serai_db::{DbTxn, Db};

use serai_client::{primitives::NetworkId, SeraiError, Serai};

use serai_task::ContinuallyRan;

use crate::SignedBatches;

/// Publish `SignedBatch`s from `SignedBatches` onto Serai.
pub struct PublishBatchTask<D: Db> {
  db: D,
  serai: Arc<Serai>,
  network: NetworkId,
}

impl<D: Db> PublishBatchTask<D> {
  /// Create a task to publish `SignedBatch`s onto Serai.
  ///
  /// Returns None if `network == NetworkId::Serai`.
  // TODO: ExternalNetworkId
  pub fn new(db: D, serai: Arc<Serai>, network: NetworkId) -> Option<Self> {
    if network == NetworkId::Serai {
      None?
    };
    Some(Self { db, serai, network })
  }
}

impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
  type Error = SeraiError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;

      loop {
        let mut txn = self.db.txn();
        let Some(batch) = SignedBatches::try_recv(&mut txn, self.network) else {
          // No batch to publish at this time
          break;
        };

        // Publish this Batch if it hasn't already been published
        let serai = self.serai.as_of_latest_finalized_block().await?;
        let last_batch = serai.in_instructions().last_batch_for_network(self.network).await?;
        if last_batch < Some(batch.batch.id) {
          // This stream of Batches *should* be sequential within the larger context of the Serai
          // coordinator. In this library, we use a more relaxed definition and don't assert
          // sequence. This does risk hanging the task, if Batch #n+1 is sent before Batch #n, but
          // that is a documented fault of the `SignedBatches` API.
          self
            .serai
            .publish(&serai_client::in_instructions::SeraiInInstructions::execute_batch(batch))
            .await?;
        }

        txn.commit();
        made_progress = true;
      }
      Ok(made_progress)
    }
  }
}
89
coordinator/substrate/src/publish_slash_report.rs
Normal file
@@ -0,0 +1,89 @@
use core::future::Future;
use std::sync::Arc;

use serai_db::{DbTxn, Db};

use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, Serai};

use serai_task::ContinuallyRan;

use crate::SlashReports;

/// Publish slash reports from `SlashReports` onto Serai.
pub struct PublishSlashReportTask<D: Db> {
  db: D,
  serai: Arc<Serai>,
}

impl<D: Db> PublishSlashReportTask<D> {
  /// Create a task to publish slash reports onto Serai.
  pub fn new(db: D, serai: Arc<Serai>) -> Self {
    Self { db, serai }
  }
}

impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
      for network in serai_client::primitives::NETWORKS {
        if network == NetworkId::Serai {
          continue;
        };

        let mut txn = self.db.txn();
        let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else {
          // No slash report to publish
          continue;
        };

        let serai =
          self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
        let serai = serai.validator_sets();
        let session_after_slash_report = Session(session.0 + 1);
        let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
        let current_session = current_session.map(|session| session.0);
        // Only attempt to publish the slash report for session #n while session #n+1 is still
        // active
        let session_after_slash_report_retired =
          current_session > Some(session_after_slash_report.0);
        if session_after_slash_report_retired {
          // Commit the txn to drain this slash report from the database and not try it again later
          txn.commit();
          continue;
        }

        if Some(session_after_slash_report.0) != current_session {
          // We already checked the current session wasn't greater, and they're not equal
          assert!(current_session < Some(session_after_slash_report.0));
          // This would mean the Serai node is resyncing and is behind where it prior was
          Err("have a slash report for a session Serai has yet to retire".to_string())?;
        }

        // If this session which should publish a slash report already has, move on
        let key_pending_slash_report =
          serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
        if key_pending_slash_report.is_none() {
          txn.commit();
          continue;
        };

        match self.serai.publish(&slash_report).await {
          Ok(()) => {
            txn.commit();
            made_progress = true;
          }
          // This could be specific to this TX (such as an already in mempool error) and it may be
          // worthwhile to continue iteration with the other pending slash reports. We assume this
          // error is ephemeral and that the latency incurred for it to resolve is minuscule
          // compared to the window available to publish the slash report. That makes this a
          // non-issue.
          Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}"))?,
        }
      }
      Ok(made_progress)
    }
  }
}
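The publication window enforced above can be restated as a pure function (an illustrative
restatement, not part of the crate, and omitting the pending-key check):

  // The slash report for session n is only publishable while session n + 1 is
  // the currently active session.
  #[derive(Debug, PartialEq)]
  enum SlashReportAction {
    Publish,
    Drop,       // session n + 1 already retired; the window has passed
    NodeBehind, // the Serai node is behind (resyncing), surfaced as an error
  }

  fn slash_report_action(report_session: u32, current_session: Option<u32>) -> SlashReportAction {
    let session_after = report_session + 1;
    match current_session {
      Some(current) if current > session_after => SlashReportAction::Drop,
      Some(current) if current == session_after => SlashReportAction::Publish,
      _ => SlashReportAction::NodeBehind,
    }
  }

  #[test]
  fn slash_report_window() {
    assert_eq!(slash_report_action(5, Some(6)), SlashReportAction::Publish);
    assert_eq!(slash_report_action(5, Some(7)), SlashReportAction::Drop);
    assert_eq!(slash_report_action(5, Some(5)), SlashReportAction::NodeBehind);
    assert_eq!(slash_report_action(5, None), SlashReportAction::NodeBehind);
  }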
88
coordinator/substrate/src/set_keys.rs
Normal file
@@ -0,0 +1,88 @@
use core::future::Future;
use std::sync::Arc;

use serai_db::{DbTxn, Db};

use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};

use serai_task::ContinuallyRan;

use crate::Keys;

/// Set keys from `Keys` on Serai.
pub struct SetKeysTask<D: Db> {
  db: D,
  serai: Arc<Serai>,
}

impl<D: Db> SetKeysTask<D> {
  /// Create a task to set keys onto Serai.
  pub fn new(db: D, serai: Arc<Serai>) -> Self {
    Self { db, serai }
  }
}

impl<D: Db> ContinuallyRan for SetKeysTask<D> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
      for network in serai_client::primitives::NETWORKS {
        if network == NetworkId::Serai {
          continue;
        };

        let mut txn = self.db.txn();
        let Some((session, keys)) = Keys::take(&mut txn, network) else {
          // No keys to set
          continue;
        };

        let serai =
          self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
        let serai = serai.validator_sets();
        let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
        let current_session = current_session.map(|session| session.0);
        // Only attempt to set these keys if this isn't a retired session
        if Some(session.0) < current_session {
          // Commit the txn to take these keys from the database and not try it again later
          txn.commit();
          continue;
        }

        if Some(session.0) != current_session {
          // We already checked the current session wasn't greater, and they're not equal
          assert!(current_session < Some(session.0));
          // This would mean the Serai node is resyncing and is behind where it prior was
          Err("have keys for a session Serai has yet to start".to_string())?;
        }

        // If this session already has had its keys set, move on
        if serai
          .keys(ValidatorSet { network, session })
          .await
          .map_err(|e| format!("{e:?}"))?
          .is_some()
        {
          txn.commit();
          continue;
        };

        match self.serai.publish(&keys).await {
          Ok(()) => {
            txn.commit();
            made_progress = true;
          }
          // This could be specific to this TX (such as an already in mempool error) and it may be
          // worthwhile to continue iteration with the other pending keys. We assume this error is
          // ephemeral and that the latency incurred for it to resolve is minuscule compared to the
          // window reasonable to set the keys. That makes this a non-issue.
          Err(e) => Err(format!("couldn't publish set keys transaction: {e:?}"))?,
        }
      }
      Ok(made_progress)
    }
  }
}
49
coordinator/tributary-sdk/Cargo.toml
Normal file
@@ -0,0 +1,49 @@
[package]
name = "tributary-sdk"
version = "0.1.0"
description = "A micro-blockchain to provide consensus and ordering to P2P communication"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary-sdk"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.81"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
thiserror = { version = "2", default-features = false, features = ["std"] }

subtle = { version = "^2", default-features = false, features = ["std"] }
zeroize = { version = "^1.5", default-features = false, features = ["std"] }

rand = { version = "0.8", default-features = false, features = ["std"] }
rand_chacha = { version = "0.3", default-features = false, features = ["std"] }

blake2 = { version = "0.10", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["std", "recommended"] }

ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", version = "0.4", default-features = false, features = ["std", "ristretto"] }
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", version = "0.5", default-features = false, features = ["std"] }

hex = { version = "0.4", default-features = false, features = ["std"] }
log = { version = "0.4", default-features = false, features = ["std"] }

serai-db = { path = "../../common/db", version = "0.1" }

scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "channel"] }
futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
tendermint = { package = "tendermint-machine", path = "./tendermint", version = "0.2" }

tokio = { version = "1", default-features = false, features = ["sync", "time", "rt"] }

[dev-dependencies]
tokio = { version = "1", features = ["macros"] }

[features]
tests = []
15
coordinator/tributary-sdk/LICENSE
Normal file
@@ -0,0 +1,15 @@
AGPL-3.0-only license

Copyright (c) 2023 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
3
coordinator/tributary-sdk/README.md
Normal file
@@ -0,0 +1,3 @@
# Tributary

A verifiable, ordered broadcast layer implemented as a BFT micro-blockchain.
388
coordinator/tributary-sdk/src/lib.rs
Normal file
@@ -0,0 +1,388 @@
use core::{marker::PhantomData, fmt::Debug, future::Future};
use std::{sync::Arc, io};

use zeroize::Zeroizing;

use ciphersuite::{Ciphersuite, Ristretto};

use scale::Decode;
use futures_channel::mpsc::UnboundedReceiver;
use futures_util::{StreamExt, SinkExt};
use ::tendermint::{
  ext::{BlockNumber, Commit, Block as BlockTrait, Network},
  SignedMessageFor, SyncedBlock, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender,
  TendermintMachine, TendermintHandle,
};

pub use ::tendermint::Evidence;

use serai_db::Db;

use tokio::sync::RwLock;

mod merkle;
pub(crate) use merkle::*;

pub mod transaction;
pub use transaction::{TransactionError, Signed, TransactionKind, Transaction as TransactionTrait};

use crate::tendermint::tx::TendermintTx;

mod provided;
pub(crate) use provided::*;
pub use provided::ProvidedError;

mod block;
pub use block::*;

mod blockchain;
pub(crate) use blockchain::*;

mod mempool;
pub(crate) use mempool::*;

pub mod tendermint;
pub(crate) use crate::tendermint::*;

#[cfg(any(test, feature = "tests"))]
pub mod tests;

/// Size limit for an individual transaction.
// This needs to be big enough to participate in a 101-of-150 eVRF DKG with each element taking
// `MAX_KEY_LEN`. This also needs to be big enough to participate in signing 520 Bitcoin inputs
// with 49 key shares, and signing 120 Monero inputs with 49 key shares.
// TODO: Add a test for these properties
pub const TRANSACTION_SIZE_LIMIT: usize = 2_000_000;
/// Amount of transactions a single account may have in the mempool.
pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
/// Block size limit.
// This targets a growth limit of roughly 30 GB a day, under load, in order to prevent a malicious
// participant from flooding disks and causing out of space errors in other processes.
pub const BLOCK_SIZE_LIMIT: usize = 2_001_000;

pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
pub(crate) const TRANSACTION_MESSAGE: u8 = 1;
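The "roughly 30 GB a day" figure checks out against the 6-second target block time defined later
in this diff (999ms block processing plus 3 × 1667ms latency); a worked verification:

  // Constants mirror BLOCK_PROCESSING_TIME and LATENCY_TIME from tendermint.rs below.
  const TARGET_BLOCK_TIME_MS: u64 = 999 + (3 * 1667); // 6000ms
  const BLOCKS_PER_DAY: u64 = (24 * 60 * 60 * 1000) / TARGET_BLOCK_TIME_MS; // 14_400
  // 14_400 blocks × 2_001_000 bytes = 28_814_400_000 bytes ≈ 28.8 GB per day
  const MAX_GROWTH_PER_DAY: u64 = BLOCKS_PER_DAY * 2_001_000;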
#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Transaction<T: TransactionTrait> {
  Tendermint(TendermintTx),
  Application(T),
}

impl<T: TransactionTrait> ReadWrite for Transaction<T> {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0];
    reader.read_exact(&mut kind)?;
    match kind[0] {
      0 => {
        let tx = TendermintTx::read(reader)?;
        Ok(Transaction::Tendermint(tx))
      }
      1 => {
        let tx = T::read(reader)?;
        Ok(Transaction::Application(tx))
      }
      _ => Err(io::Error::other("invalid transaction type")),
    }
  }
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      Transaction::Tendermint(tx) => {
        writer.write_all(&[0])?;
        tx.write(writer)
      }
      Transaction::Application(tx) => {
        writer.write_all(&[1])?;
        tx.write(writer)
      }
    }
  }
}

impl<T: TransactionTrait> Transaction<T> {
  pub fn hash(&self) -> [u8; 32] {
    match self {
      Transaction::Tendermint(tx) => tx.hash(),
      Transaction::Application(tx) => tx.hash(),
    }
  }

  pub fn kind(&self) -> TransactionKind {
    match self {
      Transaction::Tendermint(tx) => tx.kind(),
      Transaction::Application(tx) => tx.kind(),
    }
  }
}

/// An item which can be read and written.
pub trait ReadWrite: Sized {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;

  fn serialize(&self) -> Vec<u8> {
    // BlockHeader is 64 bytes and likely the smallest item in this system
    let mut buf = Vec::with_capacity(64);
    self.write(&mut buf).unwrap();
    buf
  }
}
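As a usage sketch, an application-defined type (the `Ping` type here is hypothetical, not part of
this crate) would implement `ReadWrite` as follows:

  use std::io;

  struct Ping {
    nonce: u32,
  }

  impl ReadWrite for Ping {
    fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
      let mut nonce = [0; 4];
      reader.read_exact(&mut nonce)?;
      Ok(Ping { nonce: u32::from_le_bytes(nonce) })
    }
    fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
      writer.write_all(&self.nonce.to_le_bytes())
    }
    // `serialize` is inherited from the trait's default method
  }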
pub trait P2p: 'static + Send + Sync + Clone {
  /// Broadcast a message to all other members of the Tributary with the specified genesis.
  ///
  /// The Tributary will re-broadcast consensus messages on a fixed interval to ensure they aren't
  /// prematurely dropped from the P2P layer. The P2P layer SHOULD perform content-based
  /// deduplication to ensure a sane amount of load.
  fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()>;
}

impl<P: P2p> P2p for Arc<P> {
  fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()> {
    P::broadcast(self, genesis, msg)
  }
}
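A minimal sketch satisfying this trait's signature (a real implementation would hand `msg` to a
gossip layer scoped by `genesis` and deduplicate by content; `log` and `hex` are dependencies of
this crate per the Cargo.toml above):

  use core::future::Future;

  #[derive(Clone)]
  struct LogOnlyP2p;

  impl P2p for LogOnlyP2p {
    fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()> {
      async move {
        // Drops the message after logging it; illustrative only
        log::trace!("broadcast of {} bytes for tributary {}", msg.len(), hex::encode(genesis));
      }
    }
  }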
#[derive(Clone)]
pub struct Tributary<D: Db, T: TransactionTrait, P: P2p> {
  db: D,

  genesis: [u8; 32],
  network: TendermintNetwork<D, T, P>,

  synced_block: Arc<RwLock<SyncedBlockSender<TendermintNetwork<D, T, P>>>>,
  synced_block_result: Arc<RwLock<SyncedBlockResultReceiver>>,
  messages: Arc<RwLock<MessageSender<TendermintNetwork<D, T, P>>>>,
}

impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
  pub async fn new(
    db: D,
    genesis: [u8; 32],
    start_time: u64,
    key: Zeroizing<<Ristretto as Ciphersuite>::F>,
    validators: Vec<(<Ristretto as Ciphersuite>::G, u64)>,
    p2p: P,
  ) -> Option<Self> {
    log::info!("new Tributary with genesis {}", hex::encode(genesis));

    let validators_vec = validators.iter().map(|validator| validator.0).collect::<Vec<_>>();

    let signer = Arc::new(Signer::new(genesis, key));
    let validators = Arc::new(Validators::new(genesis, validators)?);

    let mut blockchain = Blockchain::new(db.clone(), genesis, &validators_vec);
    let block_number = BlockNumber(blockchain.block_number());

    let start_time = if let Some(commit) = blockchain.commit(&blockchain.tip()) {
      Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time
    } else {
      start_time
    };
    let proposal = TendermintBlock(
      blockchain.build_block::<TendermintNetwork<D, T, P>>(&validators).serialize(),
    );
    let blockchain = Arc::new(RwLock::new(blockchain));

    let network = TendermintNetwork { genesis, signer, validators, blockchain, p2p };

    let TendermintHandle { synced_block, synced_block_result, messages, machine } =
      TendermintMachine::new(
        db.clone(),
        network.clone(),
        genesis,
        block_number,
        start_time,
        proposal,
      )
      .await;
    tokio::spawn(machine.run());

    Some(Self {
      db,
      genesis,
      network,
      synced_block: Arc::new(RwLock::new(synced_block)),
      synced_block_result: Arc::new(RwLock::new(synced_block_result)),
      messages: Arc::new(RwLock::new(messages)),
    })
  }

  pub fn block_time() -> u32 {
    TendermintNetwork::<D, T, P>::block_time()
  }

  pub fn genesis(&self) -> [u8; 32] {
    self.genesis
  }

  pub async fn block_number(&self) -> u64 {
    self.network.blockchain.read().await.block_number()
  }
  pub async fn tip(&self) -> [u8; 32] {
    self.network.blockchain.read().await.tip()
  }

  pub fn reader(&self) -> TributaryReader<D, T> {
    TributaryReader(self.db.clone(), self.genesis, PhantomData)
  }

  pub async fn provide_transaction(&self, tx: T) -> Result<(), ProvidedError> {
    self.network.blockchain.write().await.provide_transaction(tx)
  }

  pub async fn next_nonce(
    &self,
    signer: &<Ristretto as Ciphersuite>::G,
    order: &[u8],
  ) -> Option<u32> {
    self.network.blockchain.read().await.next_nonce(signer, order)
  }

  // Returns Ok(true) if the transaction is new, Ok(false) if it was an already-present unsigned
  // transaction, or the error on failure.
  // Safe to be &self since the only meaningful usage of self is self.network.blockchain which
  // successfully acquires its own write lock
  pub async fn add_transaction(&self, tx: T) -> Result<bool, TransactionError> {
    let tx = Transaction::Application(tx);
    let mut to_broadcast = vec![TRANSACTION_MESSAGE];
    tx.write(&mut to_broadcast).unwrap();
    let res = self.network.blockchain.write().await.add_transaction::<TendermintNetwork<D, T, P>>(
      true,
      tx,
      &self.network.signature_scheme(),
    );
    if res == Ok(true) {
      self.network.p2p.broadcast(self.genesis, to_broadcast).await;
    }
    res
  }

  async fn sync_block_internal(
    &self,
    block: Block<T>,
    commit: Vec<u8>,
    result: &mut UnboundedReceiver<bool>,
  ) -> bool {
    let (tip, block_number) = {
      let blockchain = self.network.blockchain.read().await;
      (blockchain.tip(), blockchain.block_number())
    };

    if block.header.parent != tip {
      log::debug!("told to sync a block whose parent wasn't our tip");
      return false;
    }

    let block = TendermintBlock(block.serialize());
    let mut commit_ref = commit.as_ref();
    let Ok(commit) = Commit::<Arc<Validators>>::decode(&mut commit_ref) else {
      log::error!("sent an invalidly serialized commit");
      return false;
    };
    // Storage DoS vector. We *could* truncate to solely the relevant portion, trying to save this,
    // yet then we'd have to test the truncation was performed correctly.
    if !commit_ref.is_empty() {
      log::error!("sent a commit with additional data after it");
      return false;
    }
    if !self.network.verify_commit(block.id(), &commit) {
      log::error!("sent an invalid commit");
      return false;
    }

    let number = BlockNumber(block_number + 1);
    self.synced_block.write().await.send(SyncedBlock { number, block, commit }).await.unwrap();
    result.next().await.unwrap()
  }

  // Sync a block.
  // TODO: Since we have a static validator set, we should only need the tail commit?
  pub async fn sync_block(&self, block: Block<T>, commit: Vec<u8>) -> bool {
    let mut result = self.synced_block_result.write().await;
    self.sync_block_internal(block, commit, &mut result).await
  }

  // Return true if the message should be rebroadcast.
  pub async fn handle_message(&self, msg: &[u8]) -> bool {
    match msg.first() {
      Some(&TRANSACTION_MESSAGE) => {
        let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else {
          log::error!("received invalid transaction message");
          return false;
        };

        // TODO: Sync mempools with fellow peers
        // Can we just rebroadcast transactions not included for at least two blocks?
        let res =
          self.network.blockchain.write().await.add_transaction::<TendermintNetwork<D, T, P>>(
            false,
            tx,
            &self.network.signature_scheme(),
          );
        log::debug!("received transaction message. valid new transaction: {res:?}");
        res == Ok(true)
      }

      Some(&TENDERMINT_MESSAGE) => {
        let Ok(msg) =
          SignedMessageFor::<TendermintNetwork<D, T, P>>::decode::<&[u8]>(&mut &msg[1 ..])
        else {
          log::error!("received invalid tendermint message");
          return false;
        };

        self.messages.write().await.send(msg).await.unwrap();
        false
      }

      _ => false,
    }
  }

  /// Get a Future which will resolve once the next block has been added.
  pub async fn next_block_notification(
    &self,
  ) -> impl Send + Sync + core::future::Future<Output = Result<(), impl Send + Sync>> {
    let (tx, rx) = tokio::sync::oneshot::channel();
    self.network.blockchain.write().await.next_block_notifications.push_back(tx);
    rx
  }
}

#[derive(Clone)]
pub struct TributaryReader<D: Db, T: TransactionTrait>(D, [u8; 32], PhantomData<T>);
impl<D: Db, T: TransactionTrait> TributaryReader<D, T> {
  pub fn genesis(&self) -> [u8; 32] {
    self.1
  }

  // Since these values are static once set, they can be safely read from the database without lock
  // acquisition
  pub fn block(&self, hash: &[u8; 32]) -> Option<Block<T>> {
    Blockchain::<D, T>::block_from_db(&self.0, self.1, hash)
  }
  pub fn commit(&self, hash: &[u8; 32]) -> Option<Vec<u8>> {
    Blockchain::<D, T>::commit_from_db(&self.0, self.1, hash)
  }
  pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option<Commit<Validators>> {
    self.commit(hash).map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap())
  }
  pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> {
    Blockchain::<D, T>::block_after(&self.0, self.1, hash)
  }
  pub fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> {
    self
      .commit(hash)
      .map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time)
  }

  pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str) -> bool {
    Blockchain::<D, T>::locally_provided_txs_in_block(&self.0, &self.1, hash, order)
  }

  // This isn't static, yet can be read with only minor discrepancy risks
  pub fn tip(&self) -> [u8; 32] {
    Blockchain::<D, T>::tip_from_db(&self.0, self.1)
  }
}
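Tying the above together, a caller might submit an application transaction via a helper like the
following (an illustrative sketch using only the `Tributary` API shown above; it assumes
`TransactionError` implements `Debug`):

  async fn submit<D: Db, T: TransactionTrait, P: P2p>(
    tributary: &Tributary<D, T, P>,
    tx: T,
  ) {
    match tributary.add_transaction(tx).await {
      // New transaction: added to the local mempool and broadcast over P2P
      Ok(true) => {}
      // An already-present unsigned transaction: nothing further to do
      Ok(false) => {}
      Err(e) => log::warn!("transaction rejected: {e:?}"),
    }
  }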
@@ -1,8 +1,6 @@
use core::ops::Deref;
use core::{ops::Deref, future::Future};
use std::{sync::Arc, collections::HashMap};

use async_trait::async_trait;

use subtle::ConstantTimeEq;
use zeroize::{Zeroize, Zeroizing};
@@ -74,50 +72,52 @@ impl Signer {
  }
}

#[async_trait]
impl SignerTrait for Signer {
  type ValidatorId = [u8; 32];
  type Signature = [u8; 64];

  /// Returns the validator's current ID. Returns None if they aren't a current validator.
  async fn validator_id(&self) -> Option<Self::ValidatorId> {
    Some((Ristretto::generator() * self.key.deref()).to_bytes())
  fn validator_id(&self) -> impl Send + Future<Output = Option<Self::ValidatorId>> {
    async move { Some((Ristretto::generator() * self.key.deref()).to_bytes()) }
  }

  /// Sign a signature with the current validator's private key.
  async fn sign(&self, msg: &[u8]) -> Self::Signature {
    let mut nonce = Zeroizing::new(RecommendedTranscript::new(b"Tributary Chain Tendermint Nonce"));
    nonce.append_message(b"genesis", self.genesis);
    nonce.append_message(b"key", Zeroizing::new(self.key.deref().to_repr()).as_ref());
    nonce.append_message(b"message", msg);
    let mut nonce = nonce.challenge(b"nonce");
  fn sign(&self, msg: &[u8]) -> impl Send + Future<Output = Self::Signature> {
    async move {
      let mut nonce =
        Zeroizing::new(RecommendedTranscript::new(b"Tributary Chain Tendermint Nonce"));
      nonce.append_message(b"genesis", self.genesis);
      nonce.append_message(b"key", Zeroizing::new(self.key.deref().to_repr()).as_ref());
      nonce.append_message(b"message", msg);
      let mut nonce = nonce.challenge(b"nonce");

    let mut nonce_arr = [0; 64];
    nonce_arr.copy_from_slice(nonce.as_ref());
      let mut nonce_arr = [0; 64];
      nonce_arr.copy_from_slice(nonce.as_ref());

    let nonce_ref: &mut [u8] = nonce.as_mut();
    nonce_ref.zeroize();
    let nonce_ref: &[u8] = nonce.as_ref();
    assert_eq!(nonce_ref, [0; 64].as_ref());
      let nonce_ref: &mut [u8] = nonce.as_mut();
      nonce_ref.zeroize();
      let nonce_ref: &[u8] = nonce.as_ref();
      assert_eq!(nonce_ref, [0; 64].as_ref());

    let nonce =
      Zeroizing::new(<Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(&nonce_arr));
    nonce_arr.zeroize();
      let nonce =
        Zeroizing::new(<Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(&nonce_arr));
      nonce_arr.zeroize();

    assert!(!bool::from(nonce.ct_eq(&<Ristretto as Ciphersuite>::F::ZERO)));
      assert!(!bool::from(nonce.ct_eq(&<Ristretto as Ciphersuite>::F::ZERO)));

    let challenge = challenge(
      self.genesis,
      (Ristretto::generator() * self.key.deref()).to_bytes(),
      (Ristretto::generator() * nonce.deref()).to_bytes().as_ref(),
      msg,
    );
      let challenge = challenge(
        self.genesis,
        (Ristretto::generator() * self.key.deref()).to_bytes(),
        (Ristretto::generator() * nonce.deref()).to_bytes().as_ref(),
        msg,
      );

    let sig = SchnorrSignature::<Ristretto>::sign(&self.key, nonce, challenge).serialize();
      let sig = SchnorrSignature::<Ristretto>::sign(&self.key, nonce, challenge).serialize();

    let mut res = [0; 64];
    res.copy_from_slice(&sig);
    res
      let mut res = [0; 64];
      res.copy_from_slice(&sig);
      res
    }
  }
}
@@ -274,7 +274,6 @@ pub const BLOCK_PROCESSING_TIME: u32 = 999;
|
||||
pub const LATENCY_TIME: u32 = 1667;
|
||||
pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME);
|
||||
|
||||
#[async_trait]
|
||||
impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P> {
|
||||
type Db = D;
|
||||
|
||||
@@ -300,111 +299,126 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
self.validators.clone()
}

async fn broadcast(&mut self, msg: SignedMessageFor<Self>) {
let mut to_broadcast = vec![TENDERMINT_MESSAGE];
to_broadcast.extend(msg.encode());
self.p2p.broadcast(self.genesis, to_broadcast).await
}
fn broadcast(&mut self, msg: SignedMessageFor<Self>) -> impl Send + Future<Output = ()> {
async move {
let mut to_broadcast = vec![TENDERMINT_MESSAGE];
to_broadcast.extend(msg.encode());
self.p2p.broadcast(self.genesis, to_broadcast).await
}
}

async fn slash(&mut self, validator: Self::ValidatorId, slash_event: SlashEvent) {
fn slash(
&mut self,
validator: Self::ValidatorId,
slash_event: SlashEvent,
) -> impl Send + Future<Output = ()> {
async move {
log::error!(
"validator {} triggered a slash event on tributary {} (with evidence: {})",
hex::encode(validator),
hex::encode(self.genesis),
matches!(slash_event, SlashEvent::WithEvidence(_)),
);

let signer = self.signer();
let Some(tx) = (match slash_event {
SlashEvent::WithEvidence(evidence) => {
// create an unsigned evidence tx
Some(TendermintTx::SlashEvidence(evidence))
}
SlashEvent::Id(_reason, _block, _round) => {
// TODO: Increase locally observed slash points
None
}
}) else {
return;
};

// add tx to blockchain and broadcast to peers
let mut to_broadcast = vec![TRANSACTION_MESSAGE];
tx.write(&mut to_broadcast).unwrap();
if self.blockchain.write().await.add_transaction::<Self>(
true,
Transaction::Tendermint(tx),
&self.signature_scheme(),
) == Ok(true)
{
self.p2p.broadcast(signer.genesis, to_broadcast).await;
}
}
}

async fn validate(&self, block: &Self::Block) -> Result<(), TendermintBlockError> {
fn validate(
&self,
block: &Self::Block,
) -> impl Send + Future<Output = Result<(), TendermintBlockError>> {
async move {
let block =
Block::read::<&[u8]>(&mut block.0.as_ref()).map_err(|_| TendermintBlockError::Fatal)?;
self
.blockchain
.read()
.await
.verify_block::<Self>(&block, &self.signature_scheme(), false)
.map_err(|e| match e {
BlockError::NonLocalProvided(_) => TendermintBlockError::Temporal,
_ => {
log::warn!("Tributary Tendermint validate returning BlockError::Fatal due to {e:?}");
TendermintBlockError::Fatal
}
})
}
}

async fn add_block(
fn add_block(
&mut self,
serialized_block: Self::Block,
commit: Commit<Self::SignatureScheme>,
) -> Option<Self::Block> {
) -> impl Send + Future<Output = Option<Self::Block>> {
async move {
let invalid_block = || {
// There's a fatal flaw in the code, it's behind a hard fork, or the validators turned
// malicious
// All justify a halt to then achieve social consensus from
// TODO: Under multiple validator sets, a small validator set turning malicious knocks
// off the entire network. That's an unacceptable DoS.
panic!("validators added invalid block to tributary {}", hex::encode(self.genesis));
};

// Tendermint should only produce valid commits
assert!(self.verify_commit(serialized_block.id(), &commit));

let Ok(block) = Block::read::<&[u8]>(&mut serialized_block.0.as_ref()) else {
return invalid_block();
};

let encoded_commit = commit.encode();
loop {
let block_res = self.blockchain.write().await.add_block::<Self>(
&block,
encoded_commit.clone(),
&self.signature_scheme(),
);
match block_res {
Ok(()) => {
// If we successfully added this block, break
break;
}
Err(BlockError::NonLocalProvided(hash)) => {
log::error!(
"missing provided transaction {} which other validators on tributary {} had",
hex::encode(hash),
hex::encode(self.genesis)
);
tokio::time::sleep(core::time::Duration::from_secs(5)).await;
}
_ => return invalid_block(),
}
}

Some(TendermintBlock(
self.blockchain.write().await.build_block::<Self>(&self.signature_scheme()).serialize(),
))
}
}
}
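The pattern applied throughout this file: `#[async_trait]` methods are rewritten as plain trait methods returning `impl Send + Future<Output = ...>`, which avoids the boxed futures async-trait generates. A minimal sketch of the transform, with hypothetical names (return-position impl Trait in traits is stable as of Rust 1.75, within this workspace's 1.81 MSRV):

use core::future::Future;

trait Broadcast {
  // Previously: #[async_trait] async fn broadcast(&self, msg: Vec<u8>);
  fn broadcast(&self, msg: Vec<u8>) -> impl Send + Future<Output = ()>;
}

struct Logger;
impl Broadcast for Logger {
  fn broadcast(&self, msg: Vec<u8>) -> impl Send + Future<Output = ()> {
    // The body moves into an `async move` block, awaited identically by callers
    async move { println!("broadcasting {} bytes", msg.len()) }
  }
}
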
12
coordinator/tributary-sdk/src/tests/p2p.rs
Normal file
@@ -0,0 +1,12 @@
use core::future::Future;

pub use crate::P2p;

#[derive(Clone, Debug)]
pub struct DummyP2p;

impl P2p for DummyP2p {
fn broadcast(&self, _: [u8; 32], _: Vec<u8>) -> impl Send + Future<Output = ()> {
async move { unimplemented!() }
}
}

@@ -1,4 +1,7 @@
use core::future::Future;

use tendermint::ext::Network;

use crate::{
P2p, TendermintTx,
tendermint::{TARGET_BLOCK_TIME, TendermintNetwork},
@@ -11,10 +14,9 @@ fn assert_target_block_time() {
#[derive(Clone, Debug)]
pub struct DummyP2p;

#[async_trait::async_trait]
impl P2p for DummyP2p {
async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
unimplemented!()
fn broadcast(&self, _: [u8; 32], _: Vec<u8>) -> impl Send + Future<Output = ()> {
async move { unimplemented!() }
}
}

218
coordinator/tributary-sdk/src/transaction.rs
Normal file
@@ -0,0 +1,218 @@
use core::fmt::Debug;
use std::io;

use zeroize::Zeroize;
use thiserror::Error;

use blake2::{Digest, Blake2b512};

use ciphersuite::{
group::{Group, GroupEncoding},
Ciphersuite, Ristretto,
};
use schnorr::SchnorrSignature;

use crate::{TRANSACTION_SIZE_LIMIT, ReadWrite};

#[derive(Clone, PartialEq, Eq, Debug, Error)]
pub enum TransactionError {
/// Transaction exceeded the size limit.
#[error("transaction is too large")]
TooLargeTransaction,
/// Transaction's signer isn't a participant.
#[error("invalid signer")]
InvalidSigner,
/// Transaction's nonce isn't the prior nonce plus one.
#[error("invalid nonce")]
InvalidNonce,
/// Transaction's signature is invalid.
#[error("invalid signature")]
InvalidSignature,
/// Transaction's content is invalid.
#[error("transaction content is invalid")]
InvalidContent,
/// Transaction's signer has too many transactions in the mempool.
#[error("signer has too many transactions in the mempool")]
TooManyInMempool,
/// Provided Transaction added to mempool.
#[error("provided transaction added to mempool")]
ProvidedAddedToMempool,
}

/// Data for a signed transaction.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Signed {
pub signer: <Ristretto as Ciphersuite>::G,
pub nonce: u32,
pub signature: SchnorrSignature<Ristretto>,
}

impl ReadWrite for Signed {
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let signer = Ristretto::read_G(reader)?;

let mut nonce = [0; 4];
reader.read_exact(&mut nonce)?;
let nonce = u32::from_le_bytes(nonce);
if nonce >= (u32::MAX - 1) {
Err(io::Error::other("nonce exceeded limit"))?;
}

let mut signature = SchnorrSignature::<Ristretto>::read(reader)?;
if signature.R.is_identity().into() {
// Anyone malicious could remove this and try to find zero signatures
// We should never produce zero signatures though meaning this should never come up
// If it does somehow come up, this is a decent courtesy
signature.zeroize();
Err(io::Error::other("signature nonce was identity"))?;
}

Ok(Signed { signer, nonce, signature })
}

fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
// This is either an invalid signature or a private key leak
if self.signature.R.is_identity().into() {
Err(io::Error::other("signature nonce was identity"))?;
}
writer.write_all(&self.signer.to_bytes())?;
writer.write_all(&self.nonce.to_le_bytes())?;
self.signature.write(writer)
}
}

impl Signed {
pub fn read_without_nonce<R: io::Read>(reader: &mut R, nonce: u32) -> io::Result<Self> {
let signer = Ristretto::read_G(reader)?;

let mut signature = SchnorrSignature::<Ristretto>::read(reader)?;
if signature.R.is_identity().into() {
// Anyone malicious could remove this and try to find zero signatures
// We should never produce zero signatures though meaning this should never come up
// If it does somehow come up, this is a decent courtesy
signature.zeroize();
Err(io::Error::other("signature nonce was identity"))?;
}

Ok(Signed { signer, nonce, signature })
}

pub fn write_without_nonce<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
// This is either an invalid signature or a private key leak
if self.signature.R.is_identity().into() {
Err(io::Error::other("signature nonce was identity"))?;
}
writer.write_all(&self.signer.to_bytes())?;
self.signature.write(writer)
}
}

#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum TransactionKind {
/// This transaction should be provided by every validator, in an exact order.
///
/// The contained static string names the orderer to use. This allows two distinct provided
/// transaction kinds, without a synchronized order, to be ordered within their own kind without
/// requiring ordering with each other.
///
/// The only malleability is in when this transaction appears on chain. The block producer will
/// include it when they have it. Block verification will fail for validators without it.
///
/// If a supermajority of validators produce a commit for a block with a provided transaction
/// which isn't locally held, the block will be added to the local chain. When the transaction is
/// locally provided, it will be compared for correctness to the on-chain version
///
/// In order to ensure TXs aren't accidentally provided multiple times, all provided transactions
/// must have a unique hash which is also unique to all Unsigned transactions.
Provided(&'static str),

/// An unsigned transaction, only able to be included by the block producer.
///
/// Once an Unsigned transaction is included on-chain, it may not be included again. In order to
/// have multiple Unsigned transactions with the same values included on-chain, some distinct
/// nonce must be included in order to cause a distinct hash.
///
/// The hash must also be unique with all Provided transactions.
Unsigned,

/// A signed transaction.
Signed(Vec<u8>, Signed),
}

// TODO: Should this be renamed TransactionTrait now that a literal Transaction exists?
// Or should the literal Transaction be renamed to Event?
pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite {
/// Return what type of transaction this is.
fn kind(&self) -> TransactionKind;

/// Return the hash of this transaction.
///
/// The hash must NOT commit to the signature.
fn hash(&self) -> [u8; 32];

/// Perform transaction-specific verification.
fn verify(&self) -> Result<(), TransactionError>;

/// Obtain the challenge for this transaction's signature.
///
/// Do not override this unless you know what you're doing.
///
/// Panics if called on non-signed transactions.
fn sig_hash(&self, genesis: [u8; 32]) -> <Ristretto as Ciphersuite>::F {
match self.kind() {
TransactionKind::Signed(order, Signed { signature, .. }) => {
<Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(
&Blake2b512::digest(
[
b"Tributary Signed Transaction",
genesis.as_ref(),
&self.hash(),
order.as_ref(),
signature.R.to_bytes().as_ref(),
]
.concat(),
)
.into(),
)
}
_ => panic!("sig_hash called on non-signed transaction"),
}
}
}

pub trait GAIN: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32> {}
impl<F: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32>> GAIN for F {}

pub(crate) fn verify_transaction<F: GAIN, T: Transaction>(
tx: &T,
genesis: [u8; 32],
get_and_increment_nonce: &mut F,
) -> Result<(), TransactionError> {
if tx.serialize().len() > TRANSACTION_SIZE_LIMIT {
Err(TransactionError::TooLargeTransaction)?;
}

tx.verify()?;

match tx.kind() {
TransactionKind::Provided(_) | TransactionKind::Unsigned => {}
TransactionKind::Signed(order, Signed { signer, nonce, signature }) => {
if let Some(next_nonce) = get_and_increment_nonce(&signer, &order) {
if nonce != next_nonce {
Err(TransactionError::InvalidNonce)?;
}
} else {
// Not a participant
Err(TransactionError::InvalidSigner)?;
}

// TODO: Use a batch verification here
if !signature.verify(signer, tx.sig_hash(genesis)) {
Err(TransactionError::InvalidSignature)?;
}
}
}

Ok(())
}
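As a hedged illustration of this new API (not part of this commit; the type is hypothetical), an application could define an unsigned transaction as follows, reusing the `ReadWrite`, `Transaction`, `TransactionKind`, and `TransactionError` items above:

use std::io;
use blake2::{Digest, Blake2b512};

// Hypothetical toy transaction: an unsigned, nonce-distinguished blob
#[derive(Clone, PartialEq, Eq, Debug)]
struct Heartbeat { nonce: u64 }

impl ReadWrite for Heartbeat {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut nonce = [0; 8];
    reader.read_exact(&mut nonce)?;
    Ok(Heartbeat { nonce: u64::from_le_bytes(nonce) })
  }
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.nonce.to_le_bytes())
  }
}

impl Transaction for Heartbeat {
  // Unsigned: only includable by the block producer, so the hash must be unique,
  // which the distinct nonce provides
  fn kind(&self) -> TransactionKind { TransactionKind::Unsigned }
  fn hash(&self) -> [u8; 32] {
    let mut hash = [0; 32];
    hash.copy_from_slice(&Blake2b512::digest(self.serialize())[.. 32]);
    hash
  }
  // No content beyond the nonce, hence nothing further to verify
  fn verify(&self) -> Result<(), TransactionError> { Ok(()) }
}
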
@@ -16,7 +16,6 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true

[dependencies]
async-trait = { version = "0.1", default-features = false }
thiserror = { version = "2", default-features = false, features = ["std"] }

hex = { version = "0.4", default-features = false, features = ["std"] }
@@ -1,7 +1,6 @@
use core::{hash::Hash, fmt::Debug};
use core::{hash::Hash, fmt::Debug, future::Future};
use std::{sync::Arc, collections::HashSet};

use async_trait::async_trait;
use thiserror::Error;

use parity_scale_codec::{Encode, Decode};
@@ -34,7 +33,6 @@ pub struct BlockNumber(pub u64);
pub struct RoundNumber(pub u32);

/// A signer for a validator.
#[async_trait]
pub trait Signer: Send + Sync {
// Type used to identify validators.
type ValidatorId: ValidatorId;
@@ -42,22 +40,21 @@ pub trait Signer: Send + Sync {
type Signature: Signature;

/// Returns the validator's current ID. Returns None if they aren't a current validator.
async fn validator_id(&self) -> Option<Self::ValidatorId>;
fn validator_id(&self) -> impl Send + Future<Output = Option<Self::ValidatorId>>;
/// Sign a signature with the current validator's private key.
async fn sign(&self, msg: &[u8]) -> Self::Signature;
fn sign(&self, msg: &[u8]) -> impl Send + Future<Output = Self::Signature>;
}

#[async_trait]
impl<S: Signer> Signer for Arc<S> {
type ValidatorId = S::ValidatorId;
type Signature = S::Signature;

async fn validator_id(&self) -> Option<Self::ValidatorId> {
self.as_ref().validator_id().await
fn validator_id(&self) -> impl Send + Future<Output = Option<Self::ValidatorId>> {
self.as_ref().validator_id()
}

async fn sign(&self, msg: &[u8]) -> Self::Signature {
self.as_ref().sign(msg).await
fn sign(&self, msg: &[u8]) -> impl Send + Future<Output = Self::Signature> {
self.as_ref().sign(msg)
}
}

@@ -210,7 +207,6 @@ pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode
}

/// Trait representing the distributed system Tendermint is providing consensus over.
#[async_trait]
pub trait Network: Sized + Send + Sync {
/// The database used to back this.
type Db: serai_db::Db;
@@ -229,6 +225,7 @@ pub trait Network: Sized + Send + Sync {
/// This should include both the time to download the block and the actual processing time.
///
/// BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME) must be divisible by 1000.
// TODO: Redefine as Duration
const BLOCK_PROCESSING_TIME: u32;
/// Network latency time in milliseconds.
///
@@ -280,15 +277,19 @@ pub trait Network: Sized + Send + Sync {
/// Switching to unauthenticated channels in a system already providing authenticated channels is
/// not recommended as this is a minor, temporal inefficiency, while downgrading channels may
/// have wider implications.
async fn broadcast(&mut self, msg: SignedMessageFor<Self>);
fn broadcast(&mut self, msg: SignedMessageFor<Self>) -> impl Send + Future<Output = ()>;

/// Trigger a slash for the validator in question who was definitively malicious.
///
/// The exact process of triggering a slash is undefined and left to the network as a whole.
async fn slash(&mut self, validator: Self::ValidatorId, slash_event: SlashEvent);
fn slash(
&mut self,
validator: Self::ValidatorId,
slash_event: SlashEvent,
) -> impl Send + Future<Output = ()>;

/// Validate a block.
async fn validate(&self, block: &Self::Block) -> Result<(), BlockError>;
fn validate(&self, block: &Self::Block) -> impl Send + Future<Output = Result<(), BlockError>>;

/// Add a block, returning the proposal for the next one.
///
@@ -298,9 +299,9 @@ pub trait Network: Sized + Send + Sync {
/// This deviates from the paper which will have a local node refuse to decide on a block it
/// considers invalid. This library acknowledges the network did decide on it, leaving handling
/// of it to the network, and outside of this scope.
async fn add_block(
fn add_block(
&mut self,
block: Self::Block,
commit: Commit<Self::SignatureScheme>,
) -> Option<Self::Block>;
) -> impl Send + Future<Output = Option<Self::Block>>;
}
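Call sites are unaffected by these signature changes: a method returning `impl Future` is awaited exactly like an `async fn`. A hypothetical caller, for illustration only:

async fn drive<N: Network>(network: &mut N, msg: SignedMessageFor<N>) {
  // Identical syntax whether broadcast was declared via async-trait or impl Future
  network.broadcast(msg).await;
}
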
@@ -1,10 +1,9 @@
use core::future::Future;
use std::{
sync::Arc,
time::{UNIX_EPOCH, SystemTime, Duration},
};

use async_trait::async_trait;

use parity_scale_codec::{Encode, Decode};

use futures_util::sink::SinkExt;
@@ -21,20 +20,21 @@ type TestValidatorId = u16;
type TestBlockId = [u8; 4];

struct TestSigner(u16);
#[async_trait]
impl Signer for TestSigner {
type ValidatorId = TestValidatorId;
type Signature = [u8; 32];

async fn validator_id(&self) -> Option<TestValidatorId> {
Some(self.0)
fn validator_id(&self) -> impl Send + Future<Output = Option<TestValidatorId>> {
async move { Some(self.0) }
}

async fn sign(&self, msg: &[u8]) -> [u8; 32] {
let mut sig = [0; 32];
sig[.. 2].copy_from_slice(&self.0.to_le_bytes());
sig[2 .. (2 + 30.min(msg.len()))].copy_from_slice(&msg[.. 30.min(msg.len())]);
sig
fn sign(&self, msg: &[u8]) -> impl Send + Future<Output = [u8; 32]> {
async move {
let mut sig = [0; 32];
sig[.. 2].copy_from_slice(&self.0.to_le_bytes());
sig[2 .. (2 + 30.min(msg.len()))].copy_from_slice(&msg[.. 30.min(msg.len())]);
sig
}
}
}

@@ -111,7 +111,6 @@ struct TestNetwork(
Arc<RwLock<Vec<(MessageSender<Self>, SyncedBlockSender<Self>, SyncedBlockResultReceiver)>>>,
);

#[async_trait]
impl Network for TestNetwork {
type Db = MemDb;

@@ -1,11 +1,13 @@
[package]
name = "tributary-chain"
name = "serai-coordinator-tributary"
version = "0.1.0"
description = "A micro-blockchain to provide consensus and ordering to P2P communication"
description = "The Tributary used by the Serai Coordinator"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.81"

[package.metadata.docs.rs]
@@ -16,35 +18,29 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true

[dependencies]
async-trait = { version = "0.1", default-features = false }
thiserror = { version = "2", default-features = false, features = ["std"] }

subtle = { version = "^2", default-features = false, features = ["std"] }
zeroize = { version = "^1.5", default-features = false, features = ["std"] }

rand = { version = "0.8", default-features = false, features = ["std"] }
rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }

blake2 = { version = "0.10", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] }

ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }
ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] }

hex = { version = "0.4", default-features = false, features = ["std"] }
log = { version = "0.4", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }

serai-db = { path = "../../common/db" }
serai-task = { path = "../../common/task", version = "0.1" }

scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "channel"] }
futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
tendermint = { package = "tendermint-machine", path = "./tendermint" }
tributary-sdk = { path = "../tributary-sdk" }

tokio = { version = "1", default-features = false, features = ["sync", "time", "rt"] }
serai-cosign = { path = "../cosign" }
serai-coordinator-substrate = { path = "../substrate" }

[dev-dependencies]
tokio = { version = "1", features = ["macros"] }
messages = { package = "serai-processor-messages", path = "../../processor/messages" }

log = { version = "0.4", default-features = false, features = ["std"] }

[features]
tests = []
longer-reattempts = []

@@ -1,6 +1,6 @@
AGPL-3.0-only license

Copyright (c) 2023 Luke Parker
Copyright (c) 2023-2025 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as

@@ -1,3 +1,4 @@
# Tributary
# Serai Coordinator Tributary

A verifiable, ordered broadcast layer implemented as a BFT micro-blockchain.
The Tributary used by the Serai Coordinator. This includes the `Transaction`
definition and the code to handle blocks added on-chain.

@@ -5,28 +5,30 @@ use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::{primitives::SeraiAddress, validator_sets::primitives::ValidatorSet};

use processor_messages::sign::VariantSignId;
use messages::sign::{VariantSignId, SignId};

use serai_db::*;

use crate::tributary::transaction::SigningProtocolRound;
use serai_cosign::CosignIntent;

use crate::transaction::SigningProtocolRound;

/// A topic within the database which the group participates in
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
pub enum Topic {
pub(crate) enum Topic {
/// Vote to remove a participant
RemoveParticipant { participant: SeraiAddress },

// DkgParticipation isn't represented here as participations are immediately sent to the
// processor, not accumulated within this database
/// Participation in the signing protocol to confirm the DKG results on Substrate
DkgConfirmation { attempt: u32, label: SigningProtocolRound },
DkgConfirmation { attempt: u32, round: SigningProtocolRound },

/// The local view of the SlashReport, to be aggregated into the final SlashReport
SlashReport,

/// Participation in a signing protocol
Sign { id: VariantSignId, attempt: u32, label: SigningProtocolRound },
Sign { id: VariantSignId, attempt: u32, round: SigningProtocolRound },
}

enum Participating {
@@ -40,13 +42,13 @@ impl Topic {
#[allow(clippy::match_same_arms)]
match self {
Topic::RemoveParticipant { .. } => None,
Topic::DkgConfirmation { attempt, label: _ } => Some(Topic::DkgConfirmation {
Topic::DkgConfirmation { attempt, round: _ } => Some(Topic::DkgConfirmation {
attempt: attempt + 1,
label: SigningProtocolRound::Preprocess,
round: SigningProtocolRound::Preprocess,
}),
Topic::SlashReport { .. } => None,
Topic::Sign { id, attempt, label: _ } => {
Some(Topic::Sign { id, attempt: attempt + 1, label: SigningProtocolRound::Preprocess })
Topic::Sign { id, attempt, round: _ } => {
Some(Topic::Sign { id, attempt: attempt + 1, round: SigningProtocolRound::Preprocess })
}
}
}
@@ -56,27 +58,40 @@ impl Topic {
#[allow(clippy::match_same_arms)]
match self {
Topic::RemoveParticipant { .. } => None,
Topic::DkgConfirmation { attempt, label } => match label {
Topic::DkgConfirmation { attempt, round } => match round {
SigningProtocolRound::Preprocess => {
let attempt = attempt + 1;
Some((
attempt,
Topic::DkgConfirmation { attempt, label: SigningProtocolRound::Preprocess },
Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Preprocess },
))
}
SigningProtocolRound::Share => None,
},
Topic::SlashReport { .. } => None,
Topic::Sign { id, attempt, label } => match label {
Topic::Sign { id, attempt, round } => match round {
SigningProtocolRound::Preprocess => {
let attempt = attempt + 1;
Some((attempt, Topic::Sign { id, attempt, label: SigningProtocolRound::Preprocess }))
Some((attempt, Topic::Sign { id, attempt, round: SigningProtocolRound::Preprocess }))
}
SigningProtocolRound::Share => None,
},
}
}

// The SignId for this topic
//
// Returns None if Topic isn't Topic::Sign
pub(crate) fn sign_id(self, set: ValidatorSet) -> Option<messages::sign::SignId> {
#[allow(clippy::match_same_arms)]
match self {
Topic::RemoveParticipant { .. } => None,
Topic::DkgConfirmation { .. } => None,
Topic::SlashReport { .. } => None,
Topic::Sign { id, attempt, round: _ } => Some(SignId { session: set.session, id, attempt }),
}
}

/// The topic which precedes this topic as a prerequisite
///
/// The preceding topic must define this topic as succeeding
@@ -84,17 +99,17 @@ impl Topic {
#[allow(clippy::match_same_arms)]
match self {
Topic::RemoveParticipant { .. } => None,
Topic::DkgConfirmation { attempt, label } => match label {
Topic::DkgConfirmation { attempt, round } => match round {
SigningProtocolRound::Preprocess => None,
SigningProtocolRound::Share => {
Some(Topic::DkgConfirmation { attempt, label: SigningProtocolRound::Preprocess })
Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Preprocess })
}
},
Topic::SlashReport { .. } => None,
Topic::Sign { id, attempt, label } => match label {
Topic::Sign { id, attempt, round } => match round {
SigningProtocolRound::Preprocess => None,
SigningProtocolRound::Share => {
Some(Topic::Sign { id, attempt, label: SigningProtocolRound::Preprocess })
Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Preprocess })
}
},
}
@@ -107,16 +122,16 @@ impl Topic {
#[allow(clippy::match_same_arms)]
match self {
Topic::RemoveParticipant { .. } => None,
Topic::DkgConfirmation { attempt, label } => match label {
Topic::DkgConfirmation { attempt, round } => match round {
SigningProtocolRound::Preprocess => {
Some(Topic::DkgConfirmation { attempt, label: SigningProtocolRound::Share })
Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Share })
}
SigningProtocolRound::Share => None,
},
Topic::SlashReport { .. } => None,
Topic::Sign { id, attempt, label } => match label {
Topic::Sign { id, attempt, round } => match round {
SigningProtocolRound::Preprocess => {
Some(Topic::Sign { id, attempt, label: SigningProtocolRound::Share })
Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Share })
}
SigningProtocolRound::Share => None,
},
@@ -154,8 +169,11 @@ impl Topic {
}
}

pub(crate) trait Borshy: BorshSerialize + BorshDeserialize {}
impl<T: BorshSerialize + BorshDeserialize> Borshy for T {}

/// The resulting data set from an accumulation
pub enum DataSet<D: Borshy> {
pub(crate) enum DataSet<D: Borshy> {
/// Accumulating this did not produce a data set to act on
/// (non-existent, not ready, prior handled, not participating, etc.)
None,
@@ -163,19 +181,25 @@ pub enum DataSet<D: Borshy> {
Participating(HashMap<SeraiAddress, D>),
}

trait Borshy: BorshSerialize + BorshDeserialize {}
impl<T: BorshSerialize + BorshDeserialize> Borshy for T {}

create_db!(
CoordinatorTributary {
// The last handled tributary block's (number, hash)
LastHandledTributaryBlock: (set: ValidatorSet) -> (u64, [u8; 32]),

// The slash points a validator has accrued, with u64::MAX representing a fatal slash.
SlashPoints: (set: ValidatorSet, validator: SeraiAddress) -> u64,
// The slash points a validator has accrued, with u32::MAX representing a fatal slash.
SlashPoints: (set: ValidatorSet, validator: SeraiAddress) -> u32,

// The cosign intent for a Substrate block
CosignIntents: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> CosignIntent,
// The latest Substrate block to cosign.
LatestSubstrateBlockToCosign: (set: ValidatorSet) -> [u8; 32],
// The hash of the block we're actively cosigning.
ActivelyCosigning: (set: ValidatorSet) -> [u8; 32],
// If this block has already been cosigned.
Cosigned: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> (),

// The plans to whitelist upon a `Transaction::SubstrateBlock` being included on-chain.
SubstrateBlockPlans: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> Vec<[u8; 32]>,

// The weight accumulated for a topic.
AccumulatedWeight: (set: ValidatorSet, topic: Topic) -> u64,
@@ -187,15 +211,21 @@ create_db!(
}
);

pub struct TributaryDb;
db_channel!(
CoordinatorTributary {
ProcessorMessages: (set: ValidatorSet) -> messages::CoordinatorMessage,
}
);

pub(crate) struct TributaryDb;
impl TributaryDb {
pub fn last_handled_tributary_block(
pub(crate) fn last_handled_tributary_block(
getter: &impl Get,
set: ValidatorSet,
) -> Option<(u64, [u8; 32])> {
LastHandledTributaryBlock::get(getter, set)
}
pub fn set_last_handled_tributary_block(
pub(crate) fn set_last_handled_tributary_block(
txn: &mut impl DbTxn,
set: ValidatorSet,
block_number: u64,
@@ -204,33 +234,108 @@ impl TributaryDb {
LastHandledTributaryBlock::set(txn, set, &(block_number, block_hash));
}

pub fn recognize_topic(txn: &mut impl DbTxn, set: ValidatorSet, topic: Topic) {
pub(crate) fn latest_substrate_block_to_cosign(
getter: &impl Get,
set: ValidatorSet,
) -> Option<[u8; 32]> {
LatestSubstrateBlockToCosign::get(getter, set)
}
pub(crate) fn set_latest_substrate_block_to_cosign(
txn: &mut impl DbTxn,
set: ValidatorSet,
substrate_block_hash: [u8; 32],
) {
LatestSubstrateBlockToCosign::set(txn, set, &substrate_block_hash);
}
pub(crate) fn actively_cosigning(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<[u8; 32]> {
ActivelyCosigning::get(txn, set)
}
pub(crate) fn start_cosigning(
txn: &mut impl DbTxn,
set: ValidatorSet,
substrate_block_hash: [u8; 32],
substrate_block_number: u64,
) {
assert!(
ActivelyCosigning::get(txn, set).is_none(),
"starting cosigning while already cosigning"
);
ActivelyCosigning::set(txn, set, &substrate_block_hash);

TributaryDb::recognize_topic(
txn,
set,
Topic::Sign {
id: VariantSignId::Cosign(substrate_block_number),
attempt: 0,
round: SigningProtocolRound::Preprocess,
},
);
}
pub(crate) fn finish_cosigning(txn: &mut impl DbTxn, set: ValidatorSet) {
assert!(ActivelyCosigning::take(txn, set).is_some(), "finished cosigning but not cosigning");
}
pub(crate) fn mark_cosigned(
txn: &mut impl DbTxn,
set: ValidatorSet,
substrate_block_hash: [u8; 32],
) {
Cosigned::set(txn, set, substrate_block_hash, &());
}
pub(crate) fn cosigned(
txn: &mut impl DbTxn,
set: ValidatorSet,
substrate_block_hash: [u8; 32],
) -> bool {
Cosigned::get(txn, set, substrate_block_hash).is_some()
}

pub(crate) fn recognize_topic(txn: &mut impl DbTxn, set: ValidatorSet, topic: Topic) {
AccumulatedWeight::set(txn, set, topic, &0);
}

pub fn start_of_block(txn: &mut impl DbTxn, set: ValidatorSet, block_number: u64) {
pub(crate) fn start_of_block(txn: &mut impl DbTxn, set: ValidatorSet, block_number: u64) {
for topic in Reattempt::take(txn, set, block_number).unwrap_or(vec![]) {
// TODO: Slash all people who preprocessed but didn't share
/*
TODO: Slash all people who preprocessed but didn't share, and add a delay to their
participations in future protocols. When we call accumulate, if the participant has no
delay, their accumulation occurs immediately. Else, the accumulation occurs after the
specified delay.

This means even if faulty validators are first to preprocess, they won't be selected for
the signing set unless there's a lack of less faulty validators available.

We need to decrease this delay upon successful participations, and set it to the maximum
upon `f + 1` validators voting to fatally slash the validator in question. This won't issue
the fatal slash but should still be effective.
*/
Self::recognize_topic(txn, set, topic);
if let Some(id) = topic.sign_id(set) {
Self::send_message(txn, set, messages::sign::CoordinatorMessage::Reattempt { id });
}
}
}

pub fn fatal_slash(
pub(crate) fn fatal_slash(
txn: &mut impl DbTxn,
set: ValidatorSet,
validator: SeraiAddress,
reason: &str,
) {
log::warn!("{validator} fatally slashed: {reason}");
SlashPoints::set(txn, set, validator, &u64::MAX);
SlashPoints::set(txn, set, validator, &u32::MAX);
}

pub fn is_fatally_slashed(getter: &impl Get, set: ValidatorSet, validator: SeraiAddress) -> bool {
SlashPoints::get(getter, set, validator).unwrap_or(0) == u64::MAX
pub(crate) fn is_fatally_slashed(
getter: &impl Get,
set: ValidatorSet,
validator: SeraiAddress,
) -> bool {
SlashPoints::get(getter, set, validator).unwrap_or(0) == u32::MAX
}

#[allow(clippy::too_many_arguments)]
pub fn accumulate<D: Borshy>(
pub(crate) fn accumulate<D: Borshy>(
txn: &mut impl DbTxn,
set: ValidatorSet,
validators: &[SeraiAddress],
@@ -286,16 +391,17 @@ impl TributaryDb {
// Check if we now cross the weight threshold
if accumulated_weight >= topic.required_participation(total_weight) {
// Queue this for re-attempt after enough time passes
if let Some((attempt, reattempt_topic)) = topic.reattempt_topic() {
let reattempt_topic = topic.reattempt_topic();
if let Some((attempt, reattempt_topic)) = reattempt_topic {
// 5 minutes
#[cfg(not(feature = "longer-reattempts"))]
const BASE_REATTEMPT_DELAY: u32 =
(5u32 * 60 * 1000).div_ceil(tributary::tendermint::TARGET_BLOCK_TIME);
(5u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME);

// 10 minutes, intended for latent environments like the GitHub CI
#[cfg(feature = "longer-reattempts")]
const BASE_REATTEMPT_DELAY: u32 =
(10u32 * 60 * 1000).div_ceil(tributary::tendermint::TARGET_BLOCK_TIME);
(10u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME);

// Linearly scale the time for the protocol with the attempt number
let blocks_till_reattempt = u64::from(attempt * BASE_REATTEMPT_DELAY);
@@ -316,15 +422,11 @@ impl TributaryDb {
let mut data_set = HashMap::with_capacity(validators.len());
for validator in validators {
if let Some(data) = Accumulated::<D>::get(txn, set, topic, *validator) {
// Clean this data up if there's not a succeeding topic
// If there is, we wait as the succeeding topic checks our participation in this topic
if succeeding_topic.is_none() {
// Clean this data up if there's not a re-attempt topic
// If there is a re-attempt topic, we clean it up upon re-attempt
if reattempt_topic.is_none() {
Accumulated::<D>::del(txn, set, topic, *validator);
}
// If this *was* the succeeding topic, clean up the preceding topic's data
if let Some(preceding_topic) = preceding_topic {
Accumulated::<D>::del(txn, set, preceding_topic, *validator);
}
data_set.insert(*validator, data);
}
}
@@ -343,4 +445,12 @@ impl TributaryDb {
DataSet::None
}
}

pub(crate) fn send_message(
txn: &mut impl DbTxn,
set: ValidatorSet,
message: impl Into<messages::CoordinatorMessage>,
) {
ProcessorMessages::send(txn, set, &message.into());
}
}
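For intuition on the reattempt scheduling above: with the constants this PR sets for the Tendermint machine (all in milliseconds), the base delay works out to 50 blocks. A minimal, standalone sketch of that arithmetic, with the constants copied from this diff:

const BLOCK_PROCESSING_TIME: u32 = 999;
const LATENCY_TIME: u32 = 1667;
const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME); // 6000 ms

fn main() {
  // 5 minutes expressed in blocks, rounding up, as in `accumulate`
  let base_reattempt_delay = (5u32 * 60 * 1000).div_ceil(TARGET_BLOCK_TIME);
  assert_eq!(base_reattempt_delay, 50);
  // The delay scales linearly with the attempt number
  for attempt in 1 ..= 3u32 {
    println!("attempt {attempt}: reattempt after {} blocks", attempt * base_reattempt_delay);
  }
}
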
@@ -1,392 +1,584 @@
use core::{marker::PhantomData, fmt::Debug};
use std::{sync::Arc, io};
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use async_trait::async_trait;
use core::{marker::PhantomData, future::Future};
use std::collections::HashMap;

use zeroize::Zeroizing;
use ciphersuite::group::GroupEncoding;

use ciphersuite::{Ciphersuite, Ristretto};

use scale::Decode;
use futures_channel::mpsc::UnboundedReceiver;
use futures_util::{StreamExt, SinkExt};
use ::tendermint::{
ext::{BlockNumber, Commit, Block as BlockTrait, Network},
SignedMessageFor, SyncedBlock, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender,
TendermintMachine, TendermintHandle,
use serai_client::{
primitives::SeraiAddress,
validator_sets::primitives::{ValidatorSet, Slash},
};

pub use ::tendermint::Evidence;
use serai_db::*;
use serai_task::ContinuallyRan;

use serai_db::Db;
use tributary_sdk::{
tendermint::{
tx::{TendermintTx, Evidence, decode_signed_message},
TendermintNetwork,
},
Signed as TributarySigned, TransactionKind, TransactionTrait,
Transaction as TributaryTransaction, Block, TributaryReader, P2p,
};

use tokio::sync::RwLock;
use serai_cosign::CosignIntent;
use serai_coordinator_substrate::NewSetInformation;

mod merkle;
pub(crate) use merkle::*;
use messages::sign::VariantSignId;

pub mod transaction;
pub use transaction::{TransactionError, Signed, TransactionKind, Transaction as TransactionTrait};
mod transaction;
pub use transaction::{SigningProtocolRound, Signed, Transaction};

use crate::tendermint::tx::TendermintTx;
mod db;
use db::*;

mod provided;
pub(crate) use provided::*;
pub use provided::ProvidedError;

mod block;
pub use block::*;

mod blockchain;
pub(crate) use blockchain::*;

mod mempool;
pub(crate) use mempool::*;

pub mod tendermint;
pub(crate) use crate::tendermint::*;

#[cfg(any(test, feature = "tests"))]
pub mod tests;

/// Size limit for an individual transaction.
// This needs to be big enough to participate in a 101-of-150 eVRF DKG with each element taking
// `MAX_KEY_LEN`. This also needs to be big enough to participate in signing 520 Bitcoin inputs
// with 49 key shares, and signing 120 Monero inputs with 49 key shares.
// TODO: Add a test for these properties
pub const TRANSACTION_SIZE_LIMIT: usize = 2_000_000;
/// Amount of transactions a single account may have in the mempool.
pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
/// Block size limit.
// This targets a growth limit of roughly 30 GB a day, under load, in order to prevent a malicious
// participant from flooding disks and causing out of space errors in other processes.
pub const BLOCK_SIZE_LIMIT: usize = 2_001_000;

pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
pub(crate) const TRANSACTION_MESSAGE: u8 = 1;

#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Transaction<T: TransactionTrait> {
Tendermint(TendermintTx),
Application(T),
}

impl<T: TransactionTrait> ReadWrite for Transaction<T> {
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let mut kind = [0];
reader.read_exact(&mut kind)?;
match kind[0] {
0 => {
let tx = TendermintTx::read(reader)?;
Ok(Transaction::Tendermint(tx))
}
1 => {
let tx = T::read(reader)?;
Ok(Transaction::Application(tx))
}
_ => Err(io::Error::other("invalid transaction type")),
}
}
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
Transaction::Tendermint(tx) => {
writer.write_all(&[0])?;
tx.write(writer)
}
Transaction::Application(tx) => {
writer.write_all(&[1])?;
tx.write(writer)
}
}
/// Messages to send to the Processors.
pub struct ProcessorMessages;
impl ProcessorMessages {
/// Try to receive a message to send to a Processor.
pub fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<messages::CoordinatorMessage> {
db::ProcessorMessages::try_recv(txn, set)
}
}

impl<T: TransactionTrait> Transaction<T> {
pub fn hash(&self) -> [u8; 32] {
match self {
Transaction::Tendermint(tx) => tx.hash(),
Transaction::Application(tx) => tx.hash(),
}
}

pub fn kind(&self) -> TransactionKind {
match self {
Transaction::Tendermint(tx) => tx.kind(),
Transaction::Application(tx) => tx.kind(),
}
}
}

/// An item which can be read and written.
pub trait ReadWrite: Sized {
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;

fn serialize(&self) -> Vec<u8> {
// BlockHeader is 64 bytes and likely the smallest item in this system
let mut buf = Vec::with_capacity(64);
self.write(&mut buf).unwrap();
buf
}
}

#[async_trait]
pub trait P2p: 'static + Send + Sync + Clone + Debug {
/// Broadcast a message to all other members of the Tributary with the specified genesis.
/// The cosign intents.
pub struct CosignIntents;
impl CosignIntents {
/// Provide a CosignIntent for this Tributary.
///
/// The Tributary will re-broadcast consensus messages on a fixed interval to ensure they aren't
/// prematurely dropped from the P2P layer. The P2P layer SHOULD perform content-based
/// deduplication to ensure a sane amount of load.
async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>);
}

#[async_trait]
impl<P: P2p> P2p for Arc<P> {
async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
(*self).broadcast(genesis, msg).await
/// This must be done before the associated `Transaction::Cosign` is provided.
pub fn provide(txn: &mut impl DbTxn, set: ValidatorSet, intent: &CosignIntent) {
db::CosignIntents::set(txn, set, intent.block_hash, intent);
}
fn take(
txn: &mut impl DbTxn,
set: ValidatorSet,
substrate_block_hash: [u8; 32],
) -> Option<CosignIntent> {
db::CosignIntents::take(txn, set, substrate_block_hash)
}
}

#[derive(Clone)]
pub struct Tributary<D: Db, T: TransactionTrait, P: P2p> {
db: D,

genesis: [u8; 32],
network: TendermintNetwork<D, T, P>,

synced_block: Arc<RwLock<SyncedBlockSender<TendermintNetwork<D, T, P>>>>,
synced_block_result: Arc<RwLock<SyncedBlockResultReceiver>>,
messages: Arc<RwLock<MessageSender<TendermintNetwork<D, T, P>>>>,
/// The plans to whitelist upon a `Transaction::SubstrateBlock` being included on-chain.
pub struct SubstrateBlockPlans;
impl SubstrateBlockPlans {
/// Set the plans to whitelist upon the associated `Transaction::SubstrateBlock` being included
/// on-chain.
///
/// This must be done before the associated `Transaction::Cosign` is provided.
pub fn set(
txn: &mut impl DbTxn,
set: ValidatorSet,
substrate_block_hash: [u8; 32],
plans: &Vec<[u8; 32]>,
) {
db::SubstrateBlockPlans::set(txn, set, substrate_block_hash, &plans);
}
fn take(
txn: &mut impl DbTxn,
set: ValidatorSet,
substrate_block_hash: [u8; 32],
) -> Option<Vec<[u8; 32]>> {
db::SubstrateBlockPlans::take(txn, set, substrate_block_hash)
}
}

impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
pub async fn new(
db: D,
genesis: [u8; 32],
start_time: u64,
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
validators: Vec<(<Ristretto as Ciphersuite>::G, u64)>,
p2p: P,
) -> Option<Self> {
log::info!("new Tributary with genesis {}", hex::encode(genesis));
struct ScanBlock<'a, TD: Db, TDT: DbTxn, P: P2p> {
_td: PhantomData<TD>,
_p2p: PhantomData<P>,
tributary_txn: &'a mut TDT,
set: ValidatorSet,
validators: &'a [SeraiAddress],
total_weight: u64,
validator_weights: &'a HashMap<SeraiAddress, u64>,
}
impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
fn potentially_start_cosign(&mut self) {
// Don't start a new cosigning instance if we're actively running one
if TributaryDb::actively_cosigning(self.tributary_txn, self.set).is_some() {
return;
}

let validators_vec = validators.iter().map(|validator| validator.0).collect::<Vec<_>>();

let signer = Arc::new(Signer::new(genesis, key));
let validators = Arc::new(Validators::new(genesis, validators)?);

let mut blockchain = Blockchain::new(db.clone(), genesis, &validators_vec);
let block_number = BlockNumber(blockchain.block_number());

let start_time = if let Some(commit) = blockchain.commit(&blockchain.tip()) {
Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time
} else {
start_time
// Fetch the latest intended-to-be-cosigned block
let Some(latest_substrate_block_to_cosign) =
TributaryDb::latest_substrate_block_to_cosign(self.tributary_txn, self.set)
else {
return;
};
let proposal = TendermintBlock(
blockchain.build_block::<TendermintNetwork<D, T, P>>(&validators).serialize(),

// If it was already cosigned, return
if TributaryDb::cosigned(self.tributary_txn, self.set, latest_substrate_block_to_cosign) {
return;
}

let intent =
CosignIntents::take(self.tributary_txn, self.set, latest_substrate_block_to_cosign)
.expect("Transaction::Cosign locally provided but CosignIntents wasn't populated");
assert_eq!(
intent.block_hash, latest_substrate_block_to_cosign,
"provided CosignIntent wasn't saved by its block hash"
);
let blockchain = Arc::new(RwLock::new(blockchain));

let network = TendermintNetwork { genesis, signer, validators, blockchain, p2p };

let TendermintHandle { synced_block, synced_block_result, messages, machine } =
TendermintMachine::new(
db.clone(),
network.clone(),
genesis,
block_number,
start_time,
proposal,
)
.await;
tokio::spawn(machine.run());

Some(Self {
db,
genesis,
network,
synced_block: Arc::new(RwLock::new(synced_block)),
synced_block_result: Arc::new(RwLock::new(synced_block_result)),
messages: Arc::new(RwLock::new(messages)),
})
}

pub fn block_time() -> u32 {
TendermintNetwork::<D, T, P>::block_time()
}

pub fn genesis(&self) -> [u8; 32] {
self.genesis
}

pub async fn block_number(&self) -> u64 {
self.network.blockchain.read().await.block_number()
}
pub async fn tip(&self) -> [u8; 32] {
self.network.blockchain.read().await.tip()
}

pub fn reader(&self) -> TributaryReader<D, T> {
TributaryReader(self.db.clone(), self.genesis, PhantomData)
}

pub async fn provide_transaction(&self, tx: T) -> Result<(), ProvidedError> {
self.network.blockchain.write().await.provide_transaction(tx)
}

pub async fn next_nonce(
&self,
signer: &<Ristretto as Ciphersuite>::G,
order: &[u8],
) -> Option<u32> {
self.network.blockchain.read().await.next_nonce(signer, order)
}

// Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error.
// Safe to be &self since the only meaningful usage of self is self.network.blockchain which
// successfully acquires its own write lock
pub async fn add_transaction(&self, tx: T) -> Result<bool, TransactionError> {
let tx = Transaction::Application(tx);
let mut to_broadcast = vec![TRANSACTION_MESSAGE];
tx.write(&mut to_broadcast).unwrap();
let res = self.network.blockchain.write().await.add_transaction::<TendermintNetwork<D, T, P>>(
true,
tx,
&self.network.signature_scheme(),
// Mark us as actively cosigning
TributaryDb::start_cosigning(
self.tributary_txn,
self.set,
latest_substrate_block_to_cosign,
intent.block_number,
);
// Send the message for the processor to start signing
TributaryDb::send_message(
self.tributary_txn,
self.set,
messages::coordinator::CoordinatorMessage::CosignSubstrateBlock {
session: self.set.session,
intent,
},
);
if res == Ok(true) {
self.network.p2p.broadcast(self.genesis, to_broadcast).await;
}
res
}
fn handle_application_tx(&mut self, block_number: u64, tx: Transaction) {
let signer = |signed: Signed| SeraiAddress(signed.signer().to_bytes());

async fn sync_block_internal(
&self,
block: Block<T>,
commit: Vec<u8>,
result: &mut UnboundedReceiver<bool>,
) -> bool {
let (tip, block_number) = {
let blockchain = self.network.blockchain.read().await;
(blockchain.tip(), blockchain.block_number())
};

if block.header.parent != tip {
log::debug!("told to sync a block whose parent wasn't our tip");
return false;
if let TransactionKind::Signed(_, TributarySigned { signer, .. }) = tx.kind() {
// Don't handle transactions from those fatally slashed
// TODO: The fact they can publish these TXs makes this a notable spam vector
if TributaryDb::is_fatally_slashed(
self.tributary_txn,
self.set,
SeraiAddress(signer.to_bytes()),
) {
return;
}
}

let block = TendermintBlock(block.serialize());
let mut commit_ref = commit.as_ref();
let Ok(commit) = Commit::<Arc<Validators>>::decode(&mut commit_ref) else {
log::error!("sent an invalidly serialized commit");
return false;
};
// Storage DoS vector. We *could* truncate to solely the relevant portion, trying to save this,
// yet then we'd have to test the truncation was performed correctly.
if !commit_ref.is_empty() {
log::error!("sent a commit with additional data after it");
return false;
}
if !self.network.verify_commit(block.id(), &commit) {
log::error!("sent an invalid commit");
return false;
}
match tx {
// Accumulate this vote and fatally slash the participant if past the threshold
Transaction::RemoveParticipant { participant, signed } => {
let signer = signer(signed);

let number = BlockNumber(block_number + 1);
self.synced_block.write().await.send(SyncedBlock { number, block, commit }).await.unwrap();
result.next().await.unwrap()
}

// Sync a block.
// TODO: Since we have a static validator set, we should only need the tail commit?
pub async fn sync_block(&self, block: Block<T>, commit: Vec<u8>) -> bool {
let mut result = self.synced_block_result.write().await;
self.sync_block_internal(block, commit, &mut result).await
}

// Return true if the message should be rebroadcasted.
pub async fn handle_message(&self, msg: &[u8]) -> bool {
match msg.first() {
Some(&TRANSACTION_MESSAGE) => {
let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else {
log::error!("received invalid transaction message");
return false;
};

// TODO: Sync mempools with fellow peers
// Can we just rebroadcast transactions not included for at least two blocks?
let res =
self.network.blockchain.write().await.add_transaction::<TendermintNetwork<D, T, P>>(
false,
tx,
&self.network.signature_scheme(),
// Check the participant voted to be removed actually exists
if !self.validators.iter().any(|validator| *validator == participant) {
TributaryDb::fatal_slash(
self.tributary_txn,
self.set,
|
||||
signer,
|
||||
"voted to remove non-existent participant",
|
||||
);
|
||||
log::debug!("received transaction message. valid new transaction: {res:?}");
|
||||
res == Ok(true)
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
Some(&TENDERMINT_MESSAGE) => {
|
||||
let Ok(msg) =
|
||||
SignedMessageFor::<TendermintNetwork<D, T, P>>::decode::<&[u8]>(&mut &msg[1 ..])
|
||||
else {
|
||||
log::error!("received invalid tendermint message");
|
||||
return false;
|
||||
match TributaryDb::accumulate(
|
||||
self.tributary_txn,
|
||||
self.set,
|
||||
self.validators,
|
||||
self.total_weight,
|
||||
block_number,
|
||||
Topic::RemoveParticipant { participant },
|
||||
signer,
|
||||
self.validator_weights[&signer],
|
||||
&(),
|
||||
) {
|
||||
DataSet::None => {}
|
||||
DataSet::Participating(_) => {
|
||||
TributaryDb::fatal_slash(self.tributary_txn, self.set, participant, "voted to remove");
|
||||
}
|
||||
};
|
||||
|
||||
self.messages.write().await.send(msg).await.unwrap();
|
||||
false
|
||||
}
|
||||
|
||||
_ => false,
|
||||
// Send the participation to the processor
|
||||
Transaction::DkgParticipation { participation, signed } => {
|
||||
TributaryDb::send_message(
|
||||
self.tributary_txn,
|
||||
self.set,
|
||||
messages::key_gen::CoordinatorMessage::Participation {
|
||||
session: self.set.session,
|
||||
participant: todo!("TODO"),
|
||||
participation,
|
||||
},
|
||||
);
|
||||
}
|
||||
Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed } => {
|
||||
// Accumulate the preprocesses into our own FROST attempt manager
|
||||
todo!("TODO")
|
||||
}
|
||||
Transaction::DkgConfirmationShare { attempt, share, signed } => {
|
||||
// Accumulate the shares into our own FROST attempt manager
|
||||
todo!("TODO: SetKeysTask")
|
||||
}
|
||||
|
||||
Transaction::Cosign { substrate_block_hash } => {
|
||||
// Update the latest intended-to-be-cosigned Substrate block
|
||||
TributaryDb::set_latest_substrate_block_to_cosign(
|
||||
self.tributary_txn,
|
||||
self.set,
|
||||
substrate_block_hash,
|
||||
);
|
||||
// Start a new cosign if we aren't already working on one
|
||||
self.potentially_start_cosign();
|
||||
}
|
||||
Transaction::Cosigned { substrate_block_hash } => {
|
||||
/*
|
||||
We provide one Cosigned per Cosign transaction, but they have independent orders. This
|
||||
means we may receive Cosigned before Cosign. In order to ensure we only start work on
|
||||
not-yet-Cosigned cosigns, we flag all cosigned blocks as cosigned. Then, when we choose
|
||||
the next block to work on, we won't if it's already been cosigned.
|
||||
*/
|
||||
TributaryDb::mark_cosigned(self.tributary_txn, self.set, substrate_block_hash);
|
||||
|
||||
// If we aren't actively cosigning this block, return
|
||||
// This occurs when we have Cosign TXs A, B, C, we received Cosigned for A and start on C,
|
||||
// and then receive Cosigned for B
|
||||
if TributaryDb::actively_cosigning(self.tributary_txn, self.set) !=
|
||||
Some(substrate_block_hash)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
// Since this is the block we were cosigning, mark us as having finished cosigning
|
||||
TributaryDb::finish_cosigning(self.tributary_txn, self.set);
|
||||
|
||||
// Start working on the next cosign
|
||||
self.potentially_start_cosign();
|
||||
}
|
||||
Transaction::SubstrateBlock { hash } => {
|
||||
// Whitelist all of the IDs this Substrate block causes to be signed
|
||||
let plans = SubstrateBlockPlans::take(self.tributary_txn, self.set, hash).expect(
|
||||
"Transaction::SubstrateBlock locally provided but SubstrateBlockPlans wasn't populated",
|
||||
);
|
||||
for plan in plans {
|
||||
TributaryDb::recognize_topic(
|
||||
self.tributary_txn,
|
||||
self.set,
|
||||
Topic::Sign {
|
||||
id: VariantSignId::Transaction(plan),
|
||||
attempt: 0,
|
||||
round: SigningProtocolRound::Preprocess,
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
Transaction::Batch { hash } => {
|
||||
// Whitelist the signing of this batch
|
||||
TributaryDb::recognize_topic(
|
||||
self.tributary_txn,
|
||||
self.set,
|
||||
Topic::Sign {
|
||||
id: VariantSignId::Batch(hash),
|
||||
attempt: 0,
|
||||
round: SigningProtocolRound::Preprocess,
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
Transaction::SlashReport { slash_points, signed } => {
|
||||
let signer = signer(signed);
|
||||
|
||||
if slash_points.len() != self.validators.len() {
|
||||
TributaryDb::fatal_slash(
|
||||
self.tributary_txn,
|
||||
self.set,
|
||||
signer,
|
||||
"slash report was for a distinct amount of signers",
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// Accumulate, and if past the threshold, calculate *the* slash report and start signing it
|
||||
match TributaryDb::accumulate(
|
||||
self.tributary_txn,
|
||||
self.set,
|
||||
self.validators,
|
||||
self.total_weight,
|
||||
block_number,
|
||||
Topic::SlashReport,
|
||||
signer,
|
||||
self.validator_weights[&signer],
|
||||
&slash_points,
|
||||
) {
|
||||
DataSet::None => {}
|
||||
DataSet::Participating(data_set) => {
|
||||
// Find the median reported slashes for this validator
|
||||
/*
|
||||
TODO: This lets 34% perform a fatal slash. That shouldn't be allowed. We need
|
||||
to accept slash reports for a period past the threshold, and only fatally slash if we
|
||||
have a supermajority agree the slash should be fatal. If there isn't a supermajority,
|
||||
but the median believe the slash should be fatal, we need to fallback to a large
|
||||
constant.
|
||||
|
||||
Also, TODO, each slash point should probably be considered as
|
||||
`MAX_KEY_SHARES_PER_SET * BLOCK_TIME` seconds of downtime. As this time crosses
|
||||
various thresholds (1 day, 3 days, etc), a multiplier should be attached.
|
||||
*/
|
||||
let mut median_slash_report = Vec::with_capacity(self.validators.len());
|
||||
for i in 0 .. self.validators.len() {
|
||||
let mut this_validator =
|
||||
data_set.values().map(|report| report[i]).collect::<Vec<_>>();
|
||||
this_validator.sort_unstable();
|
||||
// Choose the median, where if there are two median values, the lower one is chosen
|
||||
let median_index = if (this_validator.len() % 2) == 1 {
|
||||
this_validator.len() / 2
|
||||
} else {
|
||||
(this_validator.len() / 2) - 1
|
||||
};
|
||||
median_slash_report.push(this_validator[median_index]);
|
||||
}
|
||||
|
||||
// We only publish slashes for the `f` worst performers to:
|
||||
// 1) Effect amnesty if there were network disruptions which affected everyone
|
||||
// 2) Ensure the signing threshold doesn't have a disincentive to do their job
|
||||
|
||||
// Find the worst performer within the signing threshold's slash points
|
||||
let f = (self.validators.len() - 1) / 3;
|
||||
let worst_validator_in_supermajority_slash_points = {
|
||||
let mut sorted_slash_points = median_slash_report.clone();
|
||||
sorted_slash_points.sort_unstable();
|
||||
// This won't be a valid index if `f == 0`, which means we don't have any validators
|
||||
// to slash
|
||||
let index_of_first_validator_to_slash = self.validators.len() - f;
|
||||
let index_of_worst_validator_in_supermajority = index_of_first_validator_to_slash - 1;
|
||||
sorted_slash_points[index_of_worst_validator_in_supermajority]
|
||||
};
|
||||
|
||||
// Perform the amortization
|
||||
for slash_points in &mut median_slash_report {
|
||||
*slash_points =
|
||||
slash_points.saturating_sub(worst_validator_in_supermajority_slash_points)
|
||||
}
|
||||
let amortized_slash_report = median_slash_report;
|
||||
|
||||
// Create the resulting slash report
|
||||
let mut slash_report = vec![];
|
||||
for (validator, points) in self.validators.iter().copied().zip(amortized_slash_report) {
|
||||
// TODO: Natively store this as a `Slash`
|
||||
if points == u32::MAX {
|
||||
slash_report.push(Slash::Fatal);
|
||||
} else {
|
||||
slash_report.push(Slash::Points(points));
|
||||
}
|
||||
}
|
||||
assert!(slash_report.len() <= f);
|
||||
|
||||
// Recognize the topic for signing the slash report
|
||||
TributaryDb::recognize_topic(
|
||||
self.tributary_txn,
|
||||
self.set,
|
||||
Topic::Sign {
|
||||
id: VariantSignId::SlashReport,
|
||||
attempt: 0,
|
||||
round: SigningProtocolRound::Preprocess,
|
||||
},
|
||||
);
|
||||
// Send the message for the processor to start signing
|
||||
TributaryDb::send_message(
|
||||
self.tributary_txn,
|
||||
self.set,
|
||||
messages::coordinator::CoordinatorMessage::SignSlashReport {
|
||||
session: self.set.session,
|
||||
report: slash_report,
|
||||
},
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
Transaction::Sign { id, attempt, round, data, signed } => {
|
||||
let topic = Topic::Sign { id, attempt, round };
|
||||
let signer = signer(signed);
|
||||
|
||||
if u64::try_from(data.len()).unwrap() != self.validator_weights[&signer] {
|
||||
TributaryDb::fatal_slash(
|
||||
self.tributary_txn,
|
||||
self.set,
|
||||
signer,
|
||||
"signer signed with a distinct amount of key shares than they had key shares",
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
match TributaryDb::accumulate(
|
||||
self.tributary_txn,
|
||||
self.set,
|
||||
self.validators,
|
||||
self.total_weight,
|
||||
block_number,
|
||||
topic,
|
||||
signer,
|
||||
self.validator_weights[&signer],
|
||||
&data,
|
||||
) {
|
||||
DataSet::None => {}
|
||||
DataSet::Participating(data_set) => {
|
||||
let id = topic.sign_id(self.set).expect("Topic::Sign didn't have SignId");
|
||||
let flatten_data_set = |data_set| todo!("TODO");
|
||||
let data_set = flatten_data_set(data_set);
|
||||
TributaryDb::send_message(
|
||||
self.tributary_txn,
|
||||
self.set,
|
||||
match round {
|
||||
SigningProtocolRound::Preprocess => {
|
||||
messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set }
|
||||
}
|
||||
SigningProtocolRound::Share => {
|
||||
messages::sign::CoordinatorMessage::Shares { id, shares: data_set }
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a Future which will resolve once the next block has been added.
|
||||
pub async fn next_block_notification(
|
||||
&self,
|
||||
) -> impl Send + Sync + core::future::Future<Output = Result<(), impl Send + Sync>> {
|
||||
let (tx, rx) = tokio::sync::oneshot::channel();
|
||||
self.network.blockchain.write().await.next_block_notifications.push_back(tx);
|
||||
rx
|
||||
fn handle_block(mut self, block_number: u64, block: Block<Transaction>) {
|
||||
TributaryDb::start_of_block(self.tributary_txn, self.set, block_number);
|
||||
|
||||
for tx in block.transactions {
|
||||
match tx {
|
||||
TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => {
|
||||
// Since the evidence is on the chain, it will have already been validated
|
||||
// We can just punish the signer
|
||||
let data = match ev {
|
||||
Evidence::ConflictingMessages(first, second) => (first, Some(second)),
|
||||
Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None),
|
||||
};
|
||||
let msgs = (
|
||||
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(&data.0).unwrap(),
|
||||
if data.1.is_some() {
|
||||
Some(
|
||||
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(&data.1.unwrap())
|
||||
.unwrap(),
|
||||
)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
);
|
||||
|
||||
// Since anything with evidence is fundamentally faulty behavior, not just temporal
|
||||
// errors, mark the node as fatally slashed
|
||||
TributaryDb::fatal_slash(
|
||||
self.tributary_txn,
|
||||
self.set,
|
||||
SeraiAddress(msgs.0.msg.sender),
|
||||
&format!("invalid tendermint messages: {msgs:?}"),
|
||||
);
|
||||
}
|
||||
TributaryTransaction::Application(tx) => {
|
||||
self.handle_application_tx(block_number, tx);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct TributaryReader<D: Db, T: TransactionTrait>(D, [u8; 32], PhantomData<T>);
|
||||
impl<D: Db, T: TransactionTrait> TributaryReader<D, T> {
|
||||
pub fn genesis(&self) -> [u8; 32] {
|
||||
self.1
|
||||
}
|
||||
/// The task to scan the Tributary, populating `ProcessorMessages`.
|
||||
pub struct ScanTributaryTask<TD: Db, P: P2p> {
|
||||
tributary_db: TD,
|
||||
set: ValidatorSet,
|
||||
validators: Vec<SeraiAddress>,
|
||||
total_weight: u64,
|
||||
validator_weights: HashMap<SeraiAddress, u64>,
|
||||
tributary: TributaryReader<TD, Transaction>,
|
||||
_p2p: PhantomData<P>,
|
||||
}
|
||||
|
||||
// Since these values are static once set, they can be safely read from the database without lock
|
||||
// acquisition
|
||||
pub fn block(&self, hash: &[u8; 32]) -> Option<Block<T>> {
|
||||
Blockchain::<D, T>::block_from_db(&self.0, self.1, hash)
|
||||
}
|
||||
pub fn commit(&self, hash: &[u8; 32]) -> Option<Vec<u8>> {
|
||||
Blockchain::<D, T>::commit_from_db(&self.0, self.1, hash)
|
||||
}
|
||||
pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option<Commit<Validators>> {
|
||||
self.commit(hash).map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap())
|
||||
}
|
||||
pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> {
|
||||
Blockchain::<D, T>::block_after(&self.0, self.1, hash)
|
||||
}
|
||||
pub fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> {
|
||||
self
|
||||
.commit(hash)
|
||||
.map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time)
|
||||
}
|
||||
impl<TD: Db, P: P2p> ScanTributaryTask<TD, P> {
|
||||
/// Create a new instance of this task.
|
||||
pub fn new(
|
||||
tributary_db: TD,
|
||||
new_set: &NewSetInformation,
|
||||
tributary: TributaryReader<TD, Transaction>,
|
||||
) -> Self {
|
||||
let mut validators = Vec::with_capacity(new_set.validators.len());
|
||||
let mut total_weight = 0;
|
||||
let mut validator_weights = HashMap::with_capacity(new_set.validators.len());
|
||||
for (validator, weight) in new_set.validators.iter().copied() {
|
||||
let validator = SeraiAddress::from(validator);
|
||||
let weight = u64::from(weight);
|
||||
validators.push(validator);
|
||||
total_weight += weight;
|
||||
validator_weights.insert(validator, weight);
|
||||
}
|
||||
|
||||
pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str) -> bool {
|
||||
Blockchain::<D, T>::locally_provided_txs_in_block(&self.0, &self.1, hash, order)
|
||||
}
|
||||
|
||||
// This isn't static, yet can be read with only minor discrepancy risks
|
||||
pub fn tip(&self) -> [u8; 32] {
|
||||
Blockchain::<D, T>::tip_from_db(&self.0, self.1)
|
||||
ScanTributaryTask {
|
||||
tributary_db,
|
||||
set: new_set.set,
|
||||
validators,
|
||||
total_weight,
|
||||
validator_weights,
|
||||
tributary,
|
||||
_p2p: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
|
||||
type Error = String;
|
||||
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||
async move {
|
||||
let (mut last_block_number, mut last_block_hash) =
|
||||
TributaryDb::last_handled_tributary_block(&self.tributary_db, self.set)
|
||||
.unwrap_or((0, self.tributary.genesis()));
|
||||
|
||||
let mut made_progress = false;
|
||||
while let Some(next) = self.tributary.block_after(&last_block_hash) {
|
||||
let block = self.tributary.block(&next).unwrap();
|
||||
let block_number = last_block_number + 1;
|
||||
let block_hash = block.hash();
|
||||
|
||||
// Make sure we have all of the provided transactions for this block
|
||||
for tx in &block.transactions {
|
||||
let TransactionKind::Provided(order) = tx.kind() else {
|
||||
continue;
|
||||
};
|
||||
|
||||
// make sure we have all the provided txs in this block locally
|
||||
if !self.tributary.locally_provided_txs_in_block(&block_hash, order) {
|
||||
return Err(format!(
|
||||
"didn't have the provided Transactions on-chain for set (ephemeral error): {:?}",
|
||||
self.set
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
let mut tributary_txn = self.tributary_db.txn();
|
||||
(ScanBlock {
|
||||
_td: PhantomData::<TD>,
|
||||
_p2p: PhantomData::<P>,
|
||||
tributary_txn: &mut tributary_txn,
|
||||
set: self.set,
|
||||
validators: &self.validators,
|
||||
total_weight: self.total_weight,
|
||||
validator_weights: &self.validator_weights,
|
||||
})
|
||||
.handle_block(block_number, block);
|
||||
TributaryDb::set_last_handled_tributary_block(
|
||||
&mut tributary_txn,
|
||||
self.set,
|
||||
block_number,
|
||||
block_hash,
|
||||
);
|
||||
last_block_number = block_number;
|
||||
last_block_hash = block_hash;
|
||||
tributary_txn.commit();
|
||||
|
||||
made_progress = true;
|
||||
}
|
||||
|
||||
Ok(made_progress)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Create the Transaction::SlashReport to publish per the local view.
|
||||
pub fn slash_report_transaction(getter: &impl Get, set: &NewSetInformation) -> Transaction {
|
||||
let mut slash_points = Vec::with_capacity(set.validators.len());
|
||||
for (validator, _weight) in set.validators.iter().copied() {
|
||||
let validator = SeraiAddress::from(validator);
|
||||
slash_points.push(SlashPoints::get(getter, set.set, validator).unwrap_or(0));
|
||||
}
|
||||
Transaction::SlashReport { slash_points, signed: Signed::default() }
|
||||
}
|
||||
|
||||
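(Aside: a self-contained sketch of the lower-median rule the slash-report accumulation above relies on. The function name and sample values here are hypothetical illustrations, not code from this commit.)

fn lower_median(mut points: Vec<u32>) -> u32 {
  points.sort_unstable();
  // With an odd count the true median is taken; with an even count, the lower of the two
  // middle values is taken, biasing toward the smaller slash
  let median_index =
    if (points.len() % 2) == 1 { points.len() / 2 } else { (points.len() / 2) - 1 };
  points[median_index]
}

fn main() {
  // Four reports about one validator: the two middle values are 7 and 9, so 7 is chosen
  assert_eq!(lower_median(vec![5, 7, 9, 11]), 7);
  // Three reports: the middle value 2 is chosen
  assert_eq!(lower_median(vec![0, 2, 4]), 2);
}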
@@ -1,11 +0,0 @@
pub use crate::P2p;

#[derive(Clone, Debug)]
pub struct DummyP2p;

#[async_trait::async_trait]
impl P2p for DummyP2p {
  async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
    unimplemented!()
  }
}
@@ -1,218 +1,365 @@
use core::fmt::Debug;
use std::io;

use zeroize::Zeroize;
use thiserror::Error;

use blake2::{Digest, Blake2b512};

use ciphersuite::{
  group::{Group, GroupEncoding},
  Ciphersuite, Ristretto,
};
use schnorr::SchnorrSignature;

use crate::{TRANSACTION_SIZE_LIMIT, ReadWrite};

#[derive(Clone, PartialEq, Eq, Debug, Error)]
pub enum TransactionError {
  /// Transaction exceeded the size limit.
  #[error("transaction is too large")]
  TooLargeTransaction,
  /// Transaction's signer isn't a participant.
  #[error("invalid signer")]
  InvalidSigner,
  /// Transaction's nonce isn't the prior nonce plus one.
  #[error("invalid nonce")]
  InvalidNonce,
  /// Transaction's signature is invalid.
  #[error("invalid signature")]
  InvalidSignature,
  /// Transaction's content is invalid.
  #[error("transaction content is invalid")]
  InvalidContent,
  /// Transaction's signer has too many transactions in the mempool.
  #[error("signer has too many transactions in the mempool")]
  TooManyInMempool,
  /// Provided Transaction added to mempool.
  #[error("provided transaction added to mempool")]
  ProvidedAddedToMempool,
}

/// Data for a signed transaction.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Signed {
  pub signer: <Ristretto as Ciphersuite>::G,
  pub nonce: u32,
  pub signature: SchnorrSignature<Ristretto>,
}

impl ReadWrite for Signed {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let signer = Ristretto::read_G(reader)?;

    let mut nonce = [0; 4];
    reader.read_exact(&mut nonce)?;
    let nonce = u32::from_le_bytes(nonce);
    if nonce >= (u32::MAX - 1) {
      Err(io::Error::other("nonce exceeded limit"))?;
    }

    let mut signature = SchnorrSignature::<Ristretto>::read(reader)?;
    if signature.R.is_identity().into() {
      // Anyone malicious could remove this and try to find zero signatures
      // We should never produce zero signatures though meaning this should never come up
      // If it does somehow come up, this is a decent courtesy
      signature.zeroize();
      Err(io::Error::other("signature nonce was identity"))?;
    }

    Ok(Signed { signer, nonce, signature })
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    // This is either an invalid signature or a private key leak
    if self.signature.R.is_identity().into() {
      Err(io::Error::other("signature nonce was identity"))?;
    }
    writer.write_all(&self.signer.to_bytes())?;
    writer.write_all(&self.nonce.to_le_bytes())?;
    self.signature.write(writer)
  }
}

impl Signed {
  pub fn read_without_nonce<R: io::Read>(reader: &mut R, nonce: u32) -> io::Result<Self> {
    let signer = Ristretto::read_G(reader)?;

    let mut signature = SchnorrSignature::<Ristretto>::read(reader)?;
    if signature.R.is_identity().into() {
      // Anyone malicious could remove this and try to find zero signatures
      // We should never produce zero signatures though meaning this should never come up
      // If it does somehow come up, this is a decent courtesy
      signature.zeroize();
      Err(io::Error::other("signature nonce was identity"))?;
    }

    Ok(Signed { signer, nonce, signature })
  }

  pub fn write_without_nonce<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    // This is either an invalid signature or a private key leak
    if self.signature.R.is_identity().into() {
      Err(io::Error::other("signature nonce was identity"))?;
    }
    writer.write_all(&self.signer.to_bytes())?;
    self.signature.write(writer)
  }
}

#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum TransactionKind {
  /// This transaction should be provided by every validator, in an exact order.
  ///
  /// The contained static string names the orderer to use. This allows two distinct provided
  /// transaction kinds, without a synchronized order, to be ordered within their own kind without
  /// requiring ordering with each other.
  ///
  /// The only malleability is in when this transaction appears on chain. The block producer will
  /// include it when they have it. Block verification will fail for validators without it.
  ///
  /// If a supermajority of validators produce a commit for a block with a provided transaction
  /// which isn't locally held, the block will be added to the local chain. When the transaction
  /// is locally provided, it will be compared for correctness to the on-chain version.
  ///
  /// In order to ensure TXs aren't accidentally provided multiple times, all provided
  /// transactions must have a unique hash which is also unique to all Unsigned transactions.
  Provided(&'static str),

  /// An unsigned transaction, only able to be included by the block producer.
  ///
  /// Once an Unsigned transaction is included on-chain, it may not be included again. In order to
  /// have multiple Unsigned transactions with the same values included on-chain, some distinct
  /// nonce must be included in order to cause a distinct hash.
  ///
  /// The hash must also be unique with all Provided transactions.
  Unsigned,

  /// A signed transaction.
  Signed(Vec<u8>, Signed),
}

// TODO: Should this be renamed TransactionTrait now that a literal Transaction exists?
// Or should the literal Transaction be renamed to Event?
pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite {
  /// Return what type of transaction this is.
  fn kind(&self) -> TransactionKind;

  /// Return the hash of this transaction.
  ///
  /// The hash must NOT commit to the signature.
  fn hash(&self) -> [u8; 32];

  /// Perform transaction-specific verification.
  fn verify(&self) -> Result<(), TransactionError>;

  /// Obtain the challenge for this transaction's signature.
  ///
  /// Do not override this unless you know what you're doing.
  ///
  /// Panics if called on non-signed transactions.
  fn sig_hash(&self, genesis: [u8; 32]) -> <Ristretto as Ciphersuite>::F {
    match self.kind() {
      TransactionKind::Signed(order, Signed { signature, .. }) => {
        <Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(
          &Blake2b512::digest(
            [
              b"Tributary Signed Transaction",
              genesis.as_ref(),
              &self.hash(),
              order.as_ref(),
              signature.R.to_bytes().as_ref(),
            ]
            .concat(),
          )
          .into(),
        )
      }
      _ => panic!("sig_hash called on non-signed transaction"),
    }
  }
}

pub trait GAIN: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32> {}
impl<F: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32>> GAIN for F {}

pub(crate) fn verify_transaction<F: GAIN, T: Transaction>(
  tx: &T,
  genesis: [u8; 32],
  get_and_increment_nonce: &mut F,
) -> Result<(), TransactionError> {
  if tx.serialize().len() > TRANSACTION_SIZE_LIMIT {
    Err(TransactionError::TooLargeTransaction)?;
  }

  tx.verify()?;

  match tx.kind() {
    TransactionKind::Provided(_) | TransactionKind::Unsigned => {}
    TransactionKind::Signed(order, Signed { signer, nonce, signature }) => {
      if let Some(next_nonce) = get_and_increment_nonce(&signer, &order) {
        if nonce != next_nonce {
          Err(TransactionError::InvalidNonce)?;
        }
      } else {
        // Not a participant
        Err(TransactionError::InvalidSigner)?;
      }

      // TODO: Use a batch verification here
      if !signature.verify(signer, tx.sig_hash(genesis)) {
        Err(TransactionError::InvalidSignature)?;
      }
    }
  }

  Ok(())
}

use core::{ops::Deref, fmt::Debug};
use std::io;

use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng};

use blake2::{digest::typenum::U32, Digest, Blake2b};
use ciphersuite::{
  group::{ff::Field, Group, GroupEncoding},
  Ciphersuite, Ristretto,
};
use schnorr::SchnorrSignature;

use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::{primitives::SeraiAddress, validator_sets::primitives::MAX_KEY_SHARES_PER_SET};

use messages::sign::VariantSignId;

use tributary_sdk::{
  ReadWrite,
  transaction::{
    Signed as TributarySigned, TransactionError, TransactionKind, Transaction as TransactionTrait,
  },
};

/// The round this data is for, within a signing protocol.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
pub enum SigningProtocolRound {
  /// A preprocess.
  Preprocess,
  /// A signature share.
  Share,
}

impl SigningProtocolRound {
  fn nonce(&self) -> u32 {
    match self {
      SigningProtocolRound::Preprocess => 0,
      SigningProtocolRound::Share => 1,
    }
  }
}

/// `tributary::Signed` but without the nonce.
///
/// All of our nonces are deterministic to the type of transaction and fields within.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Signed {
  /// The signer.
  signer: <Ristretto as Ciphersuite>::G,
  /// The signature.
  signature: SchnorrSignature<Ristretto>,
}

impl BorshSerialize for Signed {
  fn serialize<W: io::Write>(&self, writer: &mut W) -> Result<(), io::Error> {
    writer.write_all(self.signer.to_bytes().as_ref())?;
    self.signature.write(writer)
  }
}
impl BorshDeserialize for Signed {
  fn deserialize_reader<R: io::Read>(reader: &mut R) -> Result<Self, io::Error> {
    let signer = Ristretto::read_G(reader)?;
    let signature = SchnorrSignature::read(reader)?;
    Ok(Self { signer, signature })
  }
}

impl Signed {
  /// Fetch the signer.
  pub(crate) fn signer(&self) -> <Ristretto as Ciphersuite>::G {
    self.signer
  }

  /// Provide a nonce to convert a `Signed` into a `tributary::Signed`.
  fn to_tributary_signed(self, nonce: u32) -> TributarySigned {
    TributarySigned { signer: self.signer, nonce, signature: self.signature }
  }
}

impl Default for Signed {
  fn default() -> Self {
    Self {
      signer: <Ristretto as Ciphersuite>::G::identity(),
      signature: SchnorrSignature {
        R: <Ristretto as Ciphersuite>::G::identity(),
        s: <Ristretto as Ciphersuite>::F::ZERO,
      },
    }
  }
}

/// The Tributary transaction definition used by Serai
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub enum Transaction {
  /// A vote to remove a participant for invalid behavior
  RemoveParticipant {
    /// The participant to remove
    participant: SeraiAddress,
    /// The transaction's signer and signature
    signed: Signed,
  },

  /// A participation in the DKG
  DkgParticipation {
    /// The serialized participation
    participation: Vec<u8>,
    /// The transaction's signer and signature
    signed: Signed,
  },
  /// The preprocess to confirm the DKG results on-chain
  DkgConfirmationPreprocess {
    /// The attempt number of this signing protocol
    attempt: u32,
    /// The preprocess
    preprocess: [u8; 64],
    /// The transaction's signer and signature
    signed: Signed,
  },
  /// The signature share to confirm the DKG results on-chain
  DkgConfirmationShare {
    /// The attempt number of this signing protocol
    attempt: u32,
    /// The signature share
    share: [u8; 32],
    /// The transaction's signer and signature
    signed: Signed,
  },

  /// Intend to cosign a finalized Substrate block
  ///
  /// When the time comes to start a new cosigning protocol, the most recent Substrate block will
  /// be the one selected to be cosigned.
  Cosign {
    /// The hash of the Substrate block to cosign
    substrate_block_hash: [u8; 32],
  },

  /// Note an intended-to-be-cosigned Substrate block as cosigned
  ///
  /// After producing this cosign, we need to start work on the latest intended-to-be cosigned
  /// block. That requires agreement on when this cosign was produced, which we solve by noting
  /// this cosign on-chain.
  ///
  /// We ideally don't have this transaction at all. The coordinator, without access to any of the
  /// key shares, could observe the FROST signing session and determine a successful completion.
  /// Unfortunately, that functionality is not present in modular-frost, so we do need to support
  /// *some* asynchronous flow (where the processor or P2P network informs us of the successful
  /// completion).
  ///
  /// If we use a `Provided` transaction, that requires everyone observe this cosign.
  ///
  /// If we use an `Unsigned` transaction, we can't verify the cosign signature inside
  /// `Transaction::verify` unless we embedded the full `SignedCosign` on-chain. The issue is
  /// since a Tributary is stateless with regards to the on-chain logic, including
  /// `Transaction::verify`, we can't verify the signature against the group's public key unless
  /// we also include that (but then we open a DoS where arbitrary group keys are specified to
  /// cause inclusion of arbitrary blobs on chain).
  ///
  /// If we use a `Signed` transaction, we mitigate the DoS risk by having someone to fatally
  /// slash. We have horrible performance though as for 100 validators, all 100 will publish this
  /// transaction.
  ///
  /// We could use a signed `Unsigned` transaction, where it includes a signer and signature but
  /// isn't technically a Signed transaction. This lets us de-duplicate the transaction premised
  /// on its contents.
  ///
  /// The optimal choice is likely to use a `Provided` transaction. We don't actually need to
  /// observe the produced cosign (which is ephemeral). As long as it's agreed the cosign in
  /// question no longer needs to be produced, which would mean the cosigning protocol at-large
  /// cosigning the block in question, it'd be safe to provide this and move on to the next
  /// cosign.
  Cosigned {
    /// The hash of the Substrate block which was cosigned
    substrate_block_hash: [u8; 32],
  },

  /// Acknowledge a Substrate block
  ///
  /// This is provided after the block has been cosigned.
  ///
  /// With the acknowledgement of a Substrate block, we can whitelist all the `VariantSignId`s
  /// resulting from its handling.
  SubstrateBlock {
    /// The hash of the Substrate block
    hash: [u8; 32],
  },

  /// Acknowledge a Batch
  ///
  /// Once everyone has acknowledged the Batch, we can begin signing it.
  Batch {
    /// The hash of the Batch's serialization.
    ///
    /// Generally, we refer to a Batch by its ID/the hash of its instructions. Here, we want to
    /// ensure consensus on the Batch, and achieving consensus on its hash is the most effective
    /// way to do that.
    hash: [u8; 32],
  },

  /// Data from a signing protocol.
  Sign {
    /// The ID of the object being signed
    id: VariantSignId,
    /// The attempt number of this signing protocol
    attempt: u32,
    /// The round this data is for, within the signing protocol
    round: SigningProtocolRound,
    /// The data itself
    ///
    /// There will be `n` blobs of data where `n` is the amount of key shares the validator
    /// sending this transaction has.
    data: Vec<Vec<u8>>,
    /// The transaction's signer and signature
    signed: Signed,
  },

  /// The local view of slashes observed by the transaction's sender
  SlashReport {
    /// The slash points accrued by each validator
    slash_points: Vec<u32>,
    /// The transaction's signer and signature
    signed: Signed,
  },
}

impl ReadWrite for Transaction {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    borsh::from_reader(reader)
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    borsh::to_writer(writer, self)
  }
}

impl TransactionTrait for Transaction {
  fn kind(&self) -> TransactionKind {
    match self {
      Transaction::RemoveParticipant { participant, signed } => TransactionKind::Signed(
        (b"RemoveParticipant", participant).encode(),
        signed.to_tributary_signed(0),
      ),

      Transaction::DkgParticipation { signed, .. } => {
        TransactionKind::Signed(b"DkgParticipation".encode(), signed.to_tributary_signed(0))
      }
      Transaction::DkgConfirmationPreprocess { attempt, signed, .. } => TransactionKind::Signed(
        (b"DkgConfirmation", attempt).encode(),
        signed.to_tributary_signed(0),
      ),
      Transaction::DkgConfirmationShare { attempt, signed, .. } => TransactionKind::Signed(
        (b"DkgConfirmation", attempt).encode(),
        signed.to_tributary_signed(1),
      ),

      Transaction::Cosign { .. } => TransactionKind::Provided("Cosign"),
      Transaction::Cosigned { .. } => TransactionKind::Provided("Cosigned"),
      // TODO: Provide this
      Transaction::SubstrateBlock { .. } => TransactionKind::Provided("SubstrateBlock"),
      // TODO: Provide this
      Transaction::Batch { .. } => TransactionKind::Provided("Batch"),

      Transaction::Sign { id, attempt, round, signed, .. } => TransactionKind::Signed(
        (b"Sign", id, attempt).encode(),
        signed.to_tributary_signed(round.nonce()),
      ),

      Transaction::SlashReport { signed, .. } => {
        TransactionKind::Signed(b"SlashReport".encode(), signed.to_tributary_signed(0))
      }
    }
  }

  fn hash(&self) -> [u8; 32] {
    let mut tx = ReadWrite::serialize(self);
    if let TransactionKind::Signed(_, signed) = self.kind() {
      // Make sure the part we're cutting off is the signature
      assert_eq!(tx.drain((tx.len() - 64) ..).collect::<Vec<_>>(), signed.signature.serialize());
    }
    Blake2b::<U32>::digest(&tx).into()
  }

  // This is a stateless verification which we use to enforce some size limits.
  fn verify(&self) -> Result<(), TransactionError> {
    #[allow(clippy::match_same_arms)]
    match self {
      // Fixed-length TX
      Transaction::RemoveParticipant { .. } => {}

      // TODO: MAX_DKG_PARTICIPATION_LEN
      Transaction::DkgParticipation { .. } => {}
      // These are fixed-length TXs
      Transaction::DkgConfirmationPreprocess { .. } | Transaction::DkgConfirmationShare { .. } => {}

      // Provided TXs
      Transaction::Cosign { .. } |
      Transaction::Cosigned { .. } |
      Transaction::SubstrateBlock { .. } |
      Transaction::Batch { .. } => {}

      Transaction::Sign { data, .. } => {
        if data.len() > usize::from(MAX_KEY_SHARES_PER_SET) {
          Err(TransactionError::InvalidContent)?
        }
        // TODO: MAX_SIGN_LEN
      }

      Transaction::SlashReport { slash_points, .. } => {
        if slash_points.len() > usize::from(MAX_KEY_SHARES_PER_SET) {
          Err(TransactionError::InvalidContent)?
        }
      }
    };
    Ok(())
  }
}

impl Transaction {
  /// Sign a transaction.
  ///
  /// Panics if signing a transaction whose type isn't `TransactionKind::Signed`.
  pub fn sign<R: RngCore + CryptoRng>(
    &mut self,
    rng: &mut R,
    genesis: [u8; 32],
    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  ) {
    fn signed(tx: &mut Transaction) -> &mut Signed {
      #[allow(clippy::match_same_arms)] // This doesn't make semantic sense here
      match tx {
        Transaction::RemoveParticipant { ref mut signed, .. } |
        Transaction::DkgParticipation { ref mut signed, .. } |
        Transaction::DkgConfirmationPreprocess { ref mut signed, .. } => signed,
        Transaction::DkgConfirmationShare { ref mut signed, .. } => signed,

        Transaction::Cosign { .. } => panic!("signing CosignSubstrateBlock"),
        Transaction::Cosigned { .. } => panic!("signing Cosigned"),
        Transaction::SubstrateBlock { .. } => panic!("signing SubstrateBlock"),
        Transaction::Batch { .. } => panic!("signing Batch"),

        Transaction::Sign { ref mut signed, .. } => signed,

        Transaction::SlashReport { ref mut signed, .. } => signed,
      }
    }

    // Decide the nonce to sign with
    let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));

    {
      // Set the signer and the nonce
      let signed = signed(self);
      signed.signer = Ristretto::generator() * key.deref();
      signed.signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();
    }

    // Get the signature hash (which now includes `R || A` making it valid as the challenge)
    let sig_hash = self.sig_hash(genesis);

    // Sign the signature
    signed(self).signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);
  }
}
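(Aside: a sketch of the sign-then-verify flow the `Transaction::sign`/`sig_hash` pair above implements, using the same ciphersuite and schnorr crates. The stand-in challenge is an assumption for illustration; real transactions derive it from the transaction hash, which commits to `R`.)

use core::ops::Deref;
use zeroize::Zeroizing;
use rand_core::OsRng;
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use schnorr::SchnorrSignature;

fn main() {
  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
  let signer = Ristretto::generator() * key.deref();

  // Fix the signature nonce first, as `Transaction::sign` does, so the challenge can commit to R
  let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
  // Stand-in for `tx.sig_hash(genesis)`
  let challenge = <Ristretto as Ciphersuite>::F::random(&mut OsRng);

  let sig = SchnorrSignature::<Ristretto>::sign(&key, sig_nonce, challenge);
  assert!(sig.verify(signer, challenge));
}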
@@ -72,9 +72,12 @@ exceptions = [
  { allow = ["AGPL-3.0"], name = "serai-ethereum-processor" },
  { allow = ["AGPL-3.0"], name = "serai-monero-processor" },

  { allow = ["AGPL-3.0"], name = "tributary-chain" },
  { allow = ["AGPL-3.0"], name = "tributary-sdk" },
  { allow = ["AGPL-3.0"], name = "serai-cosign" },
  { allow = ["AGPL-3.0"], name = "serai-coordinator-substrate" },
  { allow = ["AGPL-3.0"], name = "serai-coordinator-tributary" },
  { allow = ["AGPL-3.0"], name = "serai-coordinator-p2p" },
  { allow = ["AGPL-3.0"], name = "serai-coordinator-libp2p-p2p" },
  { allow = ["AGPL-3.0"], name = "serai-coordinator" },

  { allow = ["AGPL-3.0"], name = "serai-coins-pallet" },
@@ -64,22 +64,20 @@ impl MessageQueue {
    Self::new(service, url, priv_key)
  }

  #[must_use]
  async fn send(socket: &mut TcpStream, msg: MessageQueueRequest) -> bool {
  async fn send(socket: &mut TcpStream, msg: MessageQueueRequest) -> Result<(), String> {
    let msg = borsh::to_vec(&msg).unwrap();
    let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else {
      log::warn!("couldn't send the message len");
      return false;
    };
    match socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await {
      Ok(()) => {}
      Err(e) => Err(format!("couldn't send the message len: {e:?}"))?,
    }
    let Ok(()) = socket.write_all(&msg).await else {
      log::warn!("couldn't write the message");
      return false;
    };
    true
    match socket.write_all(&msg).await {
      Ok(()) => {}
      Err(e) => Err(format!("couldn't write the message: {e:?}"))?,
    }
    Ok(())
  }

  pub async fn queue(&self, metadata: Metadata, msg: Vec<u8>) {
  // TODO: Should this use OsRng? Deterministic or deterministic + random may be better.
  pub async fn queue(&self, metadata: Metadata, msg: Vec<u8>) -> Result<(), String> {
    let nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
    let nonce_pub = Ristretto::generator() * nonce.deref();
    let sig = SchnorrSignature::<Ristretto>::sign(
@@ -97,6 +95,21 @@ impl MessageQueue {
      .serialize();

    let msg = MessageQueueRequest::Queue { meta: metadata, msg, sig };

    let mut socket = match TcpStream::connect(&self.url).await {
      Ok(socket) => socket,
      Err(e) => Err(format!("failed to connect to the message-queue service: {e:?}"))?,
    };
    Self::send(&mut socket, msg.clone()).await?;
    match socket.read_u8().await {
      Ok(1) => {}
      Ok(b) => Err(format!("message-queue didn't return 1 for its ack, received: {b}"))?,
      Err(e) => Err(format!("failed to read the response from the message-queue service: {e:?}"))?,
    }
    Ok(())
  }

  pub async fn queue_with_retry(&self, metadata: Metadata, msg: Vec<u8>) {
    let mut first = true;
    loop {
      // Sleep, so we don't hammer re-attempts
@@ -105,14 +118,9 @@ impl MessageQueue {
      }
      first = false;

      let Ok(mut socket) = TcpStream::connect(&self.url).await else { continue };
      if !Self::send(&mut socket, msg.clone()).await {
        continue;
      }
      if socket.read_u8().await.ok() != Some(1) {
        continue;
      }
      break;

      if self.queue(metadata.clone(), msg.clone()).await.is_ok() {
        break;
      }
    }
  }
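(Aside: a hypothetical call site for the reworked API, assuming `Metadata: Clone`; it shows why `queue_with_retry` can now be a thin loop over `queue`.)

async fn publish(queue: &MessageQueue, metadata: Metadata, msg: Vec<u8>) {
  // Try once, surfacing the error to the caller's logs; fall back to the
  // retry-until-acked variant where delivery must eventually succeed
  if let Err(e) = queue.queue(metadata.clone(), msg.clone()).await {
    log::warn!("queue failed, falling back to retry: {e}");
    queue.queue_with_retry(metadata, msg).await;
  }
}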
@@ -136,7 +144,7 @@ impl MessageQueue {
    log::trace!("opened socket for next");

    loop {
      if !Self::send(&mut socket, msg.clone()).await {
      if Self::send(&mut socket, msg.clone()).await.is_err() {
        continue 'outer;
      }
      let status = match socket.read_u8().await {
@@ -224,7 +232,7 @@ impl MessageQueue {
      first = false;

      let Ok(mut socket) = TcpStream::connect(&self.url).await else { continue };
      if !Self::send(&mut socket, msg.clone()).await {
      if Self::send(&mut socket, msg.clone()).await.is_err() {
        continue;
      }
      if socket.read_u8().await.ok() != Some(1) {
@@ -95,6 +95,7 @@ impl Coordinator {
          message_queue.ack(Service::Coordinator, msg.id).await;

          // Fire that there's a new message
          // This assumes the success path, not the just-rebooted-path
          received_message_send
            .send(())
            .expect("failed to tell the Coordinator there's a new message");
@@ -103,6 +104,7 @@ impl Coordinator {
    });

    // Spawn a task to send messages to the message-queue
    // TODO: Define a proper task for this and remove use of queue_with_retry
    tokio::spawn({
      let mut db = db.clone();
      async move {
@@ -115,12 +117,12 @@ impl Coordinator {
              to: Service::Coordinator,
              intent: borsh::from_slice::<messages::ProcessorMessage>(&msg).unwrap().intent(),
            };
            message_queue.queue(metadata, msg).await;
            message_queue.queue_with_retry(metadata, msg).await;
            txn.commit();
          }
          None => {
            let _ =
              tokio::time::timeout(core::time::Duration::from_secs(60), sent_message_recv.recv())
              tokio::time::timeout(core::time::Duration::from_secs(6), sent_message_recv.recv())
                .await;
          }
        }
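(Aside: a minimal sketch of the wake-or-timeout loop used above, with hypothetical names; the real task also reads from and commits to the database.)

use tokio::sync::mpsc;

async fn pump(mut next_msg: impl FnMut() -> Option<Vec<u8>>, mut wake: mpsc::UnboundedReceiver<()>) {
  loop {
    match next_msg() {
      // A queued message: send it, then loop immediately to check for more
      Some(msg) => drop(msg),
      // Nothing queued: sleep until woken or until the timeout forces a re-check
      None => {
        let _ = tokio::time::timeout(core::time::Duration::from_secs(6), wake.recv()).await;
      }
    }
  }
}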
@@ -39,7 +39,9 @@ pub(crate) fn script_pubkey_for_on_chain_output(
pub(crate) struct TxIndexTask<D: Db>(pub(crate) Rpc<D>);

impl<D: Db> ContinuallyRan for TxIndexTask<D> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let latest_block_number = self
        .0
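(Aside: the shape of the revised trait as a sketch; the actual definition lives in `primitives::task` and may differ in details.)

use core::future::Future;

trait ContinuallyRan {
  // The error is now an associated type, so infallible tasks can use a
  // never-style type (e.g. DoesNotError) instead of a blanket String
  type Error;
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>>;
}

struct Noop;
impl ContinuallyRan for Noop {
  type Error = String;
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    // Report no progress made, and no error
    async move { Ok(false) }
  }
}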
@@ -29,3 +29,5 @@ serai-primitives = { path = "../../substrate/primitives", default-features = fal
in-instructions-primitives = { package = "serai-in-instructions-primitives", path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std", "borsh"] }
coins-primitives = { package = "serai-coins-primitives", path = "../../substrate/coins/primitives", default-features = false, features = ["std", "borsh"] }
validator-sets-primitives = { package = "serai-validator-sets-primitives", path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std", "borsh"] }

serai-cosign = { path = "../../coordinator/cosign", default-features = false }
@@ -11,6 +11,8 @@ use validator_sets_primitives::{Session, KeyPair, Slash};
|
||||
use coins_primitives::OutInstructionWithBalance;
|
||||
use in_instructions_primitives::SignedBatch;
|
||||
|
||||
use serai_cosign::{CosignIntent, SignedCosign};
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub struct SubstrateContext {
|
||||
pub serai_time: u64,
|
||||
@@ -22,9 +24,13 @@ pub mod key_gen {
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
|
||||
pub enum CoordinatorMessage {
|
||||
// Instructs the Processor to begin the key generation process.
|
||||
/// Instructs the Processor to begin the key generation process.
|
||||
///
|
||||
/// This is sent by the Coordinator when it creates the Tributary.
|
||||
GenerateKey { session: Session, threshold: u16, evrf_public_keys: Vec<([u8; 32], Vec<u8>)> },
|
||||
// Received participations for the specified key generation protocol.
|
||||
/// Received participations for the specified key generation protocol.
|
||||
///
|
||||
/// This is sent by the Coordinator's Tributary scanner.
|
||||
Participation { session: Session, participant: Participant, participation: Vec<u8> },
|
||||
}
|
||||
|
||||
@@ -46,7 +52,8 @@ pub mod key_gen {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
|
||||
// This set of messages is sent entirely and solely by serai-processor-key-gen.
|
||||
#[derive(Clone, BorshSerialize, BorshDeserialize)]
|
||||
pub enum ProcessorMessage {
|
||||
// Participated in the specified key generation protocol.
|
||||
Participation { session: Session, participation: Vec<u8> },
|
||||
@@ -113,11 +120,17 @@ pub mod sign {
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub enum CoordinatorMessage {
|
||||
// Received preprocesses for the specified signing protocol.
|
||||
/// Received preprocesses for the specified signing protocol.
|
||||
///
|
||||
/// This is sent by the Coordinator's Tributary scanner.
|
||||
Preprocesses { id: SignId, preprocesses: HashMap<Participant, Vec<u8>> },
|
||||
// Received shares for the specified signing protocol.
|
||||
///
|
||||
/// This is sent by the Coordinator's Tributary scanner.
|
||||
Shares { id: SignId, shares: HashMap<Participant, Vec<u8>> },
|
||||
// Re-attempt a signing protocol.
|
||||
///
|
||||
/// This is sent by the Coordinator's Tributary re-attempt scheduling logic.
|
||||
Reattempt { id: SignId },
|
||||
}
|
||||
|
||||
@@ -131,7 +144,8 @@ pub mod sign {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
// This set of messages is sent entirely and solely by serai-processor-frost-attempt-manager.
|
||||
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub enum ProcessorMessage {
|
||||
// Participant sent an invalid message during the sign protocol.
|
||||
InvalidParticipant { session: Session, participant: Participant },
|
||||
@@ -145,33 +159,25 @@ pub mod sign {
|
||||
pub mod coordinator {
|
||||
use super::*;
|
||||
|
||||
// TODO: Remove this for the one defined in serai-cosign
|
||||
pub fn cosign_block_msg(block_number: u64, block: [u8; 32]) -> Vec<u8> {
|
||||
const DST: &[u8] = b"Cosign";
|
||||
let mut res = vec![u8::try_from(DST.len()).unwrap()];
|
||||
res.extend(DST);
|
||||
res.extend(block_number.to_le_bytes());
|
||||
res.extend(block);
|
||||
res
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub enum CoordinatorMessage {
|
||||
CosignSubstrateBlock { session: Session, block_number: u64, block: [u8; 32] },
|
||||
/// Cosign the specified Substrate block.
|
||||
///
|
||||
/// This is sent by the Coordinator's Tributary scanner.
|
||||
CosignSubstrateBlock { session: Session, intent: CosignIntent },
|
||||
/// Sign the slash report for this session.
|
||||
///
|
||||
/// This is sent by the Coordinator's Tributary scanner.
|
||||
SignSlashReport { session: Session, report: Vec<Slash> },
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub struct PlanMeta {
|
||||
pub session: Session,
|
||||
pub id: [u8; 32],
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
// This set of messages is sent entirely and solely by serai-processor-bin's implementation of
|
||||
// the signers::Coordinator trait.
|
||||
// TODO: Move message creation into serai-processor-signers
|
||||
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub enum ProcessorMessage {
|
||||
CosignedBlock { block_number: u64, block: [u8; 32], signature: Vec<u8> },
|
||||
CosignedBlock { cosign: SignedCosign },
|
||||
SignedBatch { batch: SignedBatch },
|
||||
SubstrateBlockAck { block: u64, plans: Vec<PlanMeta> },
|
||||
SignedSlashReport { session: Session, signature: Vec<u8> },
|
||||
}
|
||||
}
|
||||
@@ -196,12 +202,18 @@ pub mod substrate {
|
||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub enum CoordinatorMessage {
|
||||
/// Keys set on the Serai blockchain.
|
||||
///
|
||||
/// This is sent by the Coordinator's Substrate canonical event stream.
|
||||
SetKeys { serai_time: u64, session: Session, key_pair: KeyPair },
|
||||
/// Slashes reported on the Serai blockchain OR the process timed out.
|
||||
///
|
||||
/// This is the final message for a session,
|
||||
///
|
||||
/// This is sent by the Coordinator's Substrate canonical event stream.
|
||||
SlashesReported { session: Session },
|
||||
/// A block from Serai with relevance to this processor.
|
||||
///
|
||||
/// This is sent by the Coordinator's Substrate canonical event stream.
|
||||
Block {
|
||||
serai_block_number: u64,
|
||||
batch: Option<ExecutedBatch>,
|
||||
@@ -209,17 +221,16 @@ pub mod substrate {
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub enum ProcessorMessage {}
|
||||
impl BorshSerialize for ProcessorMessage {
|
||||
fn serialize<W: borsh::io::Write>(&self, _writer: &mut W) -> borsh::io::Result<()> {
|
||||
unimplemented!()
|
||||
}
|
||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub struct PlanMeta {
|
||||
pub session: Session,
|
||||
pub transaction_plan_id: [u8; 32],
|
||||
}
|
||||
impl BorshDeserialize for ProcessorMessage {
|
||||
fn deserialize_reader<R: borsh::io::Read>(_reader: &mut R) -> borsh::io::Result<Self> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub enum ProcessorMessage {
|
||||
// TODO: Have the processor send this
|
||||
SubstrateBlockAck { block: [u8; 32], plans: Vec<PlanMeta> },
|
||||
}
|
||||
}
|
||||
|
||||
@@ -246,7 +257,7 @@ impl_from!(sign, CoordinatorMessage, Sign);
impl_from!(coordinator, CoordinatorMessage, Coordinator);
impl_from!(substrate, CoordinatorMessage, Substrate);

#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub enum ProcessorMessage {
  KeyGen(key_gen::ProcessorMessage),
  Sign(sign::ProcessorMessage),
@@ -309,8 +320,8 @@ impl CoordinatorMessage {
      CoordinatorMessage::Coordinator(msg) => {
        let (sub, id) = match msg {
          // We only cosign a block once, and Reattempt is a separate message
          coordinator::CoordinatorMessage::CosignSubstrateBlock { block_number, .. } => {
            (0, block_number.encode())
          coordinator::CoordinatorMessage::CosignSubstrateBlock { intent, .. } => {
            (0, intent.block_number.encode())
          }
          // We only sign one slash report, and Reattempt is a separate message
          coordinator::CoordinatorMessage::SignSlashReport { session, .. } => (1, session.encode()),
@@ -382,17 +393,26 @@ impl ProcessorMessage {
      }
      ProcessorMessage::Coordinator(msg) => {
        let (sub, id) = match msg {
          coordinator::ProcessorMessage::CosignedBlock { block, .. } => (0, block.encode()),
          coordinator::ProcessorMessage::CosignedBlock { cosign } => {
            (0, cosign.cosign.block_hash.encode())
          }
          coordinator::ProcessorMessage::SignedBatch { batch, .. } => (1, batch.batch.id.encode()),
          coordinator::ProcessorMessage::SubstrateBlockAck { block, .. } => (2, block.encode()),
          coordinator::ProcessorMessage::SignedSlashReport { session, .. } => (3, session.encode()),
          coordinator::ProcessorMessage::SignedSlashReport { session, .. } => (2, session.encode()),
        };

        let mut res = vec![PROCESSOR_UID, TYPE_COORDINATOR_UID, sub];
        res.extend(&id);
        res
      }
      ProcessorMessage::Substrate(_) => panic!("requesting intent for empty message type"),
      ProcessorMessage::Substrate(msg) => {
        let (sub, id) = match msg {
          substrate::ProcessorMessage::SubstrateBlockAck { block, .. } => (0, block.encode()),
        };

        let mut res = vec![PROCESSOR_UID, TYPE_SUBSTRATE_UID, sub];
        res.extend(&id);
        res
      }
    }
  }
}
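For context on the intent functions patched above: each message's intent is a byte key, unique per logical message, which the message queue uses to deduplicate retries. A sketch of the scheme with illustrative constants (the real UID values are defined elsewhere in this crate), assuming `encode()` is parity-scale-codec's Encode:

use parity_scale_codec::Encode;

const PROCESSOR_UID: u8 = 0x80; // hypothetical value, for illustration only
const TYPE_SUBSTRATE_UID: u8 = 0x03; // hypothetical value, for illustration only

fn substrate_block_ack_intent(block: [u8; 32]) -> Vec<u8> {
  // [sender uid, message-type uid, sub-type] ++ SCALE-encoded discriminating id
  let sub = 0; // SubstrateBlockAck is the sole sub-type of Substrate messages here
  let mut res = vec![PROCESSOR_UID, TYPE_SUBSTRATE_UID, sub];
  res.extend(block.encode());
  res
}

fn main() {
  let intent = substrate_block_ack_intent([0; 32]);
  assert_eq!(intent.len(), 3 + 32); // SCALE encodes [u8; 32] as its raw bytes
  // Re-sending the same ack yields the same intent, so the queue can drop the duplicate
  assert_eq!(intent, substrate_block_ack_intent([0; 32]));
}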
|
||||
@@ -7,7 +7,10 @@ use serai_db::{DbTxn, Db};

use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch};

use primitives::{EncodableG, task::ContinuallyRan};
use primitives::{
  EncodableG,
  task::{DoesNotError, ContinuallyRan},
};
use crate::{
  db::{Returnable, ScannerGlobalDb, InInstructionData, ScanToBatchDb, BatchData, BatchToReportDb},
  index,
@@ -60,7 +63,9 @@ impl<D: Db, S: ScannerFeed> BatchTask<D, S> {
}

impl<D: Db, S: ScannerFeed> ContinuallyRan for BatchTask<D, S> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let highest_batchable = {
        // Fetch the next to scan block
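The recurring change across the scanner-task hunks (one above, more below): ContinuallyRan now carries an associated Error type, with the uninhabited DoesNotError for tasks whose iterations cannot fail. A minimal sketch of that trait shape, not the crate's exact definition (which may carry extra bounds or provided methods):

use core::{fmt::Debug, future::Future};

// An enum with no variants; values of this type cannot be constructed
#[derive(Debug)]
pub enum DoesNotError {}

pub trait ContinuallyRan: Sized {
  /// The error an iteration may produce; uninhabited if it cannot error.
  type Error: Debug;

  /// Run a single iteration, returning whether any progress was made.
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>>;
}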
@@ -190,7 +190,9 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> EventualityTask<D, S, Sch> {
}

impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTask<D, S, Sch> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      // Fetch the highest acknowledged block
      let Some(highest_acknowledged) = ScannerGlobalDb::<S>::highest_acknowledged_block(&self.db)
@@ -58,7 +58,9 @@ impl<D: Db, S: ScannerFeed> IndexTask<D, S> {
}

impl<D: Db, S: ScannerFeed> ContinuallyRan for IndexTask<D, S> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      // Fetch the latest finalized block
      let our_latest_finalized = IndexDb::latest_finalized_block(&self.db)
@@ -4,7 +4,7 @@ use serai_db::{DbTxn, Db};

use serai_validator_sets_primitives::Session;

use primitives::task::ContinuallyRan;
use primitives::task::{DoesNotError, ContinuallyRan};
use crate::{
  db::{BatchData, BatchToReportDb, BatchesToSign},
  substrate, ScannerFeed,
@@ -27,7 +27,9 @@ impl<D: Db, S: ScannerFeed> ReportTask<D, S> {
}

impl<D: Db, S: ScannerFeed> ContinuallyRan for ReportTask<D, S> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
      loop {
@@ -98,7 +98,9 @@ impl<D: Db, S: ScannerFeed> ScanTask<D, S> {
}

impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanTask<D, S> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      // Fetch the safe to scan block
      let latest_scannable =
@@ -5,7 +5,7 @@ use serai_db::{Get, DbTxn, Db};
use serai_coins_primitives::{OutInstruction, OutInstructionWithBalance};

use messages::substrate::ExecutedBatch;
use primitives::task::ContinuallyRan;
use primitives::task::{DoesNotError, ContinuallyRan};
use crate::{
  db::{ScannerGlobalDb, SubstrateToEventualityDb, AcknowledgedBatches},
  index, batch, ScannerFeed, KeyFor,
@@ -50,7 +50,9 @@ impl<D: Db, S: ScannerFeed> SubstrateTask<D, S> {
}

impl<D: Db, S: ScannerFeed> ContinuallyRan for SubstrateTask<D, S> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
      loop {
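One payoff of the uninhabited error type, sketched under the same assumptions as above: callers can discharge the Err arm with an empty match, so infallibility is enforced by the compiler rather than by a runtime unwrap:

enum DoesNotError {}

fn unwrap_infallible(result: Result<bool, DoesNotError>) -> bool {
  match result {
    Ok(made_progress) => made_progress,
    // DoesNotError has no variants, so this arm is statically unreachable
    Err(e) => match e {},
  }
}

fn main() {
  assert!(unwrap_infallible(Ok(true)));
}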
@@ -24,6 +24,7 @@ workspace = true
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "1", default-features = false, features = ["std"] }

blake2 = { version = "0.10", default-features = false, features = ["std"] }
ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] }
frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false }
frost-schnorrkel = { path = "../../crypto/schnorrkel", default-features = false }
Some files were not shown because too many files have changed in this diff