mirror of
https://github.com/serai-dex/serai.git
synced 2025-12-12 14:09:25 +00:00
Compare commits
32 Commits
201a444e89
...
undroppabl
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ce3b90541e | ||
|
|
cb410cc4e0 | ||
|
|
6c145a5ec3 | ||
|
|
a7fef2ba7a | ||
|
|
291ebf5e24 | ||
|
|
5e0e91c85d | ||
|
|
b5a6b0693e | ||
|
|
3cc2abfedc | ||
|
|
0ce9aad9b2 | ||
|
|
e35aa04afb | ||
|
|
e7de5125a2 | ||
|
|
158140c3a7 | ||
|
|
df9a9adaa8 | ||
|
|
d854807edd | ||
|
|
f501d46d44 | ||
|
|
74106b025f | ||
|
|
e731b546ab | ||
|
|
77d60660d2 | ||
|
|
3c664ff05f | ||
|
|
c05b0c9eba | ||
|
|
6d5049cab2 | ||
|
|
1419ba570a | ||
|
|
542bf2170a | ||
|
|
378d6b90cf | ||
|
|
cbe83956aa | ||
|
|
091d485fd8 | ||
|
|
2a3eaf4d7e | ||
|
|
23122712cb | ||
|
|
47eb793ce9 | ||
|
|
9b0b5fd1e2 | ||
|
|
893a24a1cc | ||
|
|
b101e2211a |
5
.github/workflows/msrv.yml
vendored
5
.github/workflows/msrv.yml
vendored
@@ -173,10 +173,11 @@ jobs:
|
|||||||
|
|
||||||
- name: Run cargo msrv on coordinator
|
- name: Run cargo msrv on coordinator
|
||||||
run: |
|
run: |
|
||||||
cargo msrv verify --manifest-path coordinator/tributary/tendermint/Cargo.toml
|
cargo msrv verify --manifest-path coordinator/tributary-sdk/tendermint/Cargo.toml
|
||||||
cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml
|
cargo msrv verify --manifest-path coordinator/tributary-sdk/Cargo.toml
|
||||||
cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml
|
cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml
|
||||||
cargo msrv verify --manifest-path coordinator/substrate/Cargo.toml
|
cargo msrv verify --manifest-path coordinator/substrate/Cargo.toml
|
||||||
|
cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml
|
||||||
cargo msrv verify --manifest-path coordinator/p2p/Cargo.toml
|
cargo msrv verify --manifest-path coordinator/p2p/Cargo.toml
|
||||||
cargo msrv verify --manifest-path coordinator/p2p/libp2p/Cargo.toml
|
cargo msrv verify --manifest-path coordinator/p2p/libp2p/Cargo.toml
|
||||||
cargo msrv verify --manifest-path coordinator/Cargo.toml
|
cargo msrv verify --manifest-path coordinator/Cargo.toml
|
||||||
|
|||||||
3
.github/workflows/tests.yml
vendored
3
.github/workflows/tests.yml
vendored
@@ -60,9 +60,10 @@ jobs:
|
|||||||
-p serai-ethereum-processor \
|
-p serai-ethereum-processor \
|
||||||
-p serai-monero-processor \
|
-p serai-monero-processor \
|
||||||
-p tendermint-machine \
|
-p tendermint-machine \
|
||||||
-p tributary-chain \
|
-p tributary-sdk \
|
||||||
-p serai-cosign \
|
-p serai-cosign \
|
||||||
-p serai-coordinator-substrate \
|
-p serai-coordinator-substrate \
|
||||||
|
-p serai-coordinator-tributary \
|
||||||
-p serai-coordinator-p2p \
|
-p serai-coordinator-p2p \
|
||||||
-p serai-coordinator-libp2p-p2p \
|
-p serai-coordinator-libp2p-p2p \
|
||||||
-p serai-coordinator \
|
-p serai-coordinator \
|
||||||
|
|||||||
63
Cargo.lock
generated
63
Cargo.lock
generated
@@ -840,18 +840,6 @@ dependencies = [
|
|||||||
"futures-core",
|
"futures-core",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "async-channel"
|
|
||||||
version = "2.3.1"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a"
|
|
||||||
dependencies = [
|
|
||||||
"concurrent-queue",
|
|
||||||
"event-listener-strategy",
|
|
||||||
"futures-core",
|
|
||||||
"pin-project-lite",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "async-io"
|
name = "async-io"
|
||||||
version = "2.4.0"
|
version = "2.4.0"
|
||||||
@@ -7465,7 +7453,7 @@ version = "0.10.0-dev"
|
|||||||
source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a"
|
source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"array-bytes",
|
"array-bytes",
|
||||||
"async-channel 1.9.0",
|
"async-channel",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"asynchronous-codec",
|
"asynchronous-codec",
|
||||||
"bytes",
|
"bytes",
|
||||||
@@ -7506,7 +7494,7 @@ name = "sc-network-bitswap"
|
|||||||
version = "0.10.0-dev"
|
version = "0.10.0-dev"
|
||||||
source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a"
|
source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-channel 1.9.0",
|
"async-channel",
|
||||||
"cid",
|
"cid",
|
||||||
"futures",
|
"futures",
|
||||||
"libp2p-identity",
|
"libp2p-identity",
|
||||||
@@ -7563,7 +7551,7 @@ version = "0.10.0-dev"
|
|||||||
source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a"
|
source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"array-bytes",
|
"array-bytes",
|
||||||
"async-channel 1.9.0",
|
"async-channel",
|
||||||
"futures",
|
"futures",
|
||||||
"libp2p-identity",
|
"libp2p-identity",
|
||||||
"log",
|
"log",
|
||||||
@@ -7584,7 +7572,7 @@ version = "0.10.0-dev"
|
|||||||
source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a"
|
source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"array-bytes",
|
"array-bytes",
|
||||||
"async-channel 1.9.0",
|
"async-channel",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"fork-tree",
|
"fork-tree",
|
||||||
"futures",
|
"futures",
|
||||||
@@ -7958,7 +7946,7 @@ name = "sc-utils"
|
|||||||
version = "4.0.0-dev"
|
version = "4.0.0-dev"
|
||||||
source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a"
|
source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-channel 1.9.0",
|
"async-channel",
|
||||||
"futures",
|
"futures",
|
||||||
"futures-timer",
|
"futures-timer",
|
||||||
"lazy_static",
|
"lazy_static",
|
||||||
@@ -8331,7 +8319,6 @@ dependencies = [
|
|||||||
"borsh",
|
"borsh",
|
||||||
"ciphersuite",
|
"ciphersuite",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
"flexible-transcript",
|
|
||||||
"frost-schnorrkel",
|
"frost-schnorrkel",
|
||||||
"hex",
|
"hex",
|
||||||
"log",
|
"log",
|
||||||
@@ -8344,15 +8331,15 @@ dependencies = [
|
|||||||
"serai-coordinator-libp2p-p2p",
|
"serai-coordinator-libp2p-p2p",
|
||||||
"serai-coordinator-p2p",
|
"serai-coordinator-p2p",
|
||||||
"serai-coordinator-substrate",
|
"serai-coordinator-substrate",
|
||||||
|
"serai-coordinator-tributary",
|
||||||
"serai-cosign",
|
"serai-cosign",
|
||||||
"serai-db",
|
"serai-db",
|
||||||
"serai-env",
|
"serai-env",
|
||||||
"serai-message-queue",
|
"serai-message-queue",
|
||||||
"serai-processor-messages",
|
"serai-processor-messages",
|
||||||
"serai-task",
|
"serai-task",
|
||||||
"sp-application-crypto",
|
"tokio",
|
||||||
"sp-runtime",
|
"tributary-sdk",
|
||||||
"tributary-chain",
|
|
||||||
"zalloc",
|
"zalloc",
|
||||||
"zeroize",
|
"zeroize",
|
||||||
]
|
]
|
||||||
@@ -8375,8 +8362,7 @@ dependencies = [
|
|||||||
"serai-cosign",
|
"serai-cosign",
|
||||||
"serai-task",
|
"serai-task",
|
||||||
"tokio",
|
"tokio",
|
||||||
"tributary-chain",
|
"tributary-sdk",
|
||||||
"void",
|
|
||||||
"zeroize",
|
"zeroize",
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -8384,7 +8370,6 @@ dependencies = [
|
|||||||
name = "serai-coordinator-p2p"
|
name = "serai-coordinator-p2p"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-channel 2.3.1",
|
|
||||||
"borsh",
|
"borsh",
|
||||||
"futures-lite",
|
"futures-lite",
|
||||||
"log",
|
"log",
|
||||||
@@ -8392,13 +8377,15 @@ dependencies = [
|
|||||||
"serai-cosign",
|
"serai-cosign",
|
||||||
"serai-db",
|
"serai-db",
|
||||||
"serai-task",
|
"serai-task",
|
||||||
"tributary-chain",
|
"tokio",
|
||||||
|
"tributary-sdk",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "serai-coordinator-substrate"
|
name = "serai-coordinator-substrate"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"bitvec",
|
||||||
"borsh",
|
"borsh",
|
||||||
"futures",
|
"futures",
|
||||||
"log",
|
"log",
|
||||||
@@ -8415,7 +8402,6 @@ dependencies = [
|
|||||||
name = "serai-coordinator-tests"
|
name = "serai-coordinator-tests"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-trait",
|
|
||||||
"blake2",
|
"blake2",
|
||||||
"borsh",
|
"borsh",
|
||||||
"ciphersuite",
|
"ciphersuite",
|
||||||
@@ -8436,6 +8422,27 @@ dependencies = [
|
|||||||
"zeroize",
|
"zeroize",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "serai-coordinator-tributary"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"blake2",
|
||||||
|
"borsh",
|
||||||
|
"ciphersuite",
|
||||||
|
"log",
|
||||||
|
"parity-scale-codec",
|
||||||
|
"rand_core",
|
||||||
|
"schnorr-signatures",
|
||||||
|
"serai-client",
|
||||||
|
"serai-coordinator-substrate",
|
||||||
|
"serai-cosign",
|
||||||
|
"serai-db",
|
||||||
|
"serai-processor-messages",
|
||||||
|
"serai-task",
|
||||||
|
"tributary-sdk",
|
||||||
|
"zeroize",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "serai-cosign"
|
name = "serai-cosign"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
@@ -8597,7 +8604,6 @@ dependencies = [
|
|||||||
name = "serai-full-stack-tests"
|
name = "serai-full-stack-tests"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-trait",
|
|
||||||
"bitcoin-serai",
|
"bitcoin-serai",
|
||||||
"curve25519-dalek",
|
"curve25519-dalek",
|
||||||
"dockertest",
|
"dockertest",
|
||||||
@@ -8993,6 +8999,7 @@ dependencies = [
|
|||||||
"hex",
|
"hex",
|
||||||
"parity-scale-codec",
|
"parity-scale-codec",
|
||||||
"serai-coins-primitives",
|
"serai-coins-primitives",
|
||||||
|
"serai-cosign",
|
||||||
"serai-in-instructions-primitives",
|
"serai-in-instructions-primitives",
|
||||||
"serai-primitives",
|
"serai-primitives",
|
||||||
"serai-validator-sets-primitives",
|
"serai-validator-sets-primitives",
|
||||||
@@ -10989,7 +10996,7 @@ dependencies = [
|
|||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tributary-chain"
|
name = "tributary-sdk"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"blake2",
|
"blake2",
|
||||||
|
|||||||
@@ -96,10 +96,11 @@ members = [
|
|||||||
"processor/ethereum",
|
"processor/ethereum",
|
||||||
"processor/monero",
|
"processor/monero",
|
||||||
|
|
||||||
"coordinator/tributary/tendermint",
|
"coordinator/tributary-sdk/tendermint",
|
||||||
"coordinator/tributary",
|
"coordinator/tributary-sdk",
|
||||||
"coordinator/cosign",
|
"coordinator/cosign",
|
||||||
"coordinator/substrate",
|
"coordinator/substrate",
|
||||||
|
"coordinator/tributary",
|
||||||
"coordinator/p2p",
|
"coordinator/p2p",
|
||||||
"coordinator/p2p/libp2p",
|
"coordinator/p2p/libp2p",
|
||||||
"coordinator",
|
"coordinator",
|
||||||
|
|||||||
@@ -30,13 +30,53 @@ pub trait Get {
|
|||||||
/// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
|
/// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
|
||||||
/// randomly, or any other action, at time of write or at time of commit.
|
/// randomly, or any other action, at time of write or at time of commit.
|
||||||
#[must_use]
|
#[must_use]
|
||||||
pub trait DbTxn: Send + Get {
|
pub trait DbTxn: Sized + Send + Get {
|
||||||
/// Write a value to this key.
|
/// Write a value to this key.
|
||||||
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
|
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
|
||||||
/// Delete the value from this key.
|
/// Delete the value from this key.
|
||||||
fn del(&mut self, key: impl AsRef<[u8]>);
|
fn del(&mut self, key: impl AsRef<[u8]>);
|
||||||
/// Commit this transaction.
|
/// Commit this transaction.
|
||||||
fn commit(self);
|
fn commit(self);
|
||||||
|
/// Close this transaction.
|
||||||
|
///
|
||||||
|
/// This is equivalent to `Drop` on transactions which can be dropped. This is explicit and works
|
||||||
|
/// with transactions which can't be dropped.
|
||||||
|
fn close(self) {
|
||||||
|
drop(self);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Credit for the idea goes to https://jack.wrenn.fyi/blog/undroppable
|
||||||
|
pub struct Undroppable<T>(Option<T>);
|
||||||
|
impl<T> Drop for Undroppable<T> {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
// Use an assertion at compile time to prevent this code from compiling if generated
|
||||||
|
#[allow(clippy::assertions_on_constants)]
|
||||||
|
const {
|
||||||
|
assert!(false, "Undroppable DbTxn was dropped. Ensure all code paths call commit or close");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl<T: DbTxn> Get for Undroppable<T> {
|
||||||
|
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
|
||||||
|
self.0.as_ref().unwrap().get(key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl<T: DbTxn> DbTxn for Undroppable<T> {
|
||||||
|
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
|
||||||
|
self.0.as_mut().unwrap().put(key, value);
|
||||||
|
}
|
||||||
|
fn del(&mut self, key: impl AsRef<[u8]>) {
|
||||||
|
self.0.as_mut().unwrap().del(key);
|
||||||
|
}
|
||||||
|
fn commit(mut self) {
|
||||||
|
self.0.take().unwrap().commit();
|
||||||
|
let _ = core::mem::ManuallyDrop::new(self);
|
||||||
|
}
|
||||||
|
fn close(mut self) {
|
||||||
|
drop(self.0.take().unwrap());
|
||||||
|
let _ = core::mem::ManuallyDrop::new(self);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A database supporting atomic transaction.
|
/// A database supporting atomic transaction.
|
||||||
@@ -51,6 +91,10 @@ pub trait Db: 'static + Send + Sync + Clone + Get {
|
|||||||
let dst_len = u8::try_from(item_dst.len()).unwrap();
|
let dst_len = u8::try_from(item_dst.len()).unwrap();
|
||||||
[[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
|
[[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
|
||||||
}
|
}
|
||||||
/// Open a new transaction.
|
/// Open a new transaction which may be dropped.
|
||||||
fn txn(&mut self) -> Self::Transaction<'_>;
|
fn unsafe_txn(&mut self) -> Self::Transaction<'_>;
|
||||||
|
/// Open a new transaction which must be committed or closed.
|
||||||
|
fn txn(&mut self) -> Undroppable<Self::Transaction<'_>> {
|
||||||
|
Undroppable(Some(self.unsafe_txn()))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -74,7 +74,7 @@ impl Get for MemDb {
|
|||||||
}
|
}
|
||||||
impl Db for MemDb {
|
impl Db for MemDb {
|
||||||
type Transaction<'a> = MemDbTxn<'a>;
|
type Transaction<'a> = MemDbTxn<'a>;
|
||||||
fn txn(&mut self) -> MemDbTxn<'_> {
|
fn unsafe_txn(&mut self) -> MemDbTxn<'_> {
|
||||||
MemDbTxn(self, HashMap::new(), HashSet::new())
|
MemDbTxn(self, HashMap::new(), HashSet::new())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -37,7 +37,7 @@ impl Get for Arc<ParityDb> {
|
|||||||
}
|
}
|
||||||
impl Db for Arc<ParityDb> {
|
impl Db for Arc<ParityDb> {
|
||||||
type Transaction<'a> = Transaction<'a>;
|
type Transaction<'a> = Transaction<'a>;
|
||||||
fn txn(&mut self) -> Self::Transaction<'_> {
|
fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
|
||||||
Transaction(self, vec![])
|
Transaction(self, vec![])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -39,7 +39,7 @@ impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
|
|||||||
}
|
}
|
||||||
impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
|
impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
|
||||||
type Transaction<'a> = Transaction<'a, T>;
|
type Transaction<'a> = Transaction<'a, T>;
|
||||||
fn txn(&mut self) -> Self::Transaction<'_> {
|
fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
|
||||||
let mut opts = WriteOptions::default();
|
let mut opts = WriteOptions::default();
|
||||||
opts.set_sync(true);
|
opts.set_sync(true);
|
||||||
Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
|
Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
|
||||||
|
|||||||
@@ -2,10 +2,16 @@
|
|||||||
#![doc = include_str!("../README.md")]
|
#![doc = include_str!("../README.md")]
|
||||||
#![deny(missing_docs)]
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
use core::{future::Future, time::Duration};
|
use core::{
|
||||||
|
fmt::{self, Debug},
|
||||||
|
future::Future,
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
|
mod type_name;
|
||||||
|
|
||||||
/// A handle for a task.
|
/// A handle for a task.
|
||||||
///
|
///
|
||||||
/// The task will only stop running once all handles for it are dropped.
|
/// The task will only stop running once all handles for it are dropped.
|
||||||
@@ -45,8 +51,6 @@ impl Task {
|
|||||||
|
|
||||||
impl TaskHandle {
|
impl TaskHandle {
|
||||||
/// Tell the task to run now (and not whenever its next iteration on a timer is).
|
/// Tell the task to run now (and not whenever its next iteration on a timer is).
|
||||||
///
|
|
||||||
/// Panics if the task has been dropped.
|
|
||||||
pub fn run_now(&self) {
|
pub fn run_now(&self) {
|
||||||
#[allow(clippy::match_same_arms)]
|
#[allow(clippy::match_same_arms)]
|
||||||
match self.run_now.try_send(()) {
|
match self.run_now.try_send(()) {
|
||||||
@@ -54,12 +58,22 @@ impl TaskHandle {
|
|||||||
// NOP on full, as this task will already be ran as soon as possible
|
// NOP on full, as this task will already be ran as soon as possible
|
||||||
Err(mpsc::error::TrySendError::Full(())) => {}
|
Err(mpsc::error::TrySendError::Full(())) => {}
|
||||||
Err(mpsc::error::TrySendError::Closed(())) => {
|
Err(mpsc::error::TrySendError::Closed(())) => {
|
||||||
|
// The task should only be closed if all handles are dropped, and this one hasn't been
|
||||||
panic!("task was unexpectedly closed when calling run_now")
|
panic!("task was unexpectedly closed when calling run_now")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// An enum which can't be constructed, representing that the task does not error.
|
||||||
|
pub enum DoesNotError {}
|
||||||
|
impl Debug for DoesNotError {
|
||||||
|
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||||
|
// This type can't be constructed so we'll never have a `&self` to call this fn with
|
||||||
|
unreachable!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// A task to be continually ran.
|
/// A task to be continually ran.
|
||||||
pub trait ContinuallyRan: Sized + Send {
|
pub trait ContinuallyRan: Sized + Send {
|
||||||
/// The amount of seconds before this task should be polled again.
|
/// The amount of seconds before this task should be polled again.
|
||||||
@@ -69,11 +83,14 @@ pub trait ContinuallyRan: Sized + Send {
|
|||||||
/// Upon error, the amount of time waited will be linearly increased until this limit.
|
/// Upon error, the amount of time waited will be linearly increased until this limit.
|
||||||
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 120;
|
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 120;
|
||||||
|
|
||||||
|
/// The error potentially yielded upon running an iteration of this task.
|
||||||
|
type Error: Debug;
|
||||||
|
|
||||||
/// Run an iteration of the task.
|
/// Run an iteration of the task.
|
||||||
///
|
///
|
||||||
/// If this returns `true`, all dependents of the task will immediately have a new iteration ran
|
/// If this returns `true`, all dependents of the task will immediately have a new iteration ran
|
||||||
/// (without waiting for whatever timer they were already on).
|
/// (without waiting for whatever timer they were already on).
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>>;
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>>;
|
||||||
|
|
||||||
/// Continually run the task.
|
/// Continually run the task.
|
||||||
fn continually_run(
|
fn continually_run(
|
||||||
@@ -115,12 +132,20 @@ pub trait ContinuallyRan: Sized + Send {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
log::warn!("{}", e);
|
// Get the type name
|
||||||
|
let type_name = type_name::strip_type_name(core::any::type_name::<Self>());
|
||||||
|
// Print the error as a warning, prefixed by the task's type
|
||||||
|
log::warn!("{type_name}: {e:?}");
|
||||||
increase_sleep_before_next_task(&mut current_sleep_before_next_task);
|
increase_sleep_before_next_task(&mut current_sleep_before_next_task);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Don't run the task again for another few seconds UNLESS told to run now
|
// Don't run the task again for another few seconds UNLESS told to run now
|
||||||
|
/*
|
||||||
|
We could replace tokio::mpsc with async_channel, tokio::time::sleep with
|
||||||
|
patchable_async_sleep::sleep, and tokio::select with futures_lite::future::or
|
||||||
|
It isn't worth the effort when patchable_async_sleep::sleep will still resolve to tokio
|
||||||
|
*/
|
||||||
tokio::select! {
|
tokio::select! {
|
||||||
() = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {},
|
() = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {},
|
||||||
msg = task.run_now.recv() => {
|
msg = task.run_now.recv() => {
|
||||||
|
|||||||
31
common/task/src/type_name.rs
Normal file
31
common/task/src/type_name.rs
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
/// Strip the modules from a type name.
|
||||||
|
// This may be of the form `a::b::C`, in which case we only want `C`
|
||||||
|
pub(crate) fn strip_type_name(full_type_name: &'static str) -> String {
|
||||||
|
// It also may be `a::b::C<d::e::F>`, in which case, we only attempt to strip `a::b`
|
||||||
|
let mut by_generics = full_type_name.split('<');
|
||||||
|
|
||||||
|
// Strip to just `C`
|
||||||
|
let full_outer_object_name = by_generics.next().unwrap();
|
||||||
|
let mut outer_object_name_parts = full_outer_object_name.split("::");
|
||||||
|
let mut last_part_in_outer_object_name = outer_object_name_parts.next().unwrap();
|
||||||
|
for part in outer_object_name_parts {
|
||||||
|
last_part_in_outer_object_name = part;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Push back on the generic terms
|
||||||
|
let mut type_name = last_part_in_outer_object_name.to_string();
|
||||||
|
for generic in by_generics {
|
||||||
|
type_name.push('<');
|
||||||
|
type_name.push_str(generic);
|
||||||
|
}
|
||||||
|
type_name
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_strip_type_name() {
|
||||||
|
assert_eq!(strip_type_name("core::option::Option"), "Option");
|
||||||
|
assert_eq!(
|
||||||
|
strip_type_name("core::option::Option<alloc::string::String>"),
|
||||||
|
"Option<alloc::string::String>"
|
||||||
|
);
|
||||||
|
}
|
||||||
@@ -25,13 +25,12 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
|
|||||||
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
||||||
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
|
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
|
|
||||||
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
|
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
|
||||||
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
|
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
|
||||||
frost = { package = "modular-frost", path = "../crypto/frost" }
|
frost = { package = "modular-frost", path = "../crypto/frost" }
|
||||||
frost-schnorrkel = { path = "../crypto/schnorrkel" }
|
frost-schnorrkel = { path = "../crypto/schnorrkel" }
|
||||||
|
|
||||||
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
|
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
|
||||||
|
|
||||||
zalloc = { path = "../common/zalloc" }
|
zalloc = { path = "../common/zalloc" }
|
||||||
serai-db = { path = "../common/db" }
|
serai-db = { path = "../common/db" }
|
||||||
@@ -40,9 +39,8 @@ serai-task = { path = "../common/task", version = "0.1" }
|
|||||||
|
|
||||||
messages = { package = "serai-processor-messages", path = "../processor/messages" }
|
messages = { package = "serai-processor-messages", path = "../processor/messages" }
|
||||||
message-queue = { package = "serai-message-queue", path = "../message-queue" }
|
message-queue = { package = "serai-message-queue", path = "../message-queue" }
|
||||||
tributary = { package = "tributary-chain", path = "./tributary" }
|
tributary-sdk = { path = "./tributary-sdk" }
|
||||||
|
|
||||||
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
|
|
||||||
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
|
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
|
||||||
|
|
||||||
hex = { version = "0.4", default-features = false, features = ["std"] }
|
hex = { version = "0.4", default-features = false, features = ["std"] }
|
||||||
@@ -51,17 +49,15 @@ borsh = { version = "1", default-features = false, features = ["std", "derive",
|
|||||||
log = { version = "0.4", default-features = false, features = ["std"] }
|
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||||
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
|
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
|
||||||
|
|
||||||
|
tokio = { version = "1", default-features = false, features = ["time", "sync", "macros", "rt-multi-thread"] }
|
||||||
|
|
||||||
serai-cosign = { path = "./cosign" }
|
serai-cosign = { path = "./cosign" }
|
||||||
serai-coordinator-substrate = { path = "./substrate" }
|
serai-coordinator-substrate = { path = "./substrate" }
|
||||||
|
serai-coordinator-tributary = { path = "./tributary" }
|
||||||
serai-coordinator-p2p = { path = "./p2p" }
|
serai-coordinator-p2p = { path = "./p2p" }
|
||||||
serai-coordinator-libp2p-p2p = { path = "./p2p/libp2p" }
|
serai-coordinator-libp2p-p2p = { path = "./p2p/libp2p" }
|
||||||
|
|
||||||
[dev-dependencies]
|
|
||||||
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
|
|
||||||
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
|
|
||||||
sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
|
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
longer-reattempts = []
|
longer-reattempts = ["serai-coordinator-tributary/longer-reattempts"]
|
||||||
parity-db = ["serai-db/parity-db"]
|
parity-db = ["serai-db/parity-db"]
|
||||||
rocksdb = ["serai-db/rocksdb"]
|
rocksdb = ["serai-db/rocksdb"]
|
||||||
|
|||||||
@@ -1,19 +1,29 @@
|
|||||||
# Coordinator
|
# Coordinator
|
||||||
|
|
||||||
- [`tendermint`](/tributary/tendermint) is an implementation of the Tendermint BFT algorithm.
|
- [`tendermint`](/tributary/tendermint) is an implementation of the Tendermint
|
||||||
|
BFT algorithm.
|
||||||
|
|
||||||
- [`tributary`](./tributary) is a micro-blockchain framework. Instead of a producing a blockchain
|
- [`tributary-sdk`](./tributary-sdk) is a micro-blockchain framework. Instead
|
||||||
daemon like the Polkadot SDK or Cosmos SDK intend to, `tributary` is solely intended to be an
|
of a producing a blockchain daemon like the Polkadot SDK or Cosmos SDK intend
|
||||||
embedded asynchronous task within an application.
|
to, `tributary` is solely intended to be an embedded asynchronous task within
|
||||||
|
an application.
|
||||||
|
|
||||||
The Serai coordinator spawns a tributary for each validator set it's coordinating. This allows
|
The Serai coordinator spawns a tributary for each validator set it's
|
||||||
the participating validators to communicate in a byzantine-fault-tolerant manner (relying on
|
coordinating. This allows the participating validators to communicate in a
|
||||||
Tendermint for consensus).
|
byzantine-fault-tolerant manner (relying on Tendermint for consensus).
|
||||||
|
|
||||||
- [`cosign`](./cosign) contains a library to decide which Substrate blocks should be cosigned and
|
- [`cosign`](./cosign) contains a library to decide which Substrate blocks
|
||||||
to evaluate cosigns.
|
should be cosigned and to evaluate cosigns.
|
||||||
|
|
||||||
- [`substrate`](./substrate) contains a library to index the Substrate blockchain and handle its
|
- [`substrate`](./substrate) contains a library to index the Substrate
|
||||||
events.
|
blockchain and handle its events.
|
||||||
|
|
||||||
|
- [`tributary`](./tributary) is our instantiation of the Tributary SDK for the
|
||||||
|
Serai processor. It includes the `Transaction` definition and deferred
|
||||||
|
execution logic.
|
||||||
|
|
||||||
|
- [`p2p`](./p2p) is our abstract P2P API to service the Coordinator.
|
||||||
|
|
||||||
|
- [`libp2p`](./p2p/libp2p) is our libp2p-backed implementation of the P2P API.
|
||||||
|
|
||||||
- [`src`](./src) contains the source code for the Coordinator binary itself.
|
- [`src`](./src) contains the source code for the Coordinator binary itself.
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ use core::future::Future;
|
|||||||
use std::time::{Duration, SystemTime};
|
use std::time::{Duration, SystemTime};
|
||||||
|
|
||||||
use serai_db::*;
|
use serai_db::*;
|
||||||
use serai_task::ContinuallyRan;
|
use serai_task::{DoesNotError, ContinuallyRan};
|
||||||
|
|
||||||
use crate::evaluator::CosignedBlocks;
|
use crate::evaluator::CosignedBlocks;
|
||||||
|
|
||||||
@@ -24,8 +24,19 @@ pub(crate) struct CosignDelayTask<D: Db> {
|
|||||||
pub(crate) db: D,
|
pub(crate) db: D,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct AwaitUndroppable<T: DbTxn>(Option<core::mem::ManuallyDrop<Undroppable<T>>>);
|
||||||
|
impl<T: DbTxn> Drop for AwaitUndroppable<T> {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
if let Some(mut txn) = self.0.take() {
|
||||||
|
(unsafe { core::mem::ManuallyDrop::take(&mut txn) }).close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
|
impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = DoesNotError;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let mut made_progress = false;
|
let mut made_progress = false;
|
||||||
loop {
|
loop {
|
||||||
@@ -33,14 +44,18 @@ impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
|
|||||||
|
|
||||||
// Receive the next block to mark as cosigned
|
// Receive the next block to mark as cosigned
|
||||||
let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
|
let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
|
||||||
|
txn.close();
|
||||||
break;
|
break;
|
||||||
};
|
};
|
||||||
|
|
||||||
// Calculate when we should mark it as valid
|
// Calculate when we should mark it as valid
|
||||||
let time_valid =
|
let time_valid =
|
||||||
SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
|
SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
|
||||||
// Sleep until then
|
// Sleep until then
|
||||||
|
let mut txn = AwaitUndroppable(Some(core::mem::ManuallyDrop::new(txn)));
|
||||||
tokio::time::sleep(SystemTime::now().duration_since(time_valid).unwrap_or(Duration::ZERO))
|
tokio::time::sleep(SystemTime::now().duration_since(time_valid).unwrap_or(Duration::ZERO))
|
||||||
.await;
|
.await;
|
||||||
|
let mut txn = core::mem::ManuallyDrop::into_inner(txn.0.take().unwrap());
|
||||||
|
|
||||||
// Set the cosigned block
|
// Set the cosigned block
|
||||||
LatestCosignedBlockNumber::set(&mut txn, &block_number);
|
LatestCosignedBlockNumber::set(&mut txn, &block_number);
|
||||||
|
|||||||
@@ -80,12 +80,14 @@ pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
|
impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = String;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let mut known_cosign = None;
|
let mut known_cosign = None;
|
||||||
let mut made_progress = false;
|
let mut made_progress = false;
|
||||||
loop {
|
loop {
|
||||||
let mut txn = self.db.txn();
|
let mut txn = self.db.unsafe_txn();
|
||||||
let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
|
let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
|
||||||
else {
|
else {
|
||||||
break;
|
break;
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
use core::future::Future;
|
use core::future::Future;
|
||||||
use std::collections::HashMap;
|
use std::{sync::Arc, collections::HashMap};
|
||||||
|
|
||||||
use serai_client::{
|
use serai_client::{
|
||||||
primitives::{SeraiAddress, Amount},
|
primitives::{SeraiAddress, Amount},
|
||||||
@@ -57,18 +57,20 @@ async fn block_has_events_justifying_a_cosign(
|
|||||||
/// A task to determine which blocks we should intend to cosign.
|
/// A task to determine which blocks we should intend to cosign.
|
||||||
pub(crate) struct CosignIntendTask<D: Db> {
|
pub(crate) struct CosignIntendTask<D: Db> {
|
||||||
pub(crate) db: D,
|
pub(crate) db: D,
|
||||||
pub(crate) serai: Serai,
|
pub(crate) serai: Arc<Serai>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
|
impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = String;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
|
let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
|
||||||
let latest_block_number =
|
let latest_block_number =
|
||||||
self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();
|
self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();
|
||||||
|
|
||||||
for block_number in start_block_number ..= latest_block_number {
|
for block_number in start_block_number ..= latest_block_number {
|
||||||
let mut txn = self.db.txn();
|
let mut txn = self.db.unsafe_txn();
|
||||||
|
|
||||||
let (block, mut has_events) =
|
let (block, mut has_events) =
|
||||||
block_has_events_justifying_a_cosign(&self.serai, block_number)
|
block_has_events_justifying_a_cosign(&self.serai, block_number)
|
||||||
@@ -78,7 +80,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
|
|||||||
// Check we are indexing a linear chain
|
// Check we are indexing a linear chain
|
||||||
if (block_number > 1) &&
|
if (block_number > 1) &&
|
||||||
(<[u8; 32]>::from(block.header.parent_hash) !=
|
(<[u8; 32]>::from(block.header.parent_hash) !=
|
||||||
SubstrateBlocks::get(&txn, block_number - 1)
|
SubstrateBlockHash::get(&txn, block_number - 1)
|
||||||
.expect("indexing a block but haven't indexed its parent"))
|
.expect("indexing a block but haven't indexed its parent"))
|
||||||
{
|
{
|
||||||
Err(format!(
|
Err(format!(
|
||||||
@@ -86,14 +88,15 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
|
|||||||
block_number - 1
|
block_number - 1
|
||||||
))?;
|
))?;
|
||||||
}
|
}
|
||||||
SubstrateBlocks::set(&mut txn, block_number, &block.hash());
|
let block_hash = block.hash();
|
||||||
|
SubstrateBlockHash::set(&mut txn, block_number, &block_hash);
|
||||||
|
|
||||||
let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);
|
let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);
|
||||||
|
|
||||||
// If this is notable, it creates a new global session, which we index into the database
|
// If this is notable, it creates a new global session, which we index into the database
|
||||||
// now
|
// now
|
||||||
if has_events == HasEvents::Notable {
|
if has_events == HasEvents::Notable {
|
||||||
let serai = self.serai.as_of(block.hash());
|
let serai = self.serai.as_of(block_hash);
|
||||||
let sets_and_keys = cosigning_sets(&serai).await?;
|
let sets_and_keys = cosigning_sets(&serai).await?;
|
||||||
let global_session =
|
let global_session =
|
||||||
GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect());
|
GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect());
|
||||||
@@ -159,7 +162,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
|
|||||||
&CosignIntent {
|
&CosignIntent {
|
||||||
global_session: global_session_for_this_block,
|
global_session: global_session_for_this_block,
|
||||||
block_number,
|
block_number,
|
||||||
block_hash: block.hash(),
|
block_hash,
|
||||||
notable: has_events == HasEvents::Notable,
|
notable: has_events == HasEvents::Notable,
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
#![deny(missing_docs)]
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
use core::{fmt::Debug, future::Future};
|
use core::{fmt::Debug, future::Future};
|
||||||
use std::collections::HashMap;
|
use std::{sync::Arc, collections::HashMap};
|
||||||
|
|
||||||
use blake2::{Digest, Blake2s256};
|
use blake2::{Digest, Blake2s256};
|
||||||
|
|
||||||
@@ -82,13 +82,13 @@ enum HasEvents {
|
|||||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||||
pub struct CosignIntent {
|
pub struct CosignIntent {
|
||||||
/// The global session this cosign is being performed under.
|
/// The global session this cosign is being performed under.
|
||||||
global_session: [u8; 32],
|
pub global_session: [u8; 32],
|
||||||
/// The number of the block to cosign.
|
/// The number of the block to cosign.
|
||||||
block_number: u64,
|
pub block_number: u64,
|
||||||
/// The hash of the block to cosign.
|
/// The hash of the block to cosign.
|
||||||
block_hash: [u8; 32],
|
pub block_hash: [u8; 32],
|
||||||
/// If this cosign must be handled before further cosigns are.
|
/// If this cosign must be handled before further cosigns are.
|
||||||
notable: bool,
|
pub notable: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A cosign.
|
/// A cosign.
|
||||||
@@ -127,7 +127,7 @@ create_db! {
|
|||||||
// The following are populated by the intend task and used throughout the library
|
// The following are populated by the intend task and used throughout the library
|
||||||
|
|
||||||
// An index of Substrate blocks
|
// An index of Substrate blocks
|
||||||
SubstrateBlocks: (block_number: u64) -> [u8; 32],
|
SubstrateBlockHash: (block_number: u64) -> [u8; 32],
|
||||||
// A mapping from a global session's ID to its relevant information.
|
// A mapping from a global session's ID to its relevant information.
|
||||||
GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
|
GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
|
||||||
// The last block to be cosigned by a global session.
|
// The last block to be cosigned by a global session.
|
||||||
@@ -228,6 +228,43 @@ pub trait RequestNotableCosigns: 'static + Send {
|
|||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct Faulted;
|
pub struct Faulted;
|
||||||
|
|
||||||
|
/// An error incurred while intaking a cosign.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub enum IntakeCosignError {
|
||||||
|
/// Cosign is for a not-yet-indexed block
|
||||||
|
NotYetIndexedBlock,
|
||||||
|
/// A later cosign for this cosigner has already been handled
|
||||||
|
StaleCosign,
|
||||||
|
/// The cosign's global session isn't recognized
|
||||||
|
UnrecognizedGlobalSession,
|
||||||
|
/// The cosign is for a block before its global session starts
|
||||||
|
BeforeGlobalSessionStart,
|
||||||
|
/// The cosign is for a block after its global session ends
|
||||||
|
AfterGlobalSessionEnd,
|
||||||
|
/// The cosign's signing network wasn't a participant in this global session
|
||||||
|
NonParticipatingNetwork,
|
||||||
|
/// The cosign had an invalid signature
|
||||||
|
InvalidSignature,
|
||||||
|
/// The cosign is for a global session which has yet to have its declaration block cosigned
|
||||||
|
FutureGlobalSession,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl IntakeCosignError {
|
||||||
|
/// If this error is temporal to the local view
|
||||||
|
pub fn temporal(&self) -> bool {
|
||||||
|
match self {
|
||||||
|
IntakeCosignError::NotYetIndexedBlock |
|
||||||
|
IntakeCosignError::StaleCosign |
|
||||||
|
IntakeCosignError::UnrecognizedGlobalSession |
|
||||||
|
IntakeCosignError::FutureGlobalSession => true,
|
||||||
|
IntakeCosignError::BeforeGlobalSessionStart |
|
||||||
|
IntakeCosignError::AfterGlobalSessionEnd |
|
||||||
|
IntakeCosignError::NonParticipatingNetwork |
|
||||||
|
IntakeCosignError::InvalidSignature => false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// The interface to manage cosigning with.
|
/// The interface to manage cosigning with.
|
||||||
pub struct Cosigning<D: Db> {
|
pub struct Cosigning<D: Db> {
|
||||||
db: D,
|
db: D,
|
||||||
@@ -239,7 +276,7 @@ impl<D: Db> Cosigning<D> {
|
|||||||
/// only used once at any given time.
|
/// only used once at any given time.
|
||||||
pub fn spawn<R: RequestNotableCosigns>(
|
pub fn spawn<R: RequestNotableCosigns>(
|
||||||
db: D,
|
db: D,
|
||||||
serai: Serai,
|
serai: Arc<Serai>,
|
||||||
request: R,
|
request: R,
|
||||||
tasks_to_run_upon_cosigning: Vec<TaskHandle>,
|
tasks_to_run_upon_cosigning: Vec<TaskHandle>,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
@@ -270,14 +307,14 @@ impl<D: Db> Cosigning<D> {
|
|||||||
Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0))
|
Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Fetch an cosigned Substrate block by its block number.
|
/// Fetch a cosigned Substrate block's hash by its block number.
|
||||||
pub fn cosigned_block(getter: &impl Get, block_number: u64) -> Result<Option<[u8; 32]>, Faulted> {
|
pub fn cosigned_block(getter: &impl Get, block_number: u64) -> Result<Option<[u8; 32]>, Faulted> {
|
||||||
if block_number > Self::latest_cosigned_block_number(getter)? {
|
if block_number > Self::latest_cosigned_block_number(getter)? {
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(Some(
|
Ok(Some(
|
||||||
SubstrateBlocks::get(getter, block_number).expect("cosigned block but didn't index it"),
|
SubstrateBlockHash::get(getter, block_number).expect("cosigned block but didn't index it"),
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -326,27 +363,16 @@ impl<D: Db> Cosigning<D> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Intake a cosign from the Serai network.
|
/// Intake a cosign.
|
||||||
///
|
|
||||||
/// - Returns Err(_) if there was an error trying to validate the cosign and it should be retired
|
|
||||||
/// later.
|
|
||||||
/// - Returns Ok(true) if the cosign was successfully handled or could not be handled at this
|
|
||||||
/// time.
|
|
||||||
/// - Returns Ok(false) if the cosign was invalid.
|
|
||||||
//
|
|
||||||
// We collapse a cosign which shouldn't be handled yet into a valid cosign (`Ok(true)`) as we
|
|
||||||
// assume we'll either explicitly request it if we need it or we'll naturally see it (or a later,
|
|
||||||
// more relevant, cosign) again.
|
|
||||||
//
|
//
|
||||||
// Takes `&mut self` as this should only be called once at any given moment.
|
// Takes `&mut self` as this should only be called once at any given moment.
|
||||||
// TODO: Don't overload bool here
|
pub fn intake_cosign(&mut self, signed_cosign: &SignedCosign) -> Result<(), IntakeCosignError> {
|
||||||
pub fn intake_cosign(&mut self, signed_cosign: &SignedCosign) -> Result<bool, String> {
|
|
||||||
let cosign = &signed_cosign.cosign;
|
let cosign = &signed_cosign.cosign;
|
||||||
let network = cosign.cosigner;
|
let network = cosign.cosigner;
|
||||||
|
|
||||||
// Check our indexed blockchain includes a block with this block number
|
// Check our indexed blockchain includes a block with this block number
|
||||||
let Some(our_block_hash) = SubstrateBlocks::get(&self.db, cosign.block_number) else {
|
let Some(our_block_hash) = SubstrateBlockHash::get(&self.db, cosign.block_number) else {
|
||||||
return Ok(true);
|
Err(IntakeCosignError::NotYetIndexedBlock)?
|
||||||
};
|
};
|
||||||
let faulty = cosign.block_hash != our_block_hash;
|
let faulty = cosign.block_hash != our_block_hash;
|
||||||
|
|
||||||
@@ -356,20 +382,19 @@ impl<D: Db> Cosigning<D> {
|
|||||||
NetworksLatestCosignedBlock::get(&self.db, cosign.global_session, network)
|
NetworksLatestCosignedBlock::get(&self.db, cosign.global_session, network)
|
||||||
{
|
{
|
||||||
if existing.cosign.block_number >= cosign.block_number {
|
if existing.cosign.block_number >= cosign.block_number {
|
||||||
return Ok(true);
|
Err(IntakeCosignError::StaleCosign)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let Some(global_session) = GlobalSessions::get(&self.db, cosign.global_session) else {
|
let Some(global_session) = GlobalSessions::get(&self.db, cosign.global_session) else {
|
||||||
// Unrecognized global session
|
Err(IntakeCosignError::UnrecognizedGlobalSession)?
|
||||||
return Ok(true);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Check the cosigned block number is in range to the global session
|
// Check the cosigned block number is in range to the global session
|
||||||
if cosign.block_number < global_session.start_block_number {
|
if cosign.block_number < global_session.start_block_number {
|
||||||
// Cosign is for a block predating the global session
|
// Cosign is for a block predating the global session
|
||||||
return Ok(false);
|
Err(IntakeCosignError::BeforeGlobalSessionStart)?;
|
||||||
}
|
}
|
||||||
if !faulty {
|
if !faulty {
|
||||||
// This prevents a malicious validator set, on the same chain, from producing a cosign after
|
// This prevents a malicious validator set, on the same chain, from producing a cosign after
|
||||||
@@ -377,7 +402,7 @@ impl<D: Db> Cosigning<D> {
|
|||||||
if let Some(last_block) = GlobalSessionsLastBlock::get(&self.db, cosign.global_session) {
|
if let Some(last_block) = GlobalSessionsLastBlock::get(&self.db, cosign.global_session) {
|
||||||
if cosign.block_number > last_block {
|
if cosign.block_number > last_block {
|
||||||
// Cosign is for a block after the last block this global session should have signed
|
// Cosign is for a block after the last block this global session should have signed
|
||||||
return Ok(false);
|
Err(IntakeCosignError::AfterGlobalSessionEnd)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -386,20 +411,20 @@ impl<D: Db> Cosigning<D> {
|
|||||||
{
|
{
|
||||||
let key = Public::from({
|
let key = Public::from({
|
||||||
let Some(key) = global_session.keys.get(&network) else {
|
let Some(key) = global_session.keys.get(&network) else {
|
||||||
return Ok(false);
|
Err(IntakeCosignError::NonParticipatingNetwork)?
|
||||||
};
|
};
|
||||||
*key
|
*key
|
||||||
});
|
});
|
||||||
|
|
||||||
if !signed_cosign.verify_signature(key) {
|
if !signed_cosign.verify_signature(key) {
|
||||||
return Ok(false);
|
Err(IntakeCosignError::InvalidSignature)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Since we verified this cosign's signature, and have a chain sufficiently long, handle the
|
// Since we verified this cosign's signature, and have a chain sufficiently long, handle the
|
||||||
// cosign
|
// cosign
|
||||||
|
|
||||||
let mut txn = self.db.txn();
|
let mut txn = self.db.unsafe_txn();
|
||||||
|
|
||||||
if !faulty {
|
if !faulty {
|
||||||
// If this is for a future global session, we don't acknowledge this cosign at this time
|
// If this is for a future global session, we don't acknowledge this cosign at this time
|
||||||
@@ -408,7 +433,7 @@ impl<D: Db> Cosigning<D> {
|
|||||||
// block declaring it was cosigned
|
// block declaring it was cosigned
|
||||||
if (global_session.start_block_number - 1) > latest_cosigned_block_number {
|
if (global_session.start_block_number - 1) > latest_cosigned_block_number {
|
||||||
drop(txn);
|
drop(txn);
|
||||||
return Ok(true);
|
return Err(IntakeCosignError::FutureGlobalSession);
|
||||||
}
|
}
|
||||||
|
|
||||||
// This is safe as it's in-range and newer, as prior checked since it isn't faulty
|
// This is safe as it's in-range and newer, as prior checked since it isn't faulty
|
||||||
@@ -422,9 +447,10 @@ impl<D: Db> Cosigning<D> {
|
|||||||
|
|
||||||
let mut weight_cosigned = 0;
|
let mut weight_cosigned = 0;
|
||||||
for fault in &faults {
|
for fault in &faults {
|
||||||
let Some(stake) = global_session.stakes.get(&fault.cosign.cosigner) else {
|
let stake = global_session
|
||||||
Err("cosigner with recognized key didn't have a stake entry saved".to_string())?
|
.stakes
|
||||||
};
|
.get(&fault.cosign.cosigner)
|
||||||
|
.expect("cosigner with recognized key didn't have a stake entry saved");
|
||||||
weight_cosigned += stake;
|
weight_cosigned += stake;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -436,7 +462,7 @@ impl<D: Db> Cosigning<D> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
txn.commit();
|
txn.commit();
|
||||||
Ok(true)
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Receive intended cosigns to produce for this ValidatorSet.
|
/// Receive intended cosigns to produce for this ValidatorSet.
|
||||||
@@ -454,3 +480,30 @@ impl<D: Db> Cosigning<D> {
|
|||||||
res
|
res
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
struct RNC;
|
||||||
|
impl RequestNotableCosigns for RNC {
|
||||||
|
/// The error type which may be encountered when requesting notable cosigns.
|
||||||
|
type Error = ();
|
||||||
|
|
||||||
|
/// Request the notable cosigns for this global session.
|
||||||
|
fn request_notable_cosigns(
|
||||||
|
&self,
|
||||||
|
global_session: [u8; 32],
|
||||||
|
) -> impl Send + Future<Output = Result<(), Self::Error>> {
|
||||||
|
async move { Ok(()) }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test() {
|
||||||
|
let db: serai_db::MemDb = serai_db::MemDb::new();
|
||||||
|
let serai = unsafe { core::mem::transmute(0u64) };
|
||||||
|
let request = RNC;
|
||||||
|
let tasks = vec![];
|
||||||
|
let _ = Cosigning::spawn(db, serai, request, tasks);
|
||||||
|
core::future::pending().await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -24,10 +24,10 @@ serai-db = { path = "../../common/db", version = "0.1" }
|
|||||||
|
|
||||||
serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
|
serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
|
||||||
serai-cosign = { path = "../cosign" }
|
serai-cosign = { path = "../cosign" }
|
||||||
tributary = { package = "tributary-chain", path = "../tributary" }
|
tributary-sdk = { path = "../tributary-sdk" }
|
||||||
|
|
||||||
async-channel = { version = "2", default-features = false, features = ["std"] }
|
|
||||||
futures-lite = { version = "2", default-features = false, features = ["std"] }
|
futures-lite = { version = "2", default-features = false, features = ["std"] }
|
||||||
|
tokio = { version = "1", default-features = false, features = ["sync", "macros"] }
|
||||||
|
|
||||||
log = { version = "0.4", default-features = false, features = ["std"] }
|
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||||
serai-task = { path = "../../common/task", version = "0.1" }
|
serai-task = { path = "../../common/task", version = "0.1" }
|
||||||
|
|||||||
@@ -1,3 +1,3 @@
|
|||||||
# Serai Coordinator P2P
|
# Serai Coordinator P2P
|
||||||
|
|
||||||
The P2P abstraction used by Serai's coordinator.
|
The P2P abstraction used by Serai's coordinator, and tasks over it.
|
||||||
|
|||||||
@@ -31,9 +31,8 @@ borsh = { version = "1", default-features = false, features = ["std", "derive",
|
|||||||
|
|
||||||
serai-client = { path = "../../../substrate/client", default-features = false, features = ["serai", "borsh"] }
|
serai-client = { path = "../../../substrate/client", default-features = false, features = ["serai", "borsh"] }
|
||||||
serai-cosign = { path = "../../cosign" }
|
serai-cosign = { path = "../../cosign" }
|
||||||
tributary = { package = "tributary-chain", path = "../../tributary" }
|
tributary-sdk = { path = "../../tributary-sdk" }
|
||||||
|
|
||||||
void = { version = "1", default-features = false }
|
|
||||||
futures-util = { version = "0.3", default-features = false, features = ["std"] }
|
futures-util = { version = "0.3", default-features = false, features = ["std"] }
|
||||||
tokio = { version = "1", default-features = false, features = ["sync"] }
|
tokio = { version = "1", default-features = false, features = ["sync"] }
|
||||||
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
|
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
use core::future::Future;
|
use core::future::Future;
|
||||||
use std::collections::HashSet;
|
use std::{sync::Arc, collections::HashSet};
|
||||||
|
|
||||||
use rand_core::{RngCore, OsRng};
|
use rand_core::{RngCore, OsRng};
|
||||||
|
|
||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
use serai_client::Serai;
|
use serai_client::{SeraiError, Serai};
|
||||||
|
|
||||||
use libp2p::{
|
use libp2p::{
|
||||||
core::multiaddr::{Protocol, Multiaddr},
|
core::multiaddr::{Protocol, Multiaddr},
|
||||||
@@ -29,14 +29,18 @@ const TARGET_PEERS_PER_NETWORK: usize = 5;
|
|||||||
// TODO const TARGET_DIALED_PEERS_PER_NETWORK: usize = 3;
|
// TODO const TARGET_DIALED_PEERS_PER_NETWORK: usize = 3;
|
||||||
|
|
||||||
pub(crate) struct DialTask {
|
pub(crate) struct DialTask {
|
||||||
serai: Serai,
|
serai: Arc<Serai>,
|
||||||
validators: Validators,
|
validators: Validators,
|
||||||
peers: Peers,
|
peers: Peers,
|
||||||
to_dial: mpsc::UnboundedSender<DialOpts>,
|
to_dial: mpsc::UnboundedSender<DialOpts>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DialTask {
|
impl DialTask {
|
||||||
pub(crate) fn new(serai: Serai, peers: Peers, to_dial: mpsc::UnboundedSender<DialOpts>) -> Self {
|
pub(crate) fn new(
|
||||||
|
serai: Arc<Serai>,
|
||||||
|
peers: Peers,
|
||||||
|
to_dial: mpsc::UnboundedSender<DialOpts>,
|
||||||
|
) -> Self {
|
||||||
DialTask { serai: serai.clone(), validators: Validators::new(serai).0, peers, to_dial }
|
DialTask { serai: serai.clone(), validators: Validators::new(serai).0, peers, to_dial }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -46,7 +50,9 @@ impl ContinuallyRan for DialTask {
|
|||||||
const DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
|
const DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
|
||||||
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 10 * 60;
|
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 10 * 60;
|
||||||
|
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = SeraiError;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
self.validators.update().await?;
|
self.validators.update().await?;
|
||||||
|
|
||||||
@@ -79,8 +85,7 @@ impl ContinuallyRan for DialTask {
|
|||||||
.unwrap_or(0)
|
.unwrap_or(0)
|
||||||
.saturating_sub(1))
|
.saturating_sub(1))
|
||||||
{
|
{
|
||||||
let mut potential_peers =
|
let mut potential_peers = self.serai.p2p_validators(network).await?;
|
||||||
self.serai.p2p_validators(network).await.map_err(|e| format!("{e:?}"))?;
|
|
||||||
for _ in 0 .. (TARGET_PEERS_PER_NETWORK - peer_count) {
|
for _ in 0 .. (TARGET_PEERS_PER_NETWORK - peer_count) {
|
||||||
if potential_peers.is_empty() {
|
if potential_peers.is_empty() {
|
||||||
break;
|
break;
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ pub use libp2p::gossipsub::Event;
|
|||||||
use serai_cosign::SignedCosign;
|
use serai_cosign::SignedCosign;
|
||||||
|
|
||||||
// Block size limit + 16 KB of space for signatures/metadata
|
// Block size limit + 16 KB of space for signatures/metadata
|
||||||
pub(crate) const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 16384;
|
pub(crate) const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary_sdk::BLOCK_SIZE_LIMIT + 16384;
|
||||||
|
|
||||||
const LIBP2P_PROTOCOL: &str = "/serai/coordinator/gossip/1.0.0";
|
const LIBP2P_PROTOCOL: &str = "/serai/coordinator/gossip/1.0.0";
|
||||||
const BASE_TOPIC: &str = "/";
|
const BASE_TOPIC: &str = "/";
|
||||||
@@ -42,9 +42,10 @@ pub(crate) type Behavior = Behaviour<IdentityTransform, AllowAllSubscriptionFilt
|
|||||||
pub(crate) fn new_behavior() -> Behavior {
|
pub(crate) fn new_behavior() -> Behavior {
|
||||||
// The latency used by the Tendermint protocol, used here as the gossip epoch duration
|
// The latency used by the Tendermint protocol, used here as the gossip epoch duration
|
||||||
// libp2p-rs defaults to 1 second, whereas ours will be ~2
|
// libp2p-rs defaults to 1 second, whereas ours will be ~2
|
||||||
let heartbeat_interval = tributary::tendermint::LATENCY_TIME;
|
let heartbeat_interval = tributary_sdk::tendermint::LATENCY_TIME;
|
||||||
// The amount of heartbeats which will occur within a single Tributary block
|
// The amount of heartbeats which will occur within a single Tributary block
|
||||||
let heartbeats_per_block = tributary::tendermint::TARGET_BLOCK_TIME.div_ceil(heartbeat_interval);
|
let heartbeats_per_block =
|
||||||
|
tributary_sdk::tendermint::TARGET_BLOCK_TIME.div_ceil(heartbeat_interval);
|
||||||
// libp2p-rs defaults to 5, whereas ours will be ~8
|
// libp2p-rs defaults to 5, whereas ours will be ~8
|
||||||
let heartbeats_to_keep = 2 * heartbeats_per_block;
|
let heartbeats_to_keep = 2 * heartbeats_per_block;
|
||||||
// libp2p-rs defaults to 3 whereas ours will be ~4
|
// libp2p-rs defaults to 3 whereas ours will be ~4
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ use serai_client::{
|
|||||||
Serai,
|
Serai,
|
||||||
};
|
};
|
||||||
|
|
||||||
use tokio::sync::{mpsc, Mutex, RwLock};
|
use tokio::sync::{mpsc, oneshot, Mutex, RwLock};
|
||||||
|
|
||||||
use serai_task::{Task, ContinuallyRan};
|
use serai_task::{Task, ContinuallyRan};
|
||||||
|
|
||||||
@@ -35,7 +35,7 @@ use libp2p::{
|
|||||||
SwarmBuilder,
|
SwarmBuilder,
|
||||||
};
|
};
|
||||||
|
|
||||||
use serai_coordinator_p2p::{oneshot, Heartbeat, TributaryBlockWithCommit};
|
use serai_coordinator_p2p::{Heartbeat, TributaryBlockWithCommit};
|
||||||
|
|
||||||
/// A struct to sync the validators from the Serai node in order to keep track of them.
|
/// A struct to sync the validators from the Serai node in order to keep track of them.
|
||||||
mod validators;
|
mod validators;
|
||||||
@@ -131,33 +131,35 @@ struct Behavior {
|
|||||||
gossip: gossip::Behavior,
|
gossip: gossip::Behavior,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The libp2p-backed P2P implementation.
|
|
||||||
///
|
|
||||||
/// The P2p trait implementation does not support backpressure and is expected to be fully
|
|
||||||
/// utilized. Failure to poll the entire API will cause unbounded memory growth.
|
|
||||||
#[allow(clippy::type_complexity)]
|
#[allow(clippy::type_complexity)]
|
||||||
#[derive(Clone)]
|
struct Libp2pInner {
|
||||||
pub struct Libp2p {
|
|
||||||
peers: Peers,
|
peers: Peers,
|
||||||
|
|
||||||
gossip: mpsc::UnboundedSender<Message>,
|
gossip: mpsc::UnboundedSender<Message>,
|
||||||
outbound_requests: mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender<Response>)>,
|
outbound_requests: mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender<Response>)>,
|
||||||
|
|
||||||
tributary_gossip: Arc<Mutex<mpsc::UnboundedReceiver<([u8; 32], Vec<u8>)>>>,
|
tributary_gossip: Mutex<mpsc::UnboundedReceiver<([u8; 32], Vec<u8>)>>,
|
||||||
|
|
||||||
signed_cosigns: Arc<Mutex<mpsc::UnboundedReceiver<SignedCosign>>>,
|
signed_cosigns: Mutex<mpsc::UnboundedReceiver<SignedCosign>>,
|
||||||
signed_cosigns_send: mpsc::UnboundedSender<SignedCosign>,
|
signed_cosigns_send: mpsc::UnboundedSender<SignedCosign>,
|
||||||
|
|
||||||
heartbeat_requests: Arc<Mutex<mpsc::UnboundedReceiver<(RequestId, ValidatorSet, [u8; 32])>>>,
|
heartbeat_requests: Mutex<mpsc::UnboundedReceiver<(RequestId, ValidatorSet, [u8; 32])>>,
|
||||||
notable_cosign_requests: Arc<Mutex<mpsc::UnboundedReceiver<(RequestId, [u8; 32])>>>,
|
notable_cosign_requests: Mutex<mpsc::UnboundedReceiver<(RequestId, [u8; 32])>>,
|
||||||
inbound_request_responses: mpsc::UnboundedSender<(RequestId, Response)>,
|
inbound_request_responses: mpsc::UnboundedSender<(RequestId, Response)>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// The libp2p-backed P2P implementation.
|
||||||
|
///
|
||||||
|
/// The P2p trait implementation does not support backpressure and is expected to be fully
|
||||||
|
/// utilized. Failure to poll the entire API will cause unbounded memory growth.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct Libp2p(Arc<Libp2pInner>);
|
||||||
|
|
||||||
impl Libp2p {
|
impl Libp2p {
|
||||||
/// Create a new libp2p-backed P2P instance.
|
/// Create a new libp2p-backed P2P instance.
|
||||||
///
|
///
|
||||||
/// This will spawn all of the internal tasks necessary for functioning.
|
/// This will spawn all of the internal tasks necessary for functioning.
|
||||||
pub fn new(serai_key: &Zeroizing<Keypair>, serai: Serai) -> Libp2p {
|
pub fn new(serai_key: &Zeroizing<Keypair>, serai: Arc<Serai>) -> Libp2p {
|
||||||
// Define the object we track peers with
|
// Define the object we track peers with
|
||||||
let peers = Peers { peers: Arc::new(RwLock::new(HashMap::new())) };
|
let peers = Peers { peers: Arc::new(RwLock::new(HashMap::new())) };
|
||||||
|
|
||||||
@@ -186,7 +188,7 @@ impl Libp2p {
|
|||||||
|
|
||||||
let mut swarm = SwarmBuilder::with_existing_identity(identity::Keypair::generate_ed25519())
|
let mut swarm = SwarmBuilder::with_existing_identity(identity::Keypair::generate_ed25519())
|
||||||
.with_tokio()
|
.with_tokio()
|
||||||
.with_tcp(TcpConfig::default().nodelay(false), new_only_validators, new_yamux)
|
.with_tcp(TcpConfig::default().nodelay(true), new_only_validators, new_yamux)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.with_behaviour(|_| Behavior {
|
.with_behaviour(|_| Behavior {
|
||||||
allow_list: allow_block_list::Behaviour::default(),
|
allow_list: allow_block_list::Behaviour::default(),
|
||||||
@@ -239,28 +241,29 @@ impl Libp2p {
|
|||||||
inbound_request_responses_recv,
|
inbound_request_responses_recv,
|
||||||
);
|
);
|
||||||
|
|
||||||
Libp2p {
|
Libp2p(Arc::new(Libp2pInner {
|
||||||
peers,
|
peers,
|
||||||
|
|
||||||
gossip: gossip_send,
|
gossip: gossip_send,
|
||||||
outbound_requests: outbound_requests_send,
|
outbound_requests: outbound_requests_send,
|
||||||
|
|
||||||
tributary_gossip: Arc::new(Mutex::new(tributary_gossip_recv)),
|
tributary_gossip: Mutex::new(tributary_gossip_recv),
|
||||||
|
|
||||||
signed_cosigns: Arc::new(Mutex::new(signed_cosigns_recv)),
|
signed_cosigns: Mutex::new(signed_cosigns_recv),
|
||||||
signed_cosigns_send,
|
signed_cosigns_send,
|
||||||
|
|
||||||
heartbeat_requests: Arc::new(Mutex::new(heartbeat_requests_recv)),
|
heartbeat_requests: Mutex::new(heartbeat_requests_recv),
|
||||||
notable_cosign_requests: Arc::new(Mutex::new(notable_cosign_requests_recv)),
|
notable_cosign_requests: Mutex::new(notable_cosign_requests_recv),
|
||||||
inbound_request_responses: inbound_request_responses_send,
|
inbound_request_responses: inbound_request_responses_send,
|
||||||
}
|
}))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl tributary::P2p for Libp2p {
|
impl tributary_sdk::P2p for Libp2p {
|
||||||
fn broadcast(&self, tributary: [u8; 32], message: Vec<u8>) -> impl Send + Future<Output = ()> {
|
fn broadcast(&self, tributary: [u8; 32], message: Vec<u8>) -> impl Send + Future<Output = ()> {
|
||||||
async move {
|
async move {
|
||||||
self
|
self
|
||||||
|
.0
|
||||||
.gossip
|
.gossip
|
||||||
.send(Message::Tributary { tributary, message })
|
.send(Message::Tributary { tributary, message })
|
||||||
.expect("gossip recv channel was dropped?");
|
.expect("gossip recv channel was dropped?");
|
||||||
@@ -281,7 +284,7 @@ impl serai_cosign::RequestNotableCosigns for Libp2p {
|
|||||||
|
|
||||||
let request = Request::NotableCosigns { global_session };
|
let request = Request::NotableCosigns { global_session };
|
||||||
|
|
||||||
let peers = self.peers.peers.read().await.clone();
|
let peers = self.0.peers.peers.read().await.clone();
|
||||||
// HashSet of all peers
|
// HashSet of all peers
|
||||||
let peers = peers.into_values().flat_map(<_>::into_iter).collect::<HashSet<_>>();
|
let peers = peers.into_values().flat_map(<_>::into_iter).collect::<HashSet<_>>();
|
||||||
// Vec of all peers
|
// Vec of all peers
|
||||||
@@ -297,6 +300,7 @@ impl serai_cosign::RequestNotableCosigns for Libp2p {
|
|||||||
|
|
||||||
let (sender, receiver) = oneshot::channel();
|
let (sender, receiver) = oneshot::channel();
|
||||||
self
|
self
|
||||||
|
.0
|
||||||
.outbound_requests
|
.outbound_requests
|
||||||
.send((peer, request, sender))
|
.send((peer, request, sender))
|
||||||
.expect("outbound requests recv channel was dropped?");
|
.expect("outbound requests recv channel was dropped?");
|
||||||
@@ -310,6 +314,7 @@ impl serai_cosign::RequestNotableCosigns for Libp2p {
|
|||||||
{
|
{
|
||||||
for cosign in cosigns {
|
for cosign in cosigns {
|
||||||
self
|
self
|
||||||
|
.0
|
||||||
.signed_cosigns_send
|
.signed_cosigns_send
|
||||||
.send(cosign)
|
.send(cosign)
|
||||||
.expect("signed_cosigns recv in this object was dropped?");
|
.expect("signed_cosigns recv in this object was dropped?");
|
||||||
@@ -327,22 +332,29 @@ impl serai_coordinator_p2p::P2p for Libp2p {
|
|||||||
|
|
||||||
fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
|
fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
|
||||||
async move {
|
async move {
|
||||||
let Some(peer_ids) = self.peers.peers.read().await.get(&network).cloned() else {
|
let Some(peer_ids) = self.0.peers.peers.read().await.get(&network).cloned() else {
|
||||||
return vec![];
|
return vec![];
|
||||||
};
|
};
|
||||||
let mut res = vec![];
|
let mut res = vec![];
|
||||||
for id in peer_ids {
|
for id in peer_ids {
|
||||||
res.push(Peer { outbound_requests: &self.outbound_requests, id });
|
res.push(Peer { outbound_requests: &self.0.outbound_requests, id });
|
||||||
}
|
}
|
||||||
res
|
res
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()> {
|
||||||
|
async move {
|
||||||
|
self.0.gossip.send(Message::Cosign(cosign)).expect("gossip recv channel was dropped?");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn heartbeat(
|
fn heartbeat(
|
||||||
&self,
|
&self,
|
||||||
) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)> {
|
) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)> {
|
||||||
async move {
|
async move {
|
||||||
let (request_id, set, latest_block_hash) = self
|
let (request_id, set, latest_block_hash) = self
|
||||||
|
.0
|
||||||
.heartbeat_requests
|
.heartbeat_requests
|
||||||
.lock()
|
.lock()
|
||||||
.await
|
.await
|
||||||
@@ -351,7 +363,7 @@ impl serai_coordinator_p2p::P2p for Libp2p {
|
|||||||
.expect("heartbeat_requests_send was dropped?");
|
.expect("heartbeat_requests_send was dropped?");
|
||||||
let (sender, receiver) = oneshot::channel();
|
let (sender, receiver) = oneshot::channel();
|
||||||
tokio::spawn({
|
tokio::spawn({
|
||||||
let respond = self.inbound_request_responses.clone();
|
let respond = self.0.inbound_request_responses.clone();
|
||||||
async move {
|
async move {
|
||||||
// The swarm task expects us to respond to every request. If the caller drops this
|
// The swarm task expects us to respond to every request. If the caller drops this
|
||||||
// channel, we'll receive `Err` and respond with `vec![]`, safely satisfying that bound
|
// channel, we'll receive `Err` and respond with `vec![]`, safely satisfying that bound
|
||||||
@@ -375,6 +387,7 @@ impl serai_coordinator_p2p::P2p for Libp2p {
|
|||||||
) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)> {
|
) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)> {
|
||||||
async move {
|
async move {
|
||||||
let (request_id, global_session) = self
|
let (request_id, global_session) = self
|
||||||
|
.0
|
||||||
.notable_cosign_requests
|
.notable_cosign_requests
|
||||||
.lock()
|
.lock()
|
||||||
.await
|
.await
|
||||||
@@ -383,7 +396,7 @@ impl serai_coordinator_p2p::P2p for Libp2p {
|
|||||||
.expect("notable_cosign_requests_send was dropped?");
|
.expect("notable_cosign_requests_send was dropped?");
|
||||||
let (sender, receiver) = oneshot::channel();
|
let (sender, receiver) = oneshot::channel();
|
||||||
tokio::spawn({
|
tokio::spawn({
|
||||||
let respond = self.inbound_request_responses.clone();
|
let respond = self.0.inbound_request_responses.clone();
|
||||||
async move {
|
async move {
|
||||||
let response = if let Ok(notable_cosigns) = receiver.await {
|
let response = if let Ok(notable_cosigns) = receiver.await {
|
||||||
Response::NotableCosigns(notable_cosigns)
|
Response::NotableCosigns(notable_cosigns)
|
||||||
@@ -401,13 +414,14 @@ impl serai_coordinator_p2p::P2p for Libp2p {
|
|||||||
|
|
||||||
fn tributary_message(&self) -> impl Send + Future<Output = ([u8; 32], Vec<u8>)> {
|
fn tributary_message(&self) -> impl Send + Future<Output = ([u8; 32], Vec<u8>)> {
|
||||||
async move {
|
async move {
|
||||||
self.tributary_gossip.lock().await.recv().await.expect("tributary_gossip send was dropped?")
|
self.0.tributary_gossip.lock().await.recv().await.expect("tributary_gossip send was dropped?")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn cosign(&self) -> impl Send + Future<Output = SignedCosign> {
|
fn cosign(&self) -> impl Send + Future<Output = SignedCosign> {
|
||||||
async move {
|
async move {
|
||||||
self
|
self
|
||||||
|
.0
|
||||||
.signed_cosigns
|
.signed_cosigns
|
||||||
.lock()
|
.lock()
|
||||||
.await
|
.await
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
use core::time::Duration;
|
use core::time::Duration;
|
||||||
|
|
||||||
use tributary::tendermint::LATENCY_TIME;
|
use tributary_sdk::tendermint::LATENCY_TIME;
|
||||||
|
|
||||||
use libp2p::ping::{self, Config, Behaviour};
|
use libp2p::ping::{self, Config, Behaviour};
|
||||||
pub use ping::Event;
|
pub use ping::Event;
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ use serai_coordinator_p2p::{Heartbeat, TributaryBlockWithCommit};
|
|||||||
/// The maximum message size for the request-response protocol
|
/// The maximum message size for the request-response protocol
|
||||||
// This is derived from the heartbeat message size as it's our largest message
|
// This is derived from the heartbeat message size as it's our largest message
|
||||||
pub(crate) const MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize =
|
pub(crate) const MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize =
|
||||||
(tributary::BLOCK_SIZE_LIMIT * serai_coordinator_p2p::heartbeat::BLOCKS_PER_BATCH) + 1024;
|
1024 + serai_coordinator_p2p::heartbeat::BATCH_SIZE_LIMIT;
|
||||||
|
|
||||||
const PROTOCOL: &str = "/serai/coordinator/reqres/1.0.0";
|
const PROTOCOL: &str = "/serai/coordinator/reqres/1.0.0";
|
||||||
|
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ use borsh::BorshDeserialize;
|
|||||||
|
|
||||||
use serai_client::validator_sets::primitives::ValidatorSet;
|
use serai_client::validator_sets::primitives::ValidatorSet;
|
||||||
|
|
||||||
use tokio::sync::{mpsc, RwLock};
|
use tokio::sync::{mpsc, oneshot, RwLock};
|
||||||
|
|
||||||
use serai_task::TaskHandle;
|
use serai_task::TaskHandle;
|
||||||
|
|
||||||
@@ -21,7 +21,7 @@ use libp2p::{
|
|||||||
swarm::{dial_opts::DialOpts, SwarmEvent, Swarm},
|
swarm::{dial_opts::DialOpts, SwarmEvent, Swarm},
|
||||||
};
|
};
|
||||||
|
|
||||||
use serai_coordinator_p2p::{oneshot, Heartbeat};
|
use serai_coordinator_p2p::Heartbeat;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
Peers, BehaviorEvent, Behavior,
|
Peers, BehaviorEvent, Behavior,
|
||||||
@@ -69,11 +69,6 @@ pub(crate) struct SwarmTask {
|
|||||||
|
|
||||||
inbound_request_response_channels: HashMap<RequestId, ResponseChannel<Response>>,
|
inbound_request_response_channels: HashMap<RequestId, ResponseChannel<Response>>,
|
||||||
heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
|
heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
|
||||||
/* TODO
|
|
||||||
let cosigns = Cosigning::<D>::notable_cosigns(&self.db, global_session);
|
|
||||||
let res = reqres::Response::NotableCosigns(cosigns);
|
|
||||||
let _: Result<_, _> = self.swarm.behaviour_mut().reqres.send_response(channel, res);
|
|
||||||
*/
|
|
||||||
notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
|
notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
|
||||||
inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
|
inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
|
||||||
}
|
}
|
||||||
@@ -230,8 +225,8 @@ impl SwarmTask {
|
|||||||
SwarmEvent::Behaviour(
|
SwarmEvent::Behaviour(
|
||||||
BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event)
|
BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event)
|
||||||
) => {
|
) => {
|
||||||
// Ensure these are unreachable cases, not actual events
|
// This *is* an exhaustive match as these events are empty enums
|
||||||
let _: void::Void = event;
|
match event {}
|
||||||
}
|
}
|
||||||
SwarmEvent::Behaviour(
|
SwarmEvent::Behaviour(
|
||||||
BehaviorEvent::Ping(ping::Event { peer: _, connection, result, })
|
BehaviorEvent::Ping(ping::Event { peer: _, connection, result, })
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ use std::{
|
|||||||
collections::{HashSet, HashMap},
|
collections::{HashSet, HashMap},
|
||||||
};
|
};
|
||||||
|
|
||||||
use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, Serai};
|
use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, SeraiError, Serai};
|
||||||
|
|
||||||
use serai_task::{Task, ContinuallyRan};
|
use serai_task::{Task, ContinuallyRan};
|
||||||
|
|
||||||
@@ -21,7 +21,7 @@ pub(crate) struct Changes {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) struct Validators {
|
pub(crate) struct Validators {
|
||||||
serai: Serai,
|
serai: Arc<Serai>,
|
||||||
|
|
||||||
// A cache for which session we're populated with the validators of
|
// A cache for which session we're populated with the validators of
|
||||||
sessions: HashMap<NetworkId, Session>,
|
sessions: HashMap<NetworkId, Session>,
|
||||||
@@ -35,7 +35,7 @@ pub(crate) struct Validators {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Validators {
|
impl Validators {
|
||||||
pub(crate) fn new(serai: Serai) -> (Self, mpsc::UnboundedReceiver<Changes>) {
|
pub(crate) fn new(serai: Arc<Serai>) -> (Self, mpsc::UnboundedReceiver<Changes>) {
|
||||||
let (send, recv) = mpsc::unbounded_channel();
|
let (send, recv) = mpsc::unbounded_channel();
|
||||||
let validators = Validators {
|
let validators = Validators {
|
||||||
serai,
|
serai,
|
||||||
@@ -50,9 +50,8 @@ impl Validators {
|
|||||||
async fn session_changes(
|
async fn session_changes(
|
||||||
serai: impl Borrow<Serai>,
|
serai: impl Borrow<Serai>,
|
||||||
sessions: impl Borrow<HashMap<NetworkId, Session>>,
|
sessions: impl Borrow<HashMap<NetworkId, Session>>,
|
||||||
) -> Result<Vec<(NetworkId, Session, HashSet<PeerId>)>, String> {
|
) -> Result<Vec<(NetworkId, Session, HashSet<PeerId>)>, SeraiError> {
|
||||||
let temporal_serai =
|
let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
|
||||||
serai.borrow().as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
|
|
||||||
let temporal_serai = temporal_serai.validator_sets();
|
let temporal_serai = temporal_serai.validator_sets();
|
||||||
|
|
||||||
let mut session_changes = vec![];
|
let mut session_changes = vec![];
|
||||||
@@ -69,7 +68,7 @@ impl Validators {
|
|||||||
let session = match temporal_serai.session(network).await {
|
let session = match temporal_serai.session(network).await {
|
||||||
Ok(Some(session)) => session,
|
Ok(Some(session)) => session,
|
||||||
Ok(None) => return Ok(None),
|
Ok(None) => return Ok(None),
|
||||||
Err(e) => return Err(format!("{e:?}")),
|
Err(e) => return Err(e),
|
||||||
};
|
};
|
||||||
|
|
||||||
if sessions.get(&network) == Some(&session) {
|
if sessions.get(&network) == Some(&session) {
|
||||||
@@ -81,7 +80,7 @@ impl Validators {
|
|||||||
session,
|
session,
|
||||||
validators.into_iter().map(peer_id_from_public).collect(),
|
validators.into_iter().map(peer_id_from_public).collect(),
|
||||||
))),
|
))),
|
||||||
Err(e) => Err(format!("{e:?}")),
|
Err(e) => Err(e),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
@@ -147,8 +146,8 @@ impl Validators {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Update the view of the validators.
|
/// Update the view of the validators.
|
||||||
pub(crate) async fn update(&mut self) -> Result<(), String> {
|
pub(crate) async fn update(&mut self) -> Result<(), SeraiError> {
|
||||||
let session_changes = Self::session_changes(&self.serai, &self.sessions).await?;
|
let session_changes = Self::session_changes(&*self.serai, &self.sessions).await?;
|
||||||
self.incorporate_session_changes(session_changes);
|
self.incorporate_session_changes(session_changes);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -174,7 +173,9 @@ impl UpdateValidatorsTask {
|
|||||||
/// Spawn a new instance of the UpdateValidatorsTask.
|
/// Spawn a new instance of the UpdateValidatorsTask.
|
||||||
///
|
///
|
||||||
/// This returns a reference to the Validators it updates after spawning itself.
|
/// This returns a reference to the Validators it updates after spawning itself.
|
||||||
pub(crate) fn spawn(serai: Serai) -> (Arc<RwLock<Validators>>, mpsc::UnboundedReceiver<Changes>) {
|
pub(crate) fn spawn(
|
||||||
|
serai: Arc<Serai>,
|
||||||
|
) -> (Arc<RwLock<Validators>>, mpsc::UnboundedReceiver<Changes>) {
|
||||||
// The validators which will be updated
|
// The validators which will be updated
|
||||||
let (validators, changes) = Validators::new(serai);
|
let (validators, changes) = Validators::new(serai);
|
||||||
let validators = Arc::new(RwLock::new(validators));
|
let validators = Arc::new(RwLock::new(validators));
|
||||||
@@ -198,13 +199,13 @@ impl ContinuallyRan for UpdateValidatorsTask {
|
|||||||
const DELAY_BETWEEN_ITERATIONS: u64 = 60;
|
const DELAY_BETWEEN_ITERATIONS: u64 = 60;
|
||||||
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
|
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
|
||||||
|
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = SeraiError;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let session_changes = {
|
let session_changes = {
|
||||||
let validators = self.validators.read().await;
|
let validators = self.validators.read().await;
|
||||||
Validators::session_changes(validators.serai.clone(), validators.sessions.clone())
|
Validators::session_changes(validators.serai.clone(), validators.sessions.clone()).await?
|
||||||
.await
|
|
||||||
.map_err(|e| format!("{e:?}"))?
|
|
||||||
};
|
};
|
||||||
self.validators.write().await.incorporate_session_changes(session_changes);
|
self.validators.write().await.incorporate_session_changes(session_changes);
|
||||||
Ok(true)
|
Ok(true)
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
use core::future::Future;
|
use core::future::Future;
|
||||||
use std::time::{Duration, SystemTime};
|
use std::time::{Duration, SystemTime};
|
||||||
|
|
||||||
use serai_client::validator_sets::primitives::ValidatorSet;
|
use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet};
|
||||||
|
|
||||||
use futures_lite::FutureExt;
|
use futures_lite::FutureExt;
|
||||||
|
|
||||||
use tributary::{ReadWrite, TransactionTrait, Block, Tributary, TributaryReader};
|
use tributary_sdk::{ReadWrite, TransactionTrait, Block, Tributary, TributaryReader};
|
||||||
|
|
||||||
use serai_db::*;
|
use serai_db::*;
|
||||||
use serai_task::ContinuallyRan;
|
use serai_task::ContinuallyRan;
|
||||||
@@ -13,25 +13,41 @@ use serai_task::ContinuallyRan;
|
|||||||
use crate::{Heartbeat, Peer, P2p};
|
use crate::{Heartbeat, Peer, P2p};
|
||||||
|
|
||||||
// Amount of blocks in a minute
|
// Amount of blocks in a minute
|
||||||
const BLOCKS_PER_MINUTE: usize = (60 / (tributary::tendermint::TARGET_BLOCK_TIME / 1000)) as usize;
|
const BLOCKS_PER_MINUTE: usize =
|
||||||
|
(60 / (tributary_sdk::tendermint::TARGET_BLOCK_TIME / 1000)) as usize;
|
||||||
|
|
||||||
/// The maximum amount of blocks to include/included within a batch.
|
/// The minimum amount of blocks to include/included within a batch, assuming there's blocks to
|
||||||
pub const BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
|
/// include in the batch.
|
||||||
|
///
|
||||||
|
/// This decides the size limit of the Batch (the Block size limit multiplied by the minimum amount
|
||||||
|
/// of blocks we'll send). The actual amount of blocks sent will be the amount which fits within
|
||||||
|
/// the size limit.
|
||||||
|
pub const MIN_BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
|
||||||
|
|
||||||
|
/// The size limit for a batch of blocks sent in response to a Heartbeat.
|
||||||
|
///
|
||||||
|
/// This estimates the size of a commit as `32 + (MAX_VALIDATORS * 128)`. At the time of writing, a
|
||||||
|
/// commit is `8 + (validators * 32) + (32 + (validators * 32))` (for the time, list of validators,
|
||||||
|
/// and aggregate signature). Accordingly, this should be a safe over-estimate.
|
||||||
|
pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH *
|
||||||
|
(tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((MAX_KEY_SHARES_PER_SET as usize) * 128));
|
||||||
|
|
||||||
/// Sends a heartbeat to other validators on regular intervals informing them of our Tributary's
|
/// Sends a heartbeat to other validators on regular intervals informing them of our Tributary's
|
||||||
/// tip.
|
/// tip.
|
||||||
///
|
///
|
||||||
/// If the other validator has more blocks then we do, they're expected to inform us. This forms
|
/// If the other validator has more blocks then we do, they're expected to inform us. This forms
|
||||||
/// the sync protocol for our Tributaries.
|
/// the sync protocol for our Tributaries.
|
||||||
pub struct HeartbeatTask<TD: Db, Tx: TransactionTrait, P: P2p> {
|
pub(crate) struct HeartbeatTask<TD: Db, Tx: TransactionTrait, P: P2p> {
|
||||||
set: ValidatorSet,
|
pub(crate) set: ValidatorSet,
|
||||||
tributary: Tributary<TD, Tx, P>,
|
pub(crate) tributary: Tributary<TD, Tx, P>,
|
||||||
reader: TributaryReader<TD, Tx>,
|
pub(crate) reader: TributaryReader<TD, Tx>,
|
||||||
p2p: P,
|
pub(crate) p2p: P,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<TD: Db, Tx: TransactionTrait, P: P2p> ContinuallyRan for HeartbeatTask<TD, Tx, P> {
|
impl<TD: Db, Tx: TransactionTrait, P: P2p> ContinuallyRan for HeartbeatTask<TD, Tx, P> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = String;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
// If our blockchain hasn't had a block in the past minute, trigger the heartbeat protocol
|
// If our blockchain hasn't had a block in the past minute, trigger the heartbeat protocol
|
||||||
const TIME_TO_TRIGGER_SYNCING: Duration = Duration::from_secs(60);
|
const TIME_TO_TRIGGER_SYNCING: Duration = Duration::from_secs(60);
|
||||||
@@ -80,7 +96,7 @@ impl<TD: Db, Tx: TransactionTrait, P: P2p> ContinuallyRan for HeartbeatTask<TD,
|
|||||||
|
|
||||||
// This is the final batch if it has less than the maximum amount of blocks
|
// This is the final batch if it has less than the maximum amount of blocks
|
||||||
// (signifying there weren't more blocks after this to fill the batch with)
|
// (signifying there weren't more blocks after this to fill the batch with)
|
||||||
let final_batch = blocks.len() < BLOCKS_PER_BATCH;
|
let final_batch = blocks.len() < MIN_BLOCKS_PER_BATCH;
|
||||||
|
|
||||||
// Sync each block
|
// Sync each block
|
||||||
for block_with_commit in blocks {
|
for block_with_commit in blocks {
|
||||||
|
|||||||
@@ -3,18 +3,23 @@
|
|||||||
#![deny(missing_docs)]
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
use core::future::Future;
|
use core::future::Future;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use borsh::{BorshSerialize, BorshDeserialize};
|
use borsh::{BorshSerialize, BorshDeserialize};
|
||||||
|
|
||||||
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};
|
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};
|
||||||
|
|
||||||
use serai_cosign::SignedCosign;
|
use serai_db::Db;
|
||||||
|
use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader};
|
||||||
|
use serai_cosign::{SignedCosign, Cosigning};
|
||||||
|
|
||||||
/// A oneshot channel.
|
use tokio::sync::{mpsc, oneshot};
|
||||||
pub mod oneshot;
|
|
||||||
|
use serai_task::{Task, ContinuallyRan};
|
||||||
|
|
||||||
/// The heartbeat task, effecting sync of Tributaries
|
/// The heartbeat task, effecting sync of Tributaries
|
||||||
pub mod heartbeat;
|
pub mod heartbeat;
|
||||||
|
use crate::heartbeat::HeartbeatTask;
|
||||||
|
|
||||||
/// A heartbeat for a Tributary.
|
/// A heartbeat for a Tributary.
|
||||||
#[derive(Clone, Copy, BorshSerialize, BorshDeserialize, Debug)]
|
#[derive(Clone, Copy, BorshSerialize, BorshDeserialize, Debug)]
|
||||||
@@ -44,17 +49,23 @@ pub trait Peer<'a>: Send {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// The representation of the P2P network.
|
/// The representation of the P2P network.
|
||||||
pub trait P2p: Send + Sync + Clone + tributary::P2p + serai_cosign::RequestNotableCosigns {
|
pub trait P2p:
|
||||||
|
Send + Sync + Clone + tributary_sdk::P2p + serai_cosign::RequestNotableCosigns
|
||||||
|
{
|
||||||
/// The representation of a peer.
|
/// The representation of a peer.
|
||||||
type Peer<'a>: Peer<'a>;
|
type Peer<'a>: Peer<'a>;
|
||||||
|
|
||||||
/// Fetch the peers for this network.
|
/// Fetch the peers for this network.
|
||||||
fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;
|
fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;
|
||||||
|
|
||||||
|
/// Broadcast a cosign.
|
||||||
|
fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()>;
|
||||||
|
|
||||||
/// A cancel-safe future for the next heartbeat received over the P2P network.
|
/// A cancel-safe future for the next heartbeat received over the P2P network.
|
||||||
///
|
///
|
||||||
/// Yields the validator set its for, the latest block hash observed, and a channel to return the
|
/// Yields the validator set its for, the latest block hash observed, and a channel to return the
|
||||||
/// descending blocks.
|
/// descending blocks. This channel MUST NOT and will not have its receiver dropped before a
|
||||||
|
/// message is sent.
|
||||||
fn heartbeat(
|
fn heartbeat(
|
||||||
&self,
|
&self,
|
||||||
) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)>;
|
) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)>;
|
||||||
@@ -62,6 +73,7 @@ pub trait P2p: Send + Sync + Clone + tributary::P2p + serai_cosign::RequestNotab
|
|||||||
/// A cancel-safe future for the next request for the notable cosigns of a gloabl session.
|
/// A cancel-safe future for the next request for the notable cosigns of a gloabl session.
|
||||||
///
|
///
|
||||||
/// Yields the global session the request is for and a channel to return the notable cosigns.
|
/// Yields the global session the request is for and a channel to return the notable cosigns.
|
||||||
|
/// This channel MUST NOT and will not have its receiver dropped before a message is sent.
|
||||||
fn notable_cosigns_request(
|
fn notable_cosigns_request(
|
||||||
&self,
|
&self,
|
||||||
) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)>;
|
) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)>;
|
||||||
@@ -74,3 +86,119 @@ pub trait P2p: Send + Sync + Clone + tributary::P2p + serai_cosign::RequestNotab
|
|||||||
/// A cancel-safe future for the next cosign received.
|
/// A cancel-safe future for the next cosign received.
|
||||||
fn cosign(&self) -> impl Send + Future<Output = SignedCosign>;
|
fn cosign(&self) -> impl Send + Future<Output = SignedCosign>;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn handle_notable_cosigns_request<D: Db>(
|
||||||
|
db: &D,
|
||||||
|
global_session: [u8; 32],
|
||||||
|
channel: oneshot::Sender<Vec<SignedCosign>>,
|
||||||
|
) {
|
||||||
|
let cosigns = Cosigning::<D>::notable_cosigns(db, global_session);
|
||||||
|
channel.send(cosigns).expect("channel listening for cosign oneshot response was dropped?");
|
||||||
|
}
|
||||||
|
|
||||||
|
fn handle_heartbeat<D: Db, T: TransactionTrait>(
|
||||||
|
reader: &TributaryReader<D, T>,
|
||||||
|
mut latest_block_hash: [u8; 32],
|
||||||
|
channel: oneshot::Sender<Vec<TributaryBlockWithCommit>>,
|
||||||
|
) {
|
||||||
|
let mut res_size = 8;
|
||||||
|
let mut res = vec![];
|
||||||
|
// This former case should be covered by this latter case
|
||||||
|
while (res.len() < heartbeat::MIN_BLOCKS_PER_BATCH) || (res_size < heartbeat::BATCH_SIZE_LIMIT) {
|
||||||
|
let Some(block_after) = reader.block_after(&latest_block_hash) else { break };
|
||||||
|
|
||||||
|
// These `break` conditions should only occur under edge cases, such as if we're actively
|
||||||
|
// deleting this Tributary due to being done with it
|
||||||
|
let Some(block) = reader.block(&block_after) else { break };
|
||||||
|
let block = block.serialize();
|
||||||
|
let Some(commit) = reader.commit(&block_after) else { break };
|
||||||
|
res_size += 8 + block.len() + 8 + commit.len();
|
||||||
|
res.push(TributaryBlockWithCommit { block, commit });
|
||||||
|
|
||||||
|
latest_block_hash = block_after;
|
||||||
|
}
|
||||||
|
channel
|
||||||
|
.send(res)
|
||||||
|
.map_err(|_| ())
|
||||||
|
.expect("channel listening for heartbeat oneshot response was dropped?");
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Run the P2P instance.
|
||||||
|
///
|
||||||
|
/// `add_tributary`'s and `retire_tributary's senders, along with `send_cosigns`'s receiver, must
|
||||||
|
/// never be dropped. `retire_tributary` is not required to only be instructed with added
|
||||||
|
/// Tributaries.
|
||||||
|
pub async fn run<TD: Db, Tx: TransactionTrait, P: P2p>(
|
||||||
|
db: impl Db,
|
||||||
|
p2p: P,
|
||||||
|
mut add_tributary: mpsc::UnboundedReceiver<(ValidatorSet, Tributary<TD, Tx, P>)>,
|
||||||
|
mut retire_tributary: mpsc::UnboundedReceiver<ValidatorSet>,
|
||||||
|
send_cosigns: mpsc::UnboundedSender<SignedCosign>,
|
||||||
|
) {
|
||||||
|
let mut readers = HashMap::<ValidatorSet, TributaryReader<TD, Tx>>::new();
|
||||||
|
let mut tributaries = HashMap::<[u8; 32], mpsc::UnboundedSender<Vec<u8>>>::new();
|
||||||
|
let mut heartbeat_tasks = HashMap::<ValidatorSet, _>::new();
|
||||||
|
|
||||||
|
loop {
|
||||||
|
tokio::select! {
|
||||||
|
tributary = add_tributary.recv() => {
|
||||||
|
let (set, tributary) = tributary.expect("add_tributary send was dropped");
|
||||||
|
let reader = tributary.reader();
|
||||||
|
readers.insert(set, reader.clone());
|
||||||
|
|
||||||
|
let (heartbeat_task_def, heartbeat_task) = Task::new();
|
||||||
|
tokio::spawn(
|
||||||
|
(HeartbeatTask {
|
||||||
|
set,
|
||||||
|
tributary: tributary.clone(),
|
||||||
|
reader: reader.clone(),
|
||||||
|
p2p: p2p.clone(),
|
||||||
|
}).continually_run(heartbeat_task_def, vec![])
|
||||||
|
);
|
||||||
|
heartbeat_tasks.insert(set, heartbeat_task);
|
||||||
|
|
||||||
|
let (tributary_message_send, mut tributary_message_recv) = mpsc::unbounded_channel();
|
||||||
|
tributaries.insert(tributary.genesis(), tributary_message_send);
|
||||||
|
// For as long as this sender exists, handle the messages from it on a dedicated task
|
||||||
|
tokio::spawn(async move {
|
||||||
|
while let Some(message) = tributary_message_recv.recv().await {
|
||||||
|
tributary.handle_message(&message).await;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
set = retire_tributary.recv() => {
|
||||||
|
let set = set.expect("retire_tributary send was dropped");
|
||||||
|
let Some(reader) = readers.remove(&set) else { continue };
|
||||||
|
tributaries.remove(&reader.genesis()).expect("tributary reader but no tributary");
|
||||||
|
heartbeat_tasks.remove(&set).expect("tributary but no heartbeat task");
|
||||||
|
}
|
||||||
|
|
||||||
|
(heartbeat, channel) = p2p.heartbeat() => {
|
||||||
|
if let Some(reader) = readers.get(&heartbeat.set) {
|
||||||
|
let reader = reader.clone(); // This is a cheap clone
|
||||||
|
// We spawn this on a task due to the DB reads needed
|
||||||
|
tokio::spawn(async move {
|
||||||
|
handle_heartbeat(&reader, heartbeat.latest_block_hash, channel)
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
(global_session, channel) = p2p.notable_cosigns_request() => {
|
||||||
|
tokio::spawn({
|
||||||
|
let db = db.clone();
|
||||||
|
async move { handle_notable_cosigns_request(&db, global_session, channel) }
|
||||||
|
});
|
||||||
|
}
|
||||||
|
(tributary, message) = p2p.tributary_message() => {
|
||||||
|
if let Some(tributary) = tributaries.get(&tributary) {
|
||||||
|
tributary.send(message).expect("tributary message recv was dropped?");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cosign = p2p.cosign() => {
|
||||||
|
// We don't call `Cosigning::intake_cosign` here as that can only be called from a single
|
||||||
|
// location. We also need to intake the cosigns we produce, which means we need to merge
|
||||||
|
// these streams (signing, network) somehow. That's done with this mpsc channel
|
||||||
|
send_cosigns.send(cosign).expect("channel receiving cosigns was dropped");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,35 +0,0 @@
|
|||||||
use core::{
|
|
||||||
pin::Pin,
|
|
||||||
task::{Poll, Context},
|
|
||||||
future::Future,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub use async_channel::{SendError, RecvError};
|
|
||||||
|
|
||||||
/// The sender for a oneshot channel.
|
|
||||||
pub struct Sender<T: Send>(async_channel::Sender<T>);
|
|
||||||
impl<T: Send> Sender<T> {
|
|
||||||
/// Send a value down the channel.
|
|
||||||
///
|
|
||||||
/// Returns an error if the channel's receiver was dropped.
|
|
||||||
pub fn send(self, msg: T) -> Result<(), SendError<T>> {
|
|
||||||
self.0.send_blocking(msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The receiver for a oneshot channel.
|
|
||||||
pub struct Receiver<T: Send>(async_channel::Receiver<T>);
|
|
||||||
impl<T: Send> Future for Receiver<T> {
|
|
||||||
type Output = Result<T, RecvError>;
|
|
||||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
|
||||||
let recv = self.0.recv();
|
|
||||||
futures_lite::pin!(recv);
|
|
||||||
recv.poll(cx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create a new oneshot channel.
|
|
||||||
pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
|
|
||||||
let (send, recv) = async_channel::bounded(1);
|
|
||||||
(Sender(send), Receiver(recv))
|
|
||||||
}
|
|
||||||
113
coordinator/src/db.rs
Normal file
113
coordinator/src/db.rs
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
use std::{path::Path, fs};
|
||||||
|
|
||||||
|
pub(crate) use serai_db::{Get, DbTxn, Db as DbTrait};
|
||||||
|
use serai_db::{create_db, db_channel};
|
||||||
|
|
||||||
|
use serai_client::{
|
||||||
|
primitives::NetworkId,
|
||||||
|
validator_sets::primitives::{Session, ValidatorSet},
|
||||||
|
};
|
||||||
|
|
||||||
|
use serai_cosign::SignedCosign;
|
||||||
|
use serai_coordinator_substrate::NewSetInformation;
|
||||||
|
use serai_coordinator_tributary::Transaction;
|
||||||
|
|
||||||
|
#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
|
||||||
|
pub(crate) type Db = serai_db::ParityDb;
|
||||||
|
#[cfg(feature = "rocksdb")]
|
||||||
|
pub(crate) type Db = serai_db::RocksDB;
|
||||||
|
|
||||||
|
#[allow(unused_variables, unreachable_code)]
|
||||||
|
fn db(path: &str) -> Db {
|
||||||
|
{
|
||||||
|
let path: &Path = path.as_ref();
|
||||||
|
// This may error if this path already exists, which we shouldn't propagate/panic on. If this
|
||||||
|
// is a problem (such as we don't have the necessary permissions to write to this path), we
|
||||||
|
// expect the following DB opening to error.
|
||||||
|
let _: Result<_, _> = fs::create_dir_all(path.parent().unwrap());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(all(feature = "parity-db", feature = "rocksdb"))]
|
||||||
|
panic!("built with parity-db and rocksdb");
|
||||||
|
#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
|
||||||
|
let db = serai_db::new_parity_db(path);
|
||||||
|
#[cfg(feature = "rocksdb")]
|
||||||
|
let db = serai_db::new_rocksdb(path);
|
||||||
|
db
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn coordinator_db() -> Db {
|
||||||
|
let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
|
||||||
|
db(&format!("{root_path}/coordinator/db"))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn tributary_db_folder(set: ValidatorSet) -> String {
|
||||||
|
let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
|
||||||
|
let network = match set.network {
|
||||||
|
NetworkId::Serai => panic!("creating Tributary for the Serai network"),
|
||||||
|
NetworkId::Bitcoin => "Bitcoin",
|
||||||
|
NetworkId::Ethereum => "Ethereum",
|
||||||
|
NetworkId::Monero => "Monero",
|
||||||
|
};
|
||||||
|
format!("{root_path}/tributary-{network}-{}", set.session.0)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn tributary_db(set: ValidatorSet) -> Db {
|
||||||
|
db(&format!("{}/db", tributary_db_folder(set)))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn prune_tributary_db(set: ValidatorSet) {
|
||||||
|
log::info!("pruning data directory for tributary {set:?}");
|
||||||
|
let db = tributary_db_folder(set);
|
||||||
|
if fs::exists(&db).expect("couldn't check if tributary DB exists") {
|
||||||
|
fs::remove_dir_all(db).unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
create_db! {
|
||||||
|
Coordinator {
|
||||||
|
// The currently active Tributaries
|
||||||
|
ActiveTributaries: () -> Vec<NewSetInformation>,
|
||||||
|
// The latest Tributary to have been retired for a network
|
||||||
|
// Since Tributaries are retired sequentially, this is informative to if any Tributary has been
|
||||||
|
// retired
|
||||||
|
RetiredTributary: (network: NetworkId) -> Session,
|
||||||
|
// The last handled message from a Processor
|
||||||
|
LastProcessorMessage: (network: NetworkId) -> u64,
|
||||||
|
// Cosigns we produced and tried to intake yet incurred an error while doing so
|
||||||
|
ErroneousCosigns: () -> Vec<SignedCosign>,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
db_channel! {
|
||||||
|
Coordinator {
|
||||||
|
// Cosigns we produced
|
||||||
|
SignedCosigns: () -> SignedCosign,
|
||||||
|
// Tributaries to clean up upon reboot
|
||||||
|
TributaryCleanup: () -> ValidatorSet,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
mod _internal_db {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
db_channel! {
|
||||||
|
Coordinator {
|
||||||
|
// Tributary transactions to publish
|
||||||
|
TributaryTransactions: (set: ValidatorSet) -> Transaction,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) struct TributaryTransactions;
|
||||||
|
impl TributaryTransactions {
|
||||||
|
pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, tx: &Transaction) {
|
||||||
|
// If this set has yet to be retired, send this transaction
|
||||||
|
if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
|
||||||
|
_internal_db::TributaryTransactions::send(txn, set, tx);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<Transaction> {
|
||||||
|
_internal_db::TributaryTransactions::try_recv(txn, set)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,10 +1,430 @@
|
|||||||
|
use core::{ops::Deref, time::Duration};
|
||||||
|
use std::{sync::Arc, collections::HashMap, time::Instant};
|
||||||
|
|
||||||
|
use zeroize::{Zeroize, Zeroizing};
|
||||||
|
use rand_core::{RngCore, OsRng};
|
||||||
|
|
||||||
|
use ciphersuite::{
|
||||||
|
group::{ff::PrimeField, GroupEncoding},
|
||||||
|
Ciphersuite, Ristretto,
|
||||||
|
};
|
||||||
|
|
||||||
|
use borsh::BorshDeserialize;
|
||||||
|
|
||||||
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
|
use serai_client::{
|
||||||
|
primitives::{NetworkId, PublicKey},
|
||||||
|
validator_sets::primitives::ValidatorSet,
|
||||||
|
Serai,
|
||||||
|
};
|
||||||
|
use message_queue::{Service, client::MessageQueue};
|
||||||
|
|
||||||
|
use serai_task::{Task, TaskHandle, ContinuallyRan};
|
||||||
|
|
||||||
|
use serai_cosign::{Faulted, SignedCosign, Cosigning};
|
||||||
|
use serai_coordinator_substrate::{CanonicalEventStream, EphemeralEventStream, SignSlashReport};
|
||||||
|
use serai_coordinator_tributary::{Signed, Transaction, SubstrateBlockPlans};
|
||||||
|
|
||||||
|
mod db;
|
||||||
|
use db::*;
|
||||||
|
|
||||||
mod tributary;
|
mod tributary;
|
||||||
|
|
||||||
|
mod substrate;
|
||||||
|
use substrate::SubstrateTask;
|
||||||
|
|
||||||
mod p2p {
|
mod p2p {
|
||||||
use serai_coordinator_p2p::*;
|
pub use serai_coordinator_p2p::*;
|
||||||
pub use serai_coordinator_libp2p_p2p::Libp2p;
|
pub use serai_coordinator_libp2p_p2p::Libp2p;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn main() {
|
// Use a zeroizing allocator for this entire application
|
||||||
todo!("TODO")
|
// While secrets should already be zeroized, the presence of secret keys in a networked application
|
||||||
|
// (at increased risk of OOB reads) justifies the performance hit in case any secrets weren't
|
||||||
|
// already
|
||||||
|
#[global_allocator]
|
||||||
|
static ALLOCATOR: zalloc::ZeroizingAlloc<std::alloc::System> =
|
||||||
|
zalloc::ZeroizingAlloc(std::alloc::System);
|
||||||
|
|
||||||
|
async fn serai() -> Arc<Serai> {
|
||||||
|
const SERAI_CONNECTION_DELAY: Duration = Duration::from_secs(10);
|
||||||
|
const MAX_SERAI_CONNECTION_DELAY: Duration = Duration::from_secs(300);
|
||||||
|
|
||||||
|
let mut delay = SERAI_CONNECTION_DELAY;
|
||||||
|
loop {
|
||||||
|
let Ok(serai) = Serai::new(format!(
|
||||||
|
"http://{}:9944",
|
||||||
|
serai_env::var("SERAI_HOSTNAME").expect("Serai hostname wasn't provided")
|
||||||
|
))
|
||||||
|
.await
|
||||||
|
else {
|
||||||
|
log::error!("couldn't connect to the Serai node");
|
||||||
|
tokio::time::sleep(delay).await;
|
||||||
|
delay = (delay + SERAI_CONNECTION_DELAY).min(MAX_SERAI_CONNECTION_DELAY);
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
log::info!("made initial connection to Serai node");
|
||||||
|
return Arc::new(serai);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn spawn_cosigning<D: serai_db::Db>(
|
||||||
|
mut db: D,
|
||||||
|
serai: Arc<Serai>,
|
||||||
|
p2p: impl p2p::P2p,
|
||||||
|
tasks_to_run_upon_cosigning: Vec<TaskHandle>,
|
||||||
|
mut p2p_cosigns: mpsc::UnboundedReceiver<SignedCosign>,
|
||||||
|
) {
|
||||||
|
let mut cosigning = Cosigning::spawn(db.clone(), serai, p2p.clone(), tasks_to_run_upon_cosigning);
|
||||||
|
tokio::spawn(async move {
|
||||||
|
const COSIGN_LOOP_INTERVAL: Duration = Duration::from_secs(5);
|
||||||
|
|
||||||
|
let last_cosign_rebroadcast = Instant::now();
|
||||||
|
loop {
|
||||||
|
// Intake our own cosigns
|
||||||
|
match Cosigning::<D>::latest_cosigned_block_number(&db) {
|
||||||
|
Ok(latest_cosigned_block_number) => {
|
||||||
|
let mut txn = db.txn();
|
||||||
|
// The cosigns we prior tried to intake yet failed to
|
||||||
|
let mut cosigns = ErroneousCosigns::get(&txn).unwrap_or(vec![]);
|
||||||
|
// The cosigns we have yet to intake
|
||||||
|
while let Some(cosign) = SignedCosigns::try_recv(&mut txn) {
|
||||||
|
cosigns.push(cosign);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut erroneous = vec![];
|
||||||
|
for cosign in cosigns {
|
||||||
|
// If this cosign is stale, move on
|
||||||
|
if cosign.cosign.block_number <= latest_cosigned_block_number {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
match cosigning.intake_cosign(&cosign) {
|
||||||
|
// Publish this cosign
|
||||||
|
Ok(()) => p2p.publish_cosign(cosign).await,
|
||||||
|
Err(e) => {
|
||||||
|
assert!(e.temporal(), "signed an invalid cosign: {e:?}");
|
||||||
|
// Since this had a temporal error, queue it to try again later
|
||||||
|
erroneous.push(cosign);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save the cosigns with temporal errors to the database
|
||||||
|
ErroneousCosigns::set(&mut txn, &erroneous);
|
||||||
|
|
||||||
|
txn.commit();
|
||||||
|
}
|
||||||
|
Err(Faulted) => {
|
||||||
|
// We don't panic here as the following code rebroadcasts our cosigns which is
|
||||||
|
// necessary to inform other coordinators of the faulty cosigns
|
||||||
|
log::error!("cosigning faulted");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let time_till_cosign_rebroadcast = (last_cosign_rebroadcast +
|
||||||
|
serai_cosign::BROADCAST_FREQUENCY)
|
||||||
|
.saturating_duration_since(Instant::now());
|
||||||
|
tokio::select! {
|
||||||
|
() = tokio::time::sleep(time_till_cosign_rebroadcast) => {
|
||||||
|
for cosign in cosigning.cosigns_to_rebroadcast() {
|
||||||
|
p2p.publish_cosign(cosign).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cosign = p2p_cosigns.recv() => {
|
||||||
|
let cosign = cosign.expect("p2p cosigns channel was dropped?");
|
||||||
|
if cosigning.intake_cosign(&cosign).is_ok() {
|
||||||
|
p2p.publish_cosign(cosign).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Make sure this loop runs at least this often
|
||||||
|
() = tokio::time::sleep(COSIGN_LOOP_INTERVAL) => {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_processor_messages(
|
||||||
|
mut db: impl serai_db::Db,
|
||||||
|
message_queue: Arc<MessageQueue>,
|
||||||
|
network: NetworkId,
|
||||||
|
) {
|
||||||
|
loop {
|
||||||
|
let (msg_id, msg) = {
|
||||||
|
let msg = message_queue.next(Service::Processor(network)).await;
|
||||||
|
// Check this message's sender is as expected
|
||||||
|
assert_eq!(msg.from, Service::Processor(network));
|
||||||
|
|
||||||
|
// Check this message's ID is as expected
|
||||||
|
let last = LastProcessorMessage::get(&db, network);
|
||||||
|
let next = last.map(|id| id + 1).unwrap_or(0);
|
||||||
|
// This should either be the last message's ID, if we committed but didn't send our ACK, or
|
||||||
|
// the expected next message's ID
|
||||||
|
assert!((Some(msg.id) == last) || (msg.id == next));
|
||||||
|
|
||||||
|
// TODO: Check msg.sig
|
||||||
|
|
||||||
|
// If this is the message we already handled, and just failed to ACK, ACK it now and move on
|
||||||
|
if Some(msg.id) == last {
|
||||||
|
message_queue.ack(Service::Processor(network), msg.id).await;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
(msg.id, messages::ProcessorMessage::deserialize(&mut msg.msg.as_slice()).unwrap())
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut txn = db.txn();
|
||||||
|
|
||||||
|
match msg {
|
||||||
|
messages::ProcessorMessage::KeyGen(msg) => match msg {
|
||||||
|
messages::key_gen::ProcessorMessage::Participation { session, participation } => {
|
||||||
|
let set = ValidatorSet { network, session };
|
||||||
|
TributaryTransactions::send(
|
||||||
|
&mut txn,
|
||||||
|
set,
|
||||||
|
&Transaction::DkgParticipation { participation, signed: Signed::default() },
|
||||||
|
);
|
||||||
|
}
|
||||||
|
messages::key_gen::ProcessorMessage::GeneratedKeyPair {
|
||||||
|
session,
|
||||||
|
substrate_key,
|
||||||
|
network_key,
|
||||||
|
} => todo!("TODO Transaction::DkgConfirmationPreprocess"),
|
||||||
|
messages::key_gen::ProcessorMessage::Blame { session, participant } => {
|
||||||
|
let set = ValidatorSet { network, session };
|
||||||
|
TributaryTransactions::send(
|
||||||
|
&mut txn,
|
||||||
|
set,
|
||||||
|
&Transaction::RemoveParticipant {
|
||||||
|
participant: todo!("TODO"),
|
||||||
|
signed: Signed::default(),
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
messages::ProcessorMessage::Sign(msg) => match msg {
|
||||||
|
messages::sign::ProcessorMessage::InvalidParticipant { session, participant } => {
|
||||||
|
let set = ValidatorSet { network, session };
|
||||||
|
TributaryTransactions::send(
|
||||||
|
&mut txn,
|
||||||
|
set,
|
||||||
|
&Transaction::RemoveParticipant {
|
||||||
|
participant: todo!("TODO"),
|
||||||
|
signed: Signed::default(),
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
messages::sign::ProcessorMessage::Preprocesses { id, preprocesses } => {
|
||||||
|
todo!("TODO Transaction::Batch + Transaction::Sign")
|
||||||
|
}
|
||||||
|
messages::sign::ProcessorMessage::Shares { id, shares } => todo!("TODO Transaction::Sign"),
|
||||||
|
},
|
||||||
|
messages::ProcessorMessage::Coordinator(msg) => match msg {
|
||||||
|
messages::coordinator::ProcessorMessage::CosignedBlock { cosign } => {
|
||||||
|
SignedCosigns::send(&mut txn, &cosign);
|
||||||
|
}
|
||||||
|
messages::coordinator::ProcessorMessage::SignedBatch { batch } => {
|
||||||
|
todo!("TODO PublishBatchTask")
|
||||||
|
}
|
||||||
|
messages::coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
|
||||||
|
todo!("TODO PublishSlashReportTask")
|
||||||
|
}
|
||||||
|
},
|
||||||
|
messages::ProcessorMessage::Substrate(msg) => match msg {
|
||||||
|
messages::substrate::ProcessorMessage::SubstrateBlockAck { block, plans } => {
|
||||||
|
let mut by_session = HashMap::new();
|
||||||
|
for plan in plans {
|
||||||
|
by_session
|
||||||
|
.entry(plan.session)
|
||||||
|
.or_insert_with(|| Vec::with_capacity(1))
|
||||||
|
.push(plan.transaction_plan_id);
|
||||||
|
}
|
||||||
|
for (session, plans) in by_session {
|
||||||
|
let set = ValidatorSet { network, session };
|
||||||
|
SubstrateBlockPlans::set(&mut txn, set, block, &plans);
|
||||||
|
TributaryTransactions::send(
|
||||||
|
&mut txn,
|
||||||
|
set,
|
||||||
|
&Transaction::SubstrateBlock { hash: block },
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mark this as the last handled message
|
||||||
|
LastProcessorMessage::set(&mut txn, network, &msg_id);
|
||||||
|
// Commit the txn
|
||||||
|
txn.commit();
|
||||||
|
// Now that we won't handle this message again, acknowledge it so we won't see it again
|
||||||
|
message_queue.ack(Service::Processor(network), msg_id).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
// Override the panic handler with one which will panic if any tokio task panics
|
||||||
|
{
|
||||||
|
let existing = std::panic::take_hook();
|
||||||
|
std::panic::set_hook(Box::new(move |panic| {
|
||||||
|
existing(panic);
|
||||||
|
const MSG: &str = "exiting the process due to a task panicking";
|
||||||
|
println!("{MSG}");
|
||||||
|
log::error!("{MSG}");
|
||||||
|
std::process::exit(1);
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize the logger
|
||||||
|
if std::env::var("RUST_LOG").is_err() {
|
||||||
|
std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string()));
|
||||||
|
}
|
||||||
|
env_logger::init();
|
||||||
|
log::info!("starting coordinator service...");
|
||||||
|
|
||||||
|
// Read the Serai key from the env
|
||||||
|
let serai_key = {
|
||||||
|
let mut key_hex = serai_env::var("SERAI_KEY").expect("Serai key wasn't provided");
|
||||||
|
let mut key_vec = hex::decode(&key_hex).map_err(|_| ()).expect("Serai key wasn't hex-encoded");
|
||||||
|
key_hex.zeroize();
|
||||||
|
if key_vec.len() != 32 {
|
||||||
|
key_vec.zeroize();
|
||||||
|
panic!("Serai key had an invalid length");
|
||||||
|
}
|
||||||
|
let mut key_bytes = [0; 32];
|
||||||
|
key_bytes.copy_from_slice(&key_vec);
|
||||||
|
key_vec.zeroize();
|
||||||
|
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::from_repr(key_bytes).unwrap());
|
||||||
|
key_bytes.zeroize();
|
||||||
|
key
|
||||||
|
};
|
||||||
|
|
||||||
|
// Open the database
|
||||||
|
let mut db = coordinator_db();
|
||||||
|
|
||||||
|
let existing_tributaries_at_boot = {
|
||||||
|
let mut txn = db.txn();
|
||||||
|
|
||||||
|
// Cleanup all historic Tributaries
|
||||||
|
while let Some(to_cleanup) = TributaryCleanup::try_recv(&mut txn) {
|
||||||
|
prune_tributary_db(to_cleanup);
|
||||||
|
// Drain the cosign intents created for this set
|
||||||
|
while !Cosigning::<Db>::intended_cosigns(&mut txn, to_cleanup).is_empty() {}
|
||||||
|
// Drain the transactions to publish for this set
|
||||||
|
while TributaryTransactions::try_recv(&mut txn, to_cleanup).is_some() {}
|
||||||
|
// Remove the SignSlashReport notification
|
||||||
|
SignSlashReport::try_recv(&mut txn, to_cleanup);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove retired Tributaries from ActiveTributaries
|
||||||
|
let mut active_tributaries = ActiveTributaries::get(&txn).unwrap_or(vec![]);
|
||||||
|
active_tributaries.retain(|tributary| {
|
||||||
|
RetiredTributary::get(&txn, tributary.set.network).map(|session| session.0) <
|
||||||
|
Some(tributary.set.session.0)
|
||||||
|
});
|
||||||
|
ActiveTributaries::set(&mut txn, &active_tributaries);
|
||||||
|
|
||||||
|
txn.commit();
|
||||||
|
|
||||||
|
active_tributaries
|
||||||
|
};
|
||||||
|
|
||||||
|
// Connect to the message-queue
|
||||||
|
let message_queue = Arc::new(MessageQueue::from_env(Service::Coordinator));
|
||||||
|
|
||||||
|
// Connect to the Serai node
|
||||||
|
let serai = serai().await;
|
||||||
|
|
||||||
|
let (p2p_add_tributary_send, p2p_add_tributary_recv) = mpsc::unbounded_channel();
|
||||||
|
let (p2p_retire_tributary_send, p2p_retire_tributary_recv) = mpsc::unbounded_channel();
|
||||||
|
let (p2p_cosigns_send, p2p_cosigns_recv) = mpsc::unbounded_channel();
|
||||||
|
|
||||||
|
// Spawn the P2P network
|
||||||
|
let p2p = {
|
||||||
|
let serai_keypair = {
|
||||||
|
let mut key_bytes = serai_key.to_bytes();
|
||||||
|
// Schnorrkel SecretKey is the key followed by 32 bytes of entropy for nonces
|
||||||
|
let mut expanded_key = Zeroizing::new([0; 64]);
|
||||||
|
expanded_key.as_mut_slice()[.. 32].copy_from_slice(&key_bytes);
|
||||||
|
OsRng.fill_bytes(&mut expanded_key.as_mut_slice()[32 ..]);
|
||||||
|
key_bytes.zeroize();
|
||||||
|
Zeroizing::new(
|
||||||
|
schnorrkel::SecretKey::from_bytes(expanded_key.as_slice()).unwrap().to_keypair(),
|
||||||
|
)
|
||||||
|
};
|
||||||
|
let p2p = p2p::Libp2p::new(&serai_keypair, serai.clone());
|
||||||
|
tokio::spawn(p2p::run::<Db, Transaction, _>(
|
||||||
|
db.clone(),
|
||||||
|
p2p.clone(),
|
||||||
|
p2p_add_tributary_recv,
|
||||||
|
p2p_retire_tributary_recv,
|
||||||
|
p2p_cosigns_send,
|
||||||
|
));
|
||||||
|
p2p
|
||||||
|
};
|
||||||
|
|
||||||
|
// Spawn the Substrate scanners
|
||||||
|
let (substrate_task_def, substrate_task) = Task::new();
|
||||||
|
let (substrate_canonical_task_def, substrate_canonical_task) = Task::new();
|
||||||
|
tokio::spawn(
|
||||||
|
CanonicalEventStream::new(db.clone(), serai.clone())
|
||||||
|
.continually_run(substrate_canonical_task_def, vec![substrate_task.clone()]),
|
||||||
|
);
|
||||||
|
let (substrate_ephemeral_task_def, substrate_ephemeral_task) = Task::new();
|
||||||
|
tokio::spawn(
|
||||||
|
EphemeralEventStream::new(
|
||||||
|
db.clone(),
|
||||||
|
serai.clone(),
|
||||||
|
PublicKey::from_raw((<Ristretto as Ciphersuite>::generator() * serai_key.deref()).to_bytes()),
|
||||||
|
)
|
||||||
|
.continually_run(substrate_ephemeral_task_def, vec![substrate_task]),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Spawn the cosign handler
|
||||||
|
spawn_cosigning(
|
||||||
|
db.clone(),
|
||||||
|
serai.clone(),
|
||||||
|
p2p.clone(),
|
||||||
|
// Run the Substrate scanners once we cosign new blocks
|
||||||
|
vec![substrate_canonical_task, substrate_ephemeral_task],
|
||||||
|
p2p_cosigns_recv,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Spawn all Tributaries on-disk
|
||||||
|
for tributary in existing_tributaries_at_boot {
|
||||||
|
crate::tributary::spawn_tributary(
|
||||||
|
db.clone(),
|
||||||
|
message_queue.clone(),
|
||||||
|
p2p.clone(),
|
||||||
|
&p2p_add_tributary_send,
|
||||||
|
tributary,
|
||||||
|
serai_key.clone(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle the events from the Substrate scanner
|
||||||
|
tokio::spawn(
|
||||||
|
(SubstrateTask {
|
||||||
|
serai_key: serai_key.clone(),
|
||||||
|
db: db.clone(),
|
||||||
|
message_queue: message_queue.clone(),
|
||||||
|
p2p: p2p.clone(),
|
||||||
|
p2p_add_tributary: p2p_add_tributary_send.clone(),
|
||||||
|
p2p_retire_tributary: p2p_retire_tributary_send.clone(),
|
||||||
|
})
|
||||||
|
.continually_run(substrate_task_def, vec![]),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Handle all of the Processors' messages
|
||||||
|
for network in serai_client::primitives::NETWORKS {
|
||||||
|
if network == NetworkId::Serai {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
tokio::spawn(handle_processor_messages(db.clone(), message_queue.clone(), network));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run the spawned tasks ad-infinitum
|
||||||
|
core::future::pending().await
|
||||||
}
|
}
|
||||||
|
|||||||
159
coordinator/src/substrate.rs
Normal file
159
coordinator/src/substrate.rs
Normal file
@@ -0,0 +1,159 @@
|
|||||||
|
use core::future::Future;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use zeroize::Zeroizing;
|
||||||
|
|
||||||
|
use ciphersuite::{Ciphersuite, Ristretto};
|
||||||
|
|
||||||
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
|
use serai_db::{DbTxn, Db as DbTrait};
|
||||||
|
|
||||||
|
use serai_client::validator_sets::primitives::{Session, ValidatorSet};
|
||||||
|
use message_queue::{Service, Metadata, client::MessageQueue};
|
||||||
|
|
||||||
|
use tributary_sdk::Tributary;
|
||||||
|
|
||||||
|
use serai_task::ContinuallyRan;
|
||||||
|
|
||||||
|
use serai_coordinator_tributary::Transaction;
|
||||||
|
use serai_coordinator_p2p::P2p;
|
||||||
|
|
||||||
|
use crate::Db;
|
||||||
|
|
||||||
|
pub(crate) struct SubstrateTask<P: P2p> {
|
||||||
|
pub(crate) serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||||
|
pub(crate) db: Db,
|
||||||
|
pub(crate) message_queue: Arc<MessageQueue>,
|
||||||
|
pub(crate) p2p: P,
|
||||||
|
pub(crate) p2p_add_tributary:
|
||||||
|
mpsc::UnboundedSender<(ValidatorSet, Tributary<Db, Transaction, P>)>,
|
||||||
|
pub(crate) p2p_retire_tributary: mpsc::UnboundedSender<ValidatorSet>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
|
||||||
|
type Error = String; // TODO
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
|
async move {
|
||||||
|
let mut made_progress = false;
|
||||||
|
|
||||||
|
// Handle the Canonical events
|
||||||
|
for network in serai_client::primitives::NETWORKS {
|
||||||
|
loop {
|
||||||
|
let mut txn = self.db.txn();
|
||||||
|
let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network)
|
||||||
|
else {
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
|
||||||
|
match msg {
|
||||||
|
// TODO: Stop trying to confirm the DKG
|
||||||
|
messages::substrate::CoordinatorMessage::SetKeys { .. } => todo!("TODO"),
|
||||||
|
messages::substrate::CoordinatorMessage::SlashesReported { session } => {
|
||||||
|
let prior_retired = crate::db::RetiredTributary::get(&txn, network);
|
||||||
|
let next_to_be_retired =
|
||||||
|
prior_retired.map(|session| Session(session.0 + 1)).unwrap_or(Session(0));
|
||||||
|
assert_eq!(session, next_to_be_retired);
|
||||||
|
crate::db::RetiredTributary::set(&mut txn, network, &session);
|
||||||
|
self
|
||||||
|
.p2p_retire_tributary
|
||||||
|
.send(ValidatorSet { network, session })
|
||||||
|
.expect("p2p retire_tributary channel dropped?");
|
||||||
|
}
|
||||||
|
messages::substrate::CoordinatorMessage::Block { .. } => {}
|
||||||
|
}
|
||||||
|
|
||||||
|
let msg = messages::CoordinatorMessage::from(msg);
|
||||||
|
let metadata = Metadata {
|
||||||
|
from: Service::Coordinator,
|
||||||
|
to: Service::Processor(network),
|
||||||
|
intent: msg.intent(),
|
||||||
|
};
|
||||||
|
let msg = borsh::to_vec(&msg).unwrap();
|
||||||
|
self.message_queue.queue(metadata, msg).await?;
|
||||||
|
txn.commit();
|
||||||
|
made_progress = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle the NewSet events
|
||||||
|
loop {
|
||||||
|
let mut txn = self.db.txn();
|
||||||
|
let Some(new_set) = serai_coordinator_substrate::NewSet::try_recv(&mut txn) else { break };
|
||||||
|
|
||||||
|
if let Some(historic_session) = new_set.set.session.0.checked_sub(2) {
|
||||||
|
// We should have retired this session if we're here
|
||||||
|
if crate::db::RetiredTributary::get(&txn, new_set.set.network).map(|session| session.0) <
|
||||||
|
Some(historic_session)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
If we haven't, it's because we're processing the NewSet event before the retiry
|
||||||
|
event from the Canonical event stream. This happens if the Canonical event, and
|
||||||
|
then the NewSet event, is fired while we're already iterating over NewSet events.
|
||||||
|
|
||||||
|
We break, dropping the txn, restoring this NewSet to the database, so we'll only
|
||||||
|
handle it once a future iteration of this loop handles the retiry event.
|
||||||
|
*/
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Queue this historical Tributary for deletion.
|
||||||
|
|
||||||
|
We explicitly don't queue this upon Tributary retire, instead here, to give time to
|
||||||
|
investigate retired Tributaries if questions are raised post-retiry. This gives a
|
||||||
|
week (the duration of the following session) after the Tributary has been retired to
|
||||||
|
make a backup of the data directory for any investigations.
|
||||||
|
*/
|
||||||
|
crate::db::TributaryCleanup::send(
|
||||||
|
&mut txn,
|
||||||
|
&ValidatorSet { network: new_set.set.network, session: Session(historic_session) },
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save this Tributary as active to the database
|
||||||
|
{
|
||||||
|
let mut active_tributaries =
|
||||||
|
crate::db::ActiveTributaries::get(&txn).unwrap_or(Vec::with_capacity(1));
|
||||||
|
active_tributaries.push(new_set.clone());
|
||||||
|
crate::db::ActiveTributaries::set(&mut txn, &active_tributaries);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send GenerateKey to the processor
|
||||||
|
let msg = messages::key_gen::CoordinatorMessage::GenerateKey {
|
||||||
|
session: new_set.set.session,
|
||||||
|
threshold: new_set.threshold,
|
||||||
|
evrf_public_keys: new_set.evrf_public_keys.clone(),
|
||||||
|
};
|
||||||
|
let msg = messages::CoordinatorMessage::from(msg);
|
||||||
|
let metadata = Metadata {
|
||||||
|
from: Service::Coordinator,
|
||||||
|
to: Service::Processor(new_set.set.network),
|
||||||
|
intent: msg.intent(),
|
||||||
|
};
|
||||||
|
let msg = borsh::to_vec(&msg).unwrap();
|
||||||
|
self.message_queue.queue(metadata, msg).await?;
|
||||||
|
|
||||||
|
// Commit the transaction for all of this
|
||||||
|
txn.commit();
|
||||||
|
|
||||||
|
// Now spawn the Tributary
|
||||||
|
// If we reboot after committing the txn, but before this is called, this will be called
|
||||||
|
// on boot
|
||||||
|
crate::tributary::spawn_tributary(
|
||||||
|
self.db.clone(),
|
||||||
|
self.message_queue.clone(),
|
||||||
|
self.p2p.clone(),
|
||||||
|
&self.p2p_add_tributary,
|
||||||
|
new_set,
|
||||||
|
self.serai_key.clone(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
made_progress = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(made_progress)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
453
coordinator/src/tributary.rs
Normal file
453
coordinator/src/tributary.rs
Normal file
@@ -0,0 +1,453 @@
|
|||||||
|
use core::{future::Future, time::Duration};
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use zeroize::Zeroizing;
|
||||||
|
use rand_core::OsRng;
|
||||||
|
use blake2::{digest::typenum::U32, Digest, Blake2s};
|
||||||
|
use ciphersuite::{Ciphersuite, Ristretto};
|
||||||
|
|
||||||
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
|
use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};
|
||||||
|
|
||||||
|
use scale::Encode;
|
||||||
|
use serai_client::validator_sets::primitives::ValidatorSet;
|
||||||
|
|
||||||
|
use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary};
|
||||||
|
|
||||||
|
use serai_task::{Task, TaskHandle, DoesNotError, ContinuallyRan};
|
||||||
|
|
||||||
|
use message_queue::{Service, Metadata, client::MessageQueue};
|
||||||
|
|
||||||
|
use serai_cosign::{Faulted, CosignIntent, Cosigning};
|
||||||
|
use serai_coordinator_substrate::{NewSetInformation, SignSlashReport};
|
||||||
|
use serai_coordinator_tributary::{Transaction, ProcessorMessages, CosignIntents, ScanTributaryTask};
|
||||||
|
use serai_coordinator_p2p::P2p;
|
||||||
|
|
||||||
|
use crate::{Db, TributaryTransactions};
|
||||||
|
|
||||||
|
// Database channel of cosign intents this coordinator has begun working on, keyed per validator
// set. Entries are pushed by ProvideCosignCosignedTransactionsTask when it provides a `Cosign`
// transaction and popped once Serai has actually cosigned the block.
db_channel! {
  Coordinator {
    PendingCosigns: (set: ValidatorSet) -> CosignIntent,
  }
}
|
||||||
|
|
||||||
|
/// Provide a Provided Transaction to the Tributary.
|
||||||
|
///
|
||||||
|
/// This is not a well-designed function. This is specific to the context in which its called,
|
||||||
|
/// within this file. It should only be considered an internal helper for this domain alone.
|
||||||
|
async fn provide_transaction<TD: DbTrait, P: P2p>(
|
||||||
|
set: ValidatorSet,
|
||||||
|
tributary: &Tributary<TD, Transaction, P>,
|
||||||
|
tx: Transaction,
|
||||||
|
) {
|
||||||
|
match tributary.provide_transaction(tx.clone()).await {
|
||||||
|
// The Tributary uses its own DB, so we may provide this multiple times if we reboot before
|
||||||
|
// committing the txn which provoked this
|
||||||
|
Ok(()) | Err(ProvidedError::AlreadyProvided) => {}
|
||||||
|
Err(ProvidedError::NotProvided) => {
|
||||||
|
panic!("providing a Transaction which wasn't a Provided transaction: {tx:?}");
|
||||||
|
}
|
||||||
|
Err(ProvidedError::InvalidProvided(e)) => {
|
||||||
|
panic!("providing an invalid Provided transaction, tx: {tx:?}, error: {e:?}")
|
||||||
|
}
|
||||||
|
// The Tributary's scan task won't advance if we don't have the Provided transactions
|
||||||
|
// present on-chain, and this enters an infinite loop to block the calling task from
|
||||||
|
// advancing
|
||||||
|
Err(ProvidedError::LocalMismatchesOnChain) => loop {
|
||||||
|
log::error!(
|
||||||
|
"Tributary {:?} was supposed to provide {:?} but peers disagree, halting Tributary",
|
||||||
|
set,
|
||||||
|
tx,
|
||||||
|
);
|
||||||
|
// Print this every five minutes as this does need to be handled
|
||||||
|
tokio::time::sleep(Duration::from_secs(5 * 60)).await;
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Provides Cosign/Cosigned Transactions onto the Tributary.
pub(crate) struct ProvideCosignCosignedTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> {
  // Coordinator database, holding the PendingCosigns channel and the Cosigning state
  db: CD,
  // Database dedicated to this Tributary, used to record provided CosignIntents
  tributary_db: TD,
  // The validator set this Tributary serves
  set: NewSetInformation,
  // Handle to the Tributary to provide transactions onto
  tributary: Tributary<TD, Transaction, P>,
}
|
||||||
|
// Each iteration drains PendingCosigns for cosigns Serai has since finished, providing
// `Cosigned` transactions for them, then queues the next batch of cosign intents as `Cosign`
// transactions (unless a notable cosign is still outstanding).
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan
  for ProvideCosignCosignedTransactionsTask<CD, TD, P>
{
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;

      // Check if we produced any cosigns we were supposed to
      let mut pending_notable_cosign = false;
      loop {
        let mut txn = self.db.txn();

        // Fetch the next cosign this tributary should handle
        let Some(cosign) = PendingCosigns::try_recv(&mut txn, self.set.set) else { break };
        pending_notable_cosign = cosign.notable;

        // If we (Serai) haven't cosigned this block, break as this is still pending
        // NOTE: breaking here drops `txn` uncommitted, so the cosign stays in PendingCosigns
        let latest = match Cosigning::<CD>::latest_cosigned_block_number(&txn) {
          Ok(latest) => latest,
          Err(Faulted) => {
            log::error!("cosigning faulted");
            Err("cosigning faulted")?
          }
        };
        if latest < cosign.block_number {
          break;
        }

        // Because we've cosigned it, provide the TX for that
        // This uses the Tributary's own DB txn, committed before the coordinator txn below,
        // so a reboot in between re-provides (which is safe) rather than losing the intent
        {
          let mut txn = self.tributary_db.txn();
          CosignIntents::provide(&mut txn, self.set.set, &cosign);
          txn.commit();
        }
        provide_transaction(
          self.set.set,
          &self.tributary,
          Transaction::Cosigned { substrate_block_hash: cosign.block_hash },
        )
        .await;
        // Clear pending_notable_cosign since this cosign isn't pending
        pending_notable_cosign = false;

        // Commit the txn to clear this from PendingCosigns
        txn.commit();
        made_progress = true;
      }

      // If we don't have any notable cosigns pending, provide the next set of cosign intents
      if !pending_notable_cosign {
        let mut txn = self.db.txn();
        // intended_cosigns will only yield up to and including the next notable cosign
        for cosign in Cosigning::<CD>::intended_cosigns(&mut txn, self.set.set) {
          // Flag this cosign as pending
          PendingCosigns::send(&mut txn, self.set.set, &cosign);
          // Provide the transaction to queue it for work
          provide_transaction(
            self.set.set,
            &self.tributary,
            Transaction::Cosign { substrate_block_hash: cosign.block_hash },
          )
          .await;
        }
        txn.commit();
        made_progress = true;
      }

      Ok(made_progress)
    }
  }
}
|
||||||
|
|
||||||
|
/// Adds all of the transactions sent via `TributaryTransactions`.
pub(crate) struct AddTributaryTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> {
  // Coordinator database, holding the TributaryTransactions channel
  db: CD,
  // Database dedicated to this Tributary
  tributary_db: TD,
  // Handle to the Tributary to add transactions onto
  tributary: Tributary<TD, Transaction, P>,
  // The validator set this Tributary serves
  set: ValidatorSet,
  // Our Ristretto key, used to sign Signed transactions before publication
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
}
|
||||||
|
// Drains the TributaryTransactions channel, providing Provided transactions and
// signing-then-adding Unsigned/Signed transactions. A full mempool defers the remaining
// queue to a future iteration by dropping the uncommitted txn.
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactionsTask<CD, TD, P> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
      loop {
        let mut txn = self.db.txn();
        let Some(mut tx) = TributaryTransactions::try_recv(&mut txn, self.set) else { break };

        let kind = tx.kind();
        match kind {
          TransactionKind::Provided(_) => provide_transaction(self.set, &self.tributary, tx).await,
          TransactionKind::Unsigned | TransactionKind::Signed(_, _) => {
            // If this is a signed transaction, sign it
            if matches!(kind, TransactionKind::Signed(_, _)) {
              tx.sign(&mut OsRng, self.tributary.genesis(), &self.key);
            }

            // Actually add the transaction
            // TODO: If this is a preprocess, make sure the topic has been recognized
            let res = self.tributary.add_transaction(tx.clone()).await;
            match &res {
              // Fresh publication, already published
              Ok(true | false) => {}
              // Any of these indicate we constructed/signed the transaction incorrectly
              Err(
                TransactionError::TooLargeTransaction |
                TransactionError::InvalidSigner |
                TransactionError::InvalidNonce |
                TransactionError::InvalidSignature |
                TransactionError::InvalidContent,
              ) => {
                panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}");
              }
              // We've published too many transactions recently
              // Drop this txn to try to publish it again later on a future iteration
              Err(TransactionError::TooManyInMempool) => {
                drop(txn);
                break;
              }
              // This isn't a Provided transaction so this should never be hit
              Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
            }
          }
        }

        // Only commit (removing the tx from the channel) once it was actually handled
        made_progress = true;
        txn.commit();
      }
      Ok(made_progress)
    }
  }
}
|
||||||
|
|
||||||
|
/// Takes the messages from ScanTributaryTask and publishes them to the message-queue.
pub(crate) struct TributaryProcessorMessagesTask<TD: DbTrait> {
  // Database dedicated to this Tributary, holding the ProcessorMessages channel
  tributary_db: TD,
  // The validator set this Tributary serves
  set: ValidatorSet,
  // Client for the message-queue service the messages are forwarded to
  message_queue: Arc<MessageQueue>,
}
|
||||||
|
// Forwards each ProcessorMessages entry to the message-queue, committing the channel read only
// after the queue acknowledged the message (so a crash re-sends rather than drops; the intent
// makes the re-send idempotent).
impl<TD: DbTrait> ContinuallyRan for TributaryProcessorMessagesTask<TD> {
  type Error = String; // TODO

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
      loop {
        let mut txn = self.tributary_db.txn();
        let Some(msg) = ProcessorMessages::try_recv(&mut txn, self.set) else { break };
        let metadata = Metadata {
          from: Service::Coordinator,
          to: Service::Processor(self.set.network),
          intent: msg.intent(),
        };
        let msg = borsh::to_vec(&msg).unwrap();
        self.message_queue.queue(metadata, msg).await?;
        txn.commit();
        made_progress = true;
      }
      Ok(made_progress)
    }
  }
}
|
||||||
|
|
||||||
|
/// Checks for the notification to sign a slash report and does so if present.
pub(crate) struct SignSlashReportTask<CD: DbTrait, TD: DbTrait, P: P2p> {
  // Coordinator database, holding the SignSlashReport channel
  db: CD,
  // Database dedicated to this Tributary, read to build the slash report transaction
  tributary_db: TD,
  // Handle to the Tributary to add the signed slash report onto
  tributary: Tributary<TD, Transaction, P>,
  // The validator set this Tributary serves
  set: NewSetInformation,
  // Our Ristretto key, used to sign the slash report transaction
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
}
|
||||||
|
// On the SignSlashReport notification, builds and signs this Tributary's slash report
// transaction and publishes it, deferring (by dropping the uncommitted txn) if the mempool
// is currently full.
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for SignSlashReportTask<CD, TD, P> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut txn = self.db.txn();
      let Some(()) = SignSlashReport::try_recv(&mut txn, self.set.set) else { return Ok(false) };

      // Fetch the slash report for this Tributary
      let mut tx =
        serai_coordinator_tributary::slash_report_transaction(&self.tributary_db, &self.set);
      tx.sign(&mut OsRng, self.tributary.genesis(), &self.key);

      let res = self.tributary.add_transaction(tx.clone()).await;
      match &res {
        // Fresh publication, already published
        Ok(true | false) => {}
        // Any of these indicate we constructed/signed the transaction incorrectly
        Err(
          TransactionError::TooLargeTransaction |
          TransactionError::InvalidSigner |
          TransactionError::InvalidNonce |
          TransactionError::InvalidSignature |
          TransactionError::InvalidContent,
        ) => {
          panic!("created an invalid SlashReport transaction, tx: {tx:?}, err: {res:?}");
        }
        // We've published too many transactions recently
        // Drop this txn to try to publish it again later on a future iteration
        Err(TransactionError::TooManyInMempool) => {
          drop(txn);
          return Ok(false);
        }
        // This isn't a Provided transaction so this should never be hit
        Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
      }

      // Only consume the notification once the transaction was actually published
      txn.commit();
      Ok(true)
    }
  }
}
|
||||||
|
|
||||||
|
/// Run the scan task whenever the Tributary adds a new block.
///
/// Also owns `tasks_to_keep_alive`, holding those task handles until this Tributary is retired,
/// at which point they're dropped (stopping the tasks) and this loop exits.
async fn scan_on_new_block<CD: DbTrait, TD: DbTrait, P: P2p>(
  db: CD,
  set: ValidatorSet,
  tributary: Tributary<TD, Transaction, P>,
  scan_tributary_task: TaskHandle,
  tasks_to_keep_alive: Vec<TaskHandle>,
) {
  loop {
    // Break once this Tributary is retired
    // `None >= Some(_)` is false, so an unretired network never matches here
    if crate::RetiredTributary::get(&db, set.network).map(|session| session.0) >=
      Some(set.session.0)
    {
      drop(tasks_to_keep_alive);
      break;
    }

    // Have the tributary scanner run as soon as there's a new block
    match tributary.next_block_notification().await.await {
      Ok(()) => scan_tributary_task.run_now(),
      // unreachable since this owns the tributary object and doesn't drop it
      Err(_) => panic!("tributary was dropped causing notification to error"),
    }
  }
}
|
||||||
|
|
||||||
|
/// Spawn a Tributary.
///
/// This will:
/// - Spawn the Tributary
/// - Inform the P2P network of the Tributary
/// - Spawn the ScanTributaryTask
/// - Spawn the ProvideCosignCosignedTransactionsTask
/// - Spawn the TributaryProcessorMessagesTask
/// - Spawn the SignSlashReportTask
/// - Iterate the scan task whenever a new block occurs (not just on the standard interval)
pub(crate) async fn spawn_tributary<P: P2p>(
  db: Db,
  message_queue: Arc<MessageQueue>,
  p2p: P,
  p2p_add_tributary: &mpsc::UnboundedSender<(ValidatorSet, Tributary<Db, Transaction, P>)>,
  set: NewSetInformation,
  serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
) {
  // Don't spawn retired Tributaries
  if crate::db::RetiredTributary::get(&db, set.set.network).map(|session| session.0) >=
    Some(set.set.session.0)
  {
    return;
  }

  // The genesis is derived from the declaring Serai block and the set, making it unique per set
  let genesis = <[u8; 32]>::from(Blake2s::<U32>::digest((set.serai_block, set.set).encode()));

  // Since the Serai block will be finalized, then cosigned, before we handle this, this time will
  // be a couple of minutes stale. While the Tributary will still function with a start time in the
  // past, the Tributary will immediately incur round timeouts. We reduce these by adding a
  // constant delay of a couple of minutes.
  const TRIBUTARY_START_TIME_DELAY: u64 = 120;
  let start_time = set.declaration_time + TRIBUTARY_START_TIME_DELAY;

  // Convert the Serai validator keys/weights into the Tributary's expected form
  let mut tributary_validators = Vec::with_capacity(set.validators.len());
  for (validator, weight) in set.validators.iter().copied() {
    let validator_key = <Ristretto as Ciphersuite>::read_G(&mut validator.0.as_slice())
      .expect("Serai validator had an invalid public key");
    let weight = u64::from(weight);
    tributary_validators.push((validator_key, weight));
  }

  // Spawn the Tributary
  let tributary_db = crate::db::tributary_db(set.set);
  let tributary = Tributary::new(
    tributary_db.clone(),
    genesis,
    start_time,
    serai_key.clone(),
    tributary_validators,
    p2p,
  )
  .await
  .unwrap();
  let reader = tributary.reader();

  // Inform the P2P network
  p2p_add_tributary
    .send((set.set, tributary.clone()))
    .expect("p2p's add_tributary channel was closed?");

  // Spawn the task to provide Cosign/Cosigned transactions onto the Tributary
  let (provide_cosign_cosigned_transactions_task_def, provide_cosign_cosigned_transactions_task) =
    Task::new();
  tokio::spawn(
    (ProvideCosignCosignedTransactionsTask {
      db: db.clone(),
      tributary_db: tributary_db.clone(),
      set: set.clone(),
      tributary: tributary.clone(),
    })
    .continually_run(provide_cosign_cosigned_transactions_task_def, vec![]),
  );

  // Spawn the task to send all messages from the Tributary scanner to the message-queue
  let (scan_tributary_messages_task_def, scan_tributary_messages_task) = Task::new();
  tokio::spawn(
    (TributaryProcessorMessagesTask {
      tributary_db: tributary_db.clone(),
      set: set.set,
      message_queue,
    })
    .continually_run(scan_tributary_messages_task_def, vec![]),
  );

  // Spawn the scan task
  let (scan_tributary_task_def, scan_tributary_task) = Task::new();
  tokio::spawn(
    ScanTributaryTask::<_, P>::new(tributary_db.clone(), &set, reader)
      // This is the only handle for this TributaryProcessorMessagesTask, so when this task is
      // dropped, it will be too
      .continually_run(scan_tributary_task_def, vec![scan_tributary_messages_task]),
  );

  // Spawn the sign slash report task
  let (sign_slash_report_task_def, sign_slash_report_task) = Task::new();
  tokio::spawn(
    (SignSlashReportTask {
      db: db.clone(),
      tributary_db: tributary_db.clone(),
      tributary: tributary.clone(),
      set: set.clone(),
      key: serai_key.clone(),
    })
    .continually_run(sign_slash_report_task_def, vec![]),
  );

  // Spawn the add transactions task
  let (add_tributary_transactions_task_def, add_tributary_transactions_task) = Task::new();
  tokio::spawn(
    (AddTributaryTransactionsTask {
      db: db.clone(),
      tributary_db,
      tributary: tributary.clone(),
      set: set.set,
      key: serai_key,
    })
    .continually_run(add_tributary_transactions_task_def, vec![]),
  );

  // Whenever a new block occurs, immediately run the scan task
  // This function also preserves the ProvideCosignCosignedTransactionsTask handle until the
  // Tributary is retired, ensuring it isn't dropped prematurely and that the tasks don't run ad
  // infinitum
  tokio::spawn(scan_on_new_block(
    db,
    set.set,
    tributary,
    scan_tributary_task,
    vec![
      provide_cosign_cosigned_transactions_task,
      sign_slash_report_task,
      add_tributary_transactions_task,
    ],
  ));
}
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
// Transaction type for this Tributary, re-exported as this module's public surface
mod transaction;
pub use transaction::Transaction;

// Database schema/helpers for the Tributary scanner
mod db;

// The Tributary block scanner
mod scan;
|
|
||||||
@@ -1,408 +0,0 @@
|
|||||||
use core::future::Future;
|
|
||||||
use std::collections::HashMap;
|
|
||||||
|
|
||||||
use ciphersuite::group::GroupEncoding;
|
|
||||||
|
|
||||||
use serai_client::{
|
|
||||||
primitives::SeraiAddress,
|
|
||||||
validator_sets::primitives::{ValidatorSet, Slash},
|
|
||||||
};
|
|
||||||
|
|
||||||
use tributary::{
|
|
||||||
Signed as TributarySigned, TransactionKind, TransactionTrait,
|
|
||||||
Transaction as TributaryTransaction, Block, TributaryReader,
|
|
||||||
tendermint::{
|
|
||||||
tx::{TendermintTx, Evidence, decode_signed_message},
|
|
||||||
TendermintNetwork,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
use serai_db::*;
|
|
||||||
use serai_task::ContinuallyRan;
|
|
||||||
|
|
||||||
use messages::sign::VariantSignId;
|
|
||||||
|
|
||||||
use crate::tributary::{
|
|
||||||
db::*,
|
|
||||||
transaction::{SigningProtocolRound, Signed, Transaction},
|
|
||||||
};
|
|
||||||
|
|
||||||
// Context for scanning a single Tributary block within one database transaction.
struct ScanBlock<'a, D: DbTxn, TD: Db> {
  // The open database transaction all reads/writes for this block go through
  txn: &'a mut D,
  // The validator set this Tributary serves
  set: ValidatorSet,
  // The validators in the set, in order
  validators: &'a [SeraiAddress],
  // The sum of all validators' weights
  total_weight: u64,
  // Each validator's individual weight
  validator_weights: &'a HashMap<SeraiAddress, u64>,
  // Reader for the Tributary being scanned
  tributary: &'a TributaryReader<TD, Transaction>,
}
|
|
||||||
impl<'a, D: DbTxn, TD: Db> ScanBlock<'a, D, TD> {
  // Start cosigning the latest intended-to-be-cosigned block, if we aren't already actively
  // cosigning one and there's a pending block to cosign.
  fn potentially_start_cosign(&mut self) {
    // Don't start a new cosigning instance if we're actively running one
    if TributaryDb::actively_cosigning(self.txn, self.set) {
      return;
    }

    // Start cosigning the latest intended-to-be-cosigned block
    let Some(latest_substrate_block_to_cosign) =
      TributaryDb::latest_substrate_block_to_cosign(self.txn, self.set)
    else {
      return;
    };

    // TODO(unimplemented): resolve the block hash to its Substrate block number
    let substrate_block_number = todo!("TODO");

    // Mark us as actively cosigning
    TributaryDb::start_cosigning(self.txn, self.set, substrate_block_number);
    // Send the message for the processor to start signing
    TributaryDb::send_message(
      self.txn,
      self.set,
      messages::coordinator::CoordinatorMessage::CosignSubstrateBlock {
        session: self.set.session,
        block_number: substrate_block_number,
        block: latest_substrate_block_to_cosign,
      },
    );
  }
  // Handle a single application-layer transaction included in the scanned block.
  fn handle_application_tx(&mut self, block_number: u64, tx: Transaction) {
    // Helper to map a transaction's signer to its Serai address
    let signer = |signed: Signed| SeraiAddress(signed.signer.to_bytes());

    if let TransactionKind::Signed(_, TributarySigned { signer, .. }) = tx.kind() {
      // Don't handle transactions from those fatally slashed
      // TODO: The fact they can publish these TXs makes this a notable spam vector
      if TributaryDb::is_fatally_slashed(self.txn, self.set, SeraiAddress(signer.to_bytes())) {
        return;
      }
    }

    match tx {
      // Accumulate this vote and fatally slash the participant if past the threshold
      Transaction::RemoveParticipant { participant, signed } => {
        let signer = signer(signed);

        // Check the participant voted to be removed actually exists
        if !self.validators.iter().any(|validator| *validator == participant) {
          TributaryDb::fatal_slash(
            self.txn,
            self.set,
            signer,
            "voted to remove non-existent participant",
          );
          return;
        }

        match TributaryDb::accumulate(
          self.txn,
          self.set,
          self.validators,
          self.total_weight,
          block_number,
          Topic::RemoveParticipant { participant },
          signer,
          self.validator_weights[&signer],
          &(),
        ) {
          DataSet::None => {}
          DataSet::Participating(_) => {
            TributaryDb::fatal_slash(self.txn, self.set, participant, "voted to remove");
          }
        };
      }

      // Send the participation to the processor
      Transaction::DkgParticipation { participation, signed } => {
        TributaryDb::send_message(
          self.txn,
          self.set,
          messages::key_gen::CoordinatorMessage::Participation {
            session: self.set.session,
            // TODO(unimplemented): map the signer to their DKG participant index
            participant: todo!("TODO"),
            participation,
          },
        );
      }
      Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed } => {
        // Accumulate the preprocesses into our own FROST attempt manager
        todo!("TODO")
      }
      Transaction::DkgConfirmationShare { attempt, share, signed } => {
        // Accumulate the shares into our own FROST attempt manager
        todo!("TODO")
      }

      Transaction::Cosign { substrate_block_hash } => {
        // Update the latest intended-to-be-cosigned Substrate block
        TributaryDb::set_latest_substrate_block_to_cosign(self.txn, self.set, substrate_block_hash);
        // Start a new cosign if we weren't already working on one
        self.potentially_start_cosign();
      }
      Transaction::Cosigned { substrate_block_hash } => {
        TributaryDb::finish_cosigning(self.txn, self.set);

        // Fetch the latest intended-to-be-cosigned block
        let Some(latest_substrate_block_to_cosign) =
          TributaryDb::latest_substrate_block_to_cosign(self.txn, self.set)
        else {
          return;
        };
        // If this is the block we just cosigned, return, preventing us from signing it again
        if latest_substrate_block_to_cosign == substrate_block_hash {
          return;
        }

        // Since we do have a new cosign to work on, start it
        self.potentially_start_cosign();
      }
      Transaction::SubstrateBlock { hash } => {
        // Whitelist all of the IDs this Substrate block causes to be signed
        todo!("TODO")
      }
      Transaction::Batch { hash } => {
        // Whitelist the signing of this batch, publishing our own preprocess
        todo!("TODO")
      }

      Transaction::SlashReport { slash_points, signed } => {
        let signer = signer(signed);

        if slash_points.len() != self.validators.len() {
          TributaryDb::fatal_slash(
            self.txn,
            self.set,
            signer,
            "slash report was for a distinct amount of signers",
          );
          return;
        }

        // Accumulate, and if past the threshold, calculate *the* slash report and start signing it
        match TributaryDb::accumulate(
          self.txn,
          self.set,
          self.validators,
          self.total_weight,
          block_number,
          Topic::SlashReport,
          signer,
          self.validator_weights[&signer],
          &slash_points,
        ) {
          DataSet::None => {}
          DataSet::Participating(data_set) => {
            // Find the median reported slashes for this validator
            // TODO: This lets 34% perform a fatal slash. Should that be allowed?
            let mut median_slash_report = Vec::with_capacity(self.validators.len());
            for i in 0 .. self.validators.len() {
              let mut this_validator =
                data_set.values().map(|report| report[i]).collect::<Vec<_>>();
              this_validator.sort_unstable();
              // Choose the median, where if there are two median values, the lower one is chosen
              let median_index = if (this_validator.len() % 2) == 1 {
                this_validator.len() / 2
              } else {
                (this_validator.len() / 2) - 1
              };
              median_slash_report.push(this_validator[median_index]);
            }

            // We only publish slashes for the `f` worst performers to:
            // 1) Effect amnesty if there were network disruptions which affected everyone
            // 2) Ensure the signing threshold doesn't have a disincentive to do their job

            // Find the worst performer within the signing threshold's slash points
            let f = (self.validators.len() - 1) / 3;
            let worst_validator_in_supermajority_slash_points = {
              let mut sorted_slash_points = median_slash_report.clone();
              sorted_slash_points.sort_unstable();
              // This won't be a valid index if `f == 0`, which means we don't have any validators
              // to slash
              let index_of_first_validator_to_slash = self.validators.len() - f;
              let index_of_worst_validator_in_supermajority = index_of_first_validator_to_slash - 1;
              sorted_slash_points[index_of_worst_validator_in_supermajority]
            };

            // Perform the amortization
            for slash_points in &mut median_slash_report {
              *slash_points =
                slash_points.saturating_sub(worst_validator_in_supermajority_slash_points)
            }
            let amortized_slash_report = median_slash_report;

            // Create the resulting slash report
            let mut slash_report = vec![];
            for (validator, points) in self.validators.iter().copied().zip(amortized_slash_report) {
              if points != 0 {
                slash_report.push(Slash { key: validator.into(), points });
              }
            }
            assert!(slash_report.len() <= f);

            // Recognize the topic for signing the slash report
            TributaryDb::recognize_topic(
              self.txn,
              self.set,
              Topic::Sign {
                id: VariantSignId::SlashReport,
                attempt: 0,
                round: SigningProtocolRound::Preprocess,
              },
            );
            // Send the message for the processor to start signing
            TributaryDb::send_message(
              self.txn,
              self.set,
              messages::coordinator::CoordinatorMessage::SignSlashReport {
                session: self.set.session,
                report: slash_report,
              },
            );
          }
        };
      }

      Transaction::Sign { id, attempt, round, data, signed } => {
        let topic = Topic::Sign { id, attempt, round };
        let signer = signer(signed);

        // A validator must submit exactly one datum per key share they hold
        if u64::try_from(data.len()).unwrap() != self.validator_weights[&signer] {
          TributaryDb::fatal_slash(
            self.txn,
            self.set,
            signer,
            "signer signed with a distinct amount of key shares than they had key shares",
          );
          return;
        }

        match TributaryDb::accumulate(
          self.txn,
          self.set,
          self.validators,
          self.total_weight,
          block_number,
          topic,
          signer,
          self.validator_weights[&signer],
          &data,
        ) {
          DataSet::None => {}
          DataSet::Participating(data_set) => {
            let id = topic.sign_id(self.set).expect("Topic::Sign didn't have SignId");
            // TODO(unimplemented): flatten the per-validator data into per-share entries
            let flatten_data_set = |data_set| todo!("TODO");
            let data_set = flatten_data_set(data_set);
            TributaryDb::send_message(
              self.txn,
              self.set,
              match round {
                SigningProtocolRound::Preprocess => {
                  messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set }
                }
                SigningProtocolRound::Share => {
                  messages::sign::CoordinatorMessage::Shares { id, shares: data_set }
                }
              },
            )
          }
        };
      }
    }
  }

  // Process every transaction in the block, dispatching Tendermint evidence and application
  // transactions appropriately.
  fn handle_block(mut self, block_number: u64, block: Block<Transaction>) {
    TributaryDb::start_of_block(self.txn, self.set, block_number);

    for tx in block.transactions {
      match tx {
        TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => {
          // Since the evidence is on the chain, it will have already been validated
          // We can just punish the signer
          let data = match ev {
            Evidence::ConflictingMessages(first, second) => (first, Some(second)),
            Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None),
          };
          /* TODO
          let msgs = (
            decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.0).unwrap(),
            if data.1.is_some() {
              Some(
                decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.1.unwrap())
                  .unwrap(),
              )
            } else {
              None
            },
          );

          // Since anything with evidence is fundamentally faulty behavior, not just temporal
          // errors, mark the node as fatally slashed
          TributaryDb::fatal_slash(
            self.txn, msgs.0.msg.sender, &format!("invalid tendermint messages: {msgs:?}"));
          */
          todo!("TODO")
        }
        TributaryTransaction::Application(tx) => {
          self.handle_application_tx(block_number, tx);
        }
      }
    }
  }
}
|
|
||||||
|
|
||||||
// Task which scans a Tributary's blocks, processing their transactions.
struct ScanTributaryTask<D: Db, TD: Db> {
  // Database persisting scan progress and scanner state
  db: D,
  // The validator set this Tributary serves
  set: ValidatorSet,
  // The validators in the set, in order
  validators: Vec<SeraiAddress>,
  // The sum of all validators' weights
  total_weight: u64,
  // Each validator's individual weight
  validator_weights: HashMap<SeraiAddress, u64>,
  // Reader for the Tributary being scanned
  tributary: TributaryReader<TD, Transaction>,
}
|
|
||||||
// Each iteration scans all blocks after the last handled one, persisting progress per block so
// a reboot resumes where it left off. Missing Provided transactions yield an ephemeral error,
// retried on a later iteration.
impl<D: Db, TD: Db> ContinuallyRan for ScanTributaryTask<D, TD> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    async move {
      // Resume from the last handled block, or from the genesis if none was handled yet
      let (mut last_block_number, mut last_block_hash) =
        TributaryDb::last_handled_tributary_block(&self.db, self.set)
          .unwrap_or((0, self.tributary.genesis()));

      let mut made_progess = false;
      while let Some(next) = self.tributary.block_after(&last_block_hash) {
        let block = self.tributary.block(&next).unwrap();
        let block_number = last_block_number + 1;
        let block_hash = block.hash();

        // Make sure we have all of the provided transactions for this block
        for tx in &block.transactions {
          let TransactionKind::Provided(order) = tx.kind() else {
            continue;
          };

          // make sure we have all the provided txs in this block locally
          if !self.tributary.locally_provided_txs_in_block(&block_hash, order) {
            return Err(format!(
              "didn't have the provided Transactions on-chain for set (ephemeral error): {:?}",
              self.set
            ));
          }
        }

        // Scan the block and persist the new scan position atomically
        let mut txn = self.db.txn();
        (ScanBlock {
          txn: &mut txn,
          set: self.set,
          validators: &self.validators,
          total_weight: self.total_weight,
          validator_weights: &self.validator_weights,
          tributary: &self.tributary,
        })
        .handle_block(block_number, block);
        TributaryDb::set_last_handled_tributary_block(&mut txn, self.set, block_number, block_hash);
        last_block_number = block_number;
        last_block_hash = block_hash;
        txn.commit();

        made_progess = true;
      }

      Ok(made_progess)
    }
  }
}
|
|
||||||
@@ -1,338 +0,0 @@
|
|||||||
use core::{ops::Deref, fmt::Debug};
|
|
||||||
use std::io;
|
|
||||||
|
|
||||||
use zeroize::Zeroizing;
|
|
||||||
use rand_core::{RngCore, CryptoRng};
|
|
||||||
|
|
||||||
use blake2::{digest::typenum::U32, Digest, Blake2b};
|
|
||||||
use ciphersuite::{
|
|
||||||
group::{ff::Field, GroupEncoding},
|
|
||||||
Ciphersuite, Ristretto,
|
|
||||||
};
|
|
||||||
use schnorr::SchnorrSignature;
|
|
||||||
|
|
||||||
use scale::Encode;
|
|
||||||
use borsh::{BorshSerialize, BorshDeserialize};
|
|
||||||
|
|
||||||
use serai_client::{primitives::SeraiAddress, validator_sets::primitives::MAX_KEY_SHARES_PER_SET};
|
|
||||||
|
|
||||||
use messages::sign::VariantSignId;
|
|
||||||
|
|
||||||
use tributary::{
|
|
||||||
ReadWrite,
|
|
||||||
transaction::{
|
|
||||||
Signed as TributarySigned, TransactionError, TransactionKind, Transaction as TransactionTrait,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// The round this data is for, within a signing protocol.
|
|
||||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
|
|
||||||
pub enum SigningProtocolRound {
|
|
||||||
/// A preprocess.
|
|
||||||
Preprocess,
|
|
||||||
/// A signature share.
|
|
||||||
Share,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SigningProtocolRound {
|
|
||||||
fn nonce(&self) -> u32 {
|
|
||||||
match self {
|
|
||||||
SigningProtocolRound::Preprocess => 0,
|
|
||||||
SigningProtocolRound::Share => 1,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// `tributary::Signed` but without the nonce.
|
|
||||||
///
|
|
||||||
/// All of our nonces are deterministic to the type of transaction and fields within.
|
|
||||||
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
|
||||||
pub struct Signed {
|
|
||||||
/// The signer.
|
|
||||||
pub signer: <Ristretto as Ciphersuite>::G,
|
|
||||||
/// The signature.
|
|
||||||
pub signature: SchnorrSignature<Ristretto>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl BorshSerialize for Signed {
|
|
||||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> Result<(), io::Error> {
|
|
||||||
writer.write_all(self.signer.to_bytes().as_ref())?;
|
|
||||||
self.signature.write(writer)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl BorshDeserialize for Signed {
|
|
||||||
fn deserialize_reader<R: io::Read>(reader: &mut R) -> Result<Self, io::Error> {
|
|
||||||
let signer = Ristretto::read_G(reader)?;
|
|
||||||
let signature = SchnorrSignature::read(reader)?;
|
|
||||||
Ok(Self { signer, signature })
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Signed {
|
|
||||||
/// Provide a nonce to convert a `Signed` into a `tributary::Signed`.
|
|
||||||
fn nonce(&self, nonce: u32) -> TributarySigned {
|
|
||||||
TributarySigned { signer: self.signer, nonce, signature: self.signature }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The Tributary transaction definition used by Serai
|
|
||||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
|
||||||
pub enum Transaction {
|
|
||||||
/// A vote to remove a participant for invalid behavior
|
|
||||||
RemoveParticipant {
|
|
||||||
/// The participant to remove
|
|
||||||
participant: SeraiAddress,
|
|
||||||
/// The transaction's signer and signature
|
|
||||||
signed: Signed,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// A participation in the DKG
|
|
||||||
DkgParticipation {
|
|
||||||
participation: Vec<u8>,
|
|
||||||
/// The transaction's signer and signature
|
|
||||||
signed: Signed,
|
|
||||||
},
|
|
||||||
/// The preprocess to confirm the DKG results on-chain
|
|
||||||
DkgConfirmationPreprocess {
|
|
||||||
/// The attempt number of this signing protocol
|
|
||||||
attempt: u32,
|
|
||||||
// The preprocess
|
|
||||||
preprocess: [u8; 64],
|
|
||||||
/// The transaction's signer and signature
|
|
||||||
signed: Signed,
|
|
||||||
},
|
|
||||||
/// The signature share to confirm the DKG results on-chain
|
|
||||||
DkgConfirmationShare {
|
|
||||||
/// The attempt number of this signing protocol
|
|
||||||
attempt: u32,
|
|
||||||
// The signature share
|
|
||||||
share: [u8; 32],
|
|
||||||
/// The transaction's signer and signature
|
|
||||||
signed: Signed,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Intend to co-sign a finalized Substrate block
|
|
||||||
///
|
|
||||||
/// When the time comes to start a new co-signing protocol, the most recent Substrate block will
|
|
||||||
/// be the one selected to be cosigned.
|
|
||||||
Cosign {
|
|
||||||
/// The hash of the Substrate block to sign
|
|
||||||
substrate_block_hash: [u8; 32],
|
|
||||||
},
|
|
||||||
|
|
||||||
/// The cosign for a Substrate block
|
|
||||||
///
|
|
||||||
/// After producing this cosign, we need to start work on the latest intended-to-be cosigned
|
|
||||||
/// block. That requires agreement on when this cosign was produced, which we solve by embedding
|
|
||||||
/// this cosign on chain.
|
|
||||||
///
|
|
||||||
/// We ideally don't have this transaction at all. The coordinator, without access to any of the
|
|
||||||
/// key shares, could observe the FROST signing session and determine a successful completion.
|
|
||||||
/// Unfortunately, that functionality is not present in modular-frost, so we do need to support
|
|
||||||
/// *some* asynchronous flow (where the processor or P2P network informs us of the successful
|
|
||||||
/// completion).
|
|
||||||
///
|
|
||||||
/// If we use a `Provided` transaction, that requires everyone observe this cosign.
|
|
||||||
///
|
|
||||||
/// If we use an `Unsigned` transaction, we can't verify the cosign signature inside
|
|
||||||
/// `Transaction::verify` unless we embedded the full `SignedCosign` on-chain. The issue is since
|
|
||||||
/// a Tributary is stateless with regards to the on-chain logic, including `Transaction::verify`,
|
|
||||||
/// we can't verify the signature against the group's public key unless we also include that (but
|
|
||||||
/// then we open a DoS where arbitrary group keys are specified to cause inclusion of arbitrary
|
|
||||||
/// blobs on chain).
|
|
||||||
///
|
|
||||||
/// If we use a `Signed` transaction, we mitigate the DoS risk by having someone to fatally
|
|
||||||
/// slash. We have horrible performance though as for 100 validators, all 100 will publish this
|
|
||||||
/// transaction.
|
|
||||||
///
|
|
||||||
/// We could use a signed `Unsigned` transaction, where it includes a signer and signature but
|
|
||||||
/// isn't technically a Signed transaction. This lets us de-duplicate the transaction premised on
|
|
||||||
/// its contents.
|
|
||||||
///
|
|
||||||
/// The optimal choice is likely to use a `Provided` transaction. We don't actually need to
|
|
||||||
/// observe the produced cosign (which is ephemeral). As long as it's agreed the cosign in
|
|
||||||
/// question no longer needs to produced, which would mean the cosigning protocol at-large
|
|
||||||
/// cosigning the block in question, it'd be safe to provide this and move on to the next cosign.
|
|
||||||
Cosigned { substrate_block_hash: [u8; 32] },
|
|
||||||
|
|
||||||
/// Acknowledge a Substrate block
|
|
||||||
///
|
|
||||||
/// This is provided after the block has been cosigned.
|
|
||||||
///
|
|
||||||
/// With the acknowledgement of a Substrate block, we can whitelist all the `VariantSignId`s
|
|
||||||
/// resulting from its handling.
|
|
||||||
SubstrateBlock {
|
|
||||||
/// The hash of the Substrate block
|
|
||||||
hash: [u8; 32],
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Acknowledge a Batch
|
|
||||||
///
|
|
||||||
/// Once everyone has acknowledged the Batch, we can begin signing it.
|
|
||||||
Batch {
|
|
||||||
/// The hash of the Batch's serialization.
|
|
||||||
///
|
|
||||||
/// Generally, we refer to a Batch by its ID/the hash of its instructions. Here, we want to
|
|
||||||
/// ensure consensus on the Batch, and achieving consensus on its hash is the most effective
|
|
||||||
/// way to do that.
|
|
||||||
hash: [u8; 32],
|
|
||||||
},
|
|
||||||
|
|
||||||
/// Data from a signing protocol.
|
|
||||||
Sign {
|
|
||||||
/// The ID of the object being signed
|
|
||||||
id: VariantSignId,
|
|
||||||
/// The attempt number of this signing protocol
|
|
||||||
attempt: u32,
|
|
||||||
/// The round this data is for, within the signing protocol
|
|
||||||
round: SigningProtocolRound,
|
|
||||||
/// The data itself
|
|
||||||
///
|
|
||||||
/// There will be `n` blobs of data where `n` is the amount of key shares the validator sending
|
|
||||||
/// this transaction has.
|
|
||||||
data: Vec<Vec<u8>>,
|
|
||||||
/// The transaction's signer and signature
|
|
||||||
signed: Signed,
|
|
||||||
},
|
|
||||||
|
|
||||||
/// The local view of slashes observed by the transaction's sender
|
|
||||||
SlashReport {
|
|
||||||
/// The slash points accrued by each validator
|
|
||||||
slash_points: Vec<u32>,
|
|
||||||
/// The transaction's signer and signature
|
|
||||||
signed: Signed,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ReadWrite for Transaction {
|
|
||||||
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
|
||||||
borsh::from_reader(reader)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
|
||||||
borsh::to_writer(writer, self)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TransactionTrait for Transaction {
|
|
||||||
fn kind(&self) -> TransactionKind {
|
|
||||||
match self {
|
|
||||||
Transaction::RemoveParticipant { participant, signed } => {
|
|
||||||
TransactionKind::Signed((b"RemoveParticipant", participant).encode(), signed.nonce(0))
|
|
||||||
}
|
|
||||||
|
|
||||||
Transaction::DkgParticipation { signed, .. } => {
|
|
||||||
TransactionKind::Signed(b"DkgParticipation".encode(), signed.nonce(0))
|
|
||||||
}
|
|
||||||
Transaction::DkgConfirmationPreprocess { attempt, signed, .. } => {
|
|
||||||
TransactionKind::Signed((b"DkgConfirmation", attempt).encode(), signed.nonce(0))
|
|
||||||
}
|
|
||||||
Transaction::DkgConfirmationShare { attempt, signed, .. } => {
|
|
||||||
TransactionKind::Signed((b"DkgConfirmation", attempt).encode(), signed.nonce(1))
|
|
||||||
}
|
|
||||||
|
|
||||||
Transaction::Cosign { .. } => TransactionKind::Provided("CosignSubstrateBlock"),
|
|
||||||
Transaction::Cosigned { .. } => TransactionKind::Provided("Cosigned"),
|
|
||||||
Transaction::SubstrateBlock { .. } => TransactionKind::Provided("SubstrateBlock"),
|
|
||||||
Transaction::Batch { .. } => TransactionKind::Provided("Batch"),
|
|
||||||
|
|
||||||
Transaction::Sign { id, attempt, round, signed, .. } => {
|
|
||||||
TransactionKind::Signed((b"Sign", id, attempt).encode(), signed.nonce(round.nonce()))
|
|
||||||
}
|
|
||||||
|
|
||||||
Transaction::SlashReport { signed, .. } => {
|
|
||||||
TransactionKind::Signed(b"SlashReport".encode(), signed.nonce(0))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn hash(&self) -> [u8; 32] {
|
|
||||||
let mut tx = ReadWrite::serialize(self);
|
|
||||||
if let TransactionKind::Signed(_, signed) = self.kind() {
|
|
||||||
// Make sure the part we're cutting off is the signature
|
|
||||||
assert_eq!(tx.drain((tx.len() - 64) ..).collect::<Vec<_>>(), signed.signature.serialize());
|
|
||||||
}
|
|
||||||
Blake2b::<U32>::digest(&tx).into()
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is a stateless verification which we use to enforce some size limits.
|
|
||||||
fn verify(&self) -> Result<(), TransactionError> {
|
|
||||||
#[allow(clippy::match_same_arms)]
|
|
||||||
match self {
|
|
||||||
// Fixed-length TX
|
|
||||||
Transaction::RemoveParticipant { .. } => {}
|
|
||||||
|
|
||||||
// TODO: MAX_DKG_PARTICIPATION_LEN
|
|
||||||
Transaction::DkgParticipation { .. } => {}
|
|
||||||
// These are fixed-length TXs
|
|
||||||
Transaction::DkgConfirmationPreprocess { .. } | Transaction::DkgConfirmationShare { .. } => {}
|
|
||||||
|
|
||||||
// Provided TXs
|
|
||||||
Transaction::Cosign { .. } |
|
|
||||||
Transaction::Cosigned { .. } |
|
|
||||||
Transaction::SubstrateBlock { .. } |
|
|
||||||
Transaction::Batch { .. } => {}
|
|
||||||
|
|
||||||
Transaction::Sign { data, .. } => {
|
|
||||||
if data.len() > usize::try_from(MAX_KEY_SHARES_PER_SET).unwrap() {
|
|
||||||
Err(TransactionError::InvalidContent)?
|
|
||||||
}
|
|
||||||
// TODO: MAX_SIGN_LEN
|
|
||||||
}
|
|
||||||
|
|
||||||
Transaction::SlashReport { slash_points, .. } => {
|
|
||||||
if slash_points.len() > usize::try_from(MAX_KEY_SHARES_PER_SET).unwrap() {
|
|
||||||
Err(TransactionError::InvalidContent)?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Transaction {
|
|
||||||
// Sign a transaction
|
|
||||||
//
|
|
||||||
// Panics if signing a transaction type which isn't `TransactionKind::Signed`
|
|
||||||
pub fn sign<R: RngCore + CryptoRng>(
|
|
||||||
&mut self,
|
|
||||||
rng: &mut R,
|
|
||||||
genesis: [u8; 32],
|
|
||||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
|
||||||
) {
|
|
||||||
fn signed(tx: &mut Transaction) -> &mut Signed {
|
|
||||||
#[allow(clippy::match_same_arms)] // This doesn't make semantic sense here
|
|
||||||
match tx {
|
|
||||||
Transaction::RemoveParticipant { ref mut signed, .. } |
|
|
||||||
Transaction::DkgParticipation { ref mut signed, .. } |
|
|
||||||
Transaction::DkgConfirmationPreprocess { ref mut signed, .. } => signed,
|
|
||||||
Transaction::DkgConfirmationShare { ref mut signed, .. } => signed,
|
|
||||||
|
|
||||||
Transaction::Cosign { .. } => panic!("signing CosignSubstrateBlock"),
|
|
||||||
Transaction::Cosigned { .. } => panic!("signing Cosigned"),
|
|
||||||
Transaction::SubstrateBlock { .. } => panic!("signing SubstrateBlock"),
|
|
||||||
Transaction::Batch { .. } => panic!("signing Batch"),
|
|
||||||
|
|
||||||
Transaction::Sign { ref mut signed, .. } => signed,
|
|
||||||
|
|
||||||
Transaction::SlashReport { ref mut signed, .. } => signed,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decide the nonce to sign with
|
|
||||||
let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));
|
|
||||||
|
|
||||||
{
|
|
||||||
// Set the signer and the nonce
|
|
||||||
let signed = signed(self);
|
|
||||||
signed.signer = Ristretto::generator() * key.deref();
|
|
||||||
signed.signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the signature hash (which now includes `R || A` making it valid as the challenge)
|
|
||||||
let sig_hash = self.sig_hash(genesis);
|
|
||||||
|
|
||||||
// Sign the signature
|
|
||||||
signed(self).signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -18,7 +18,9 @@ rustdoc-args = ["--cfg", "docsrs"]
|
|||||||
workspace = true
|
workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
|
bitvec = { version = "1", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
|
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
|
||||||
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
|
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
|
||||||
serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }
|
serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# Serai Coordinate Substrate Scanner
|
# Serai Coordinator Substrate
|
||||||
|
|
||||||
This is the scanner of the Serai blockchain for the purposes of Serai's coordinator.
|
This crate manages the Serai coordinators's interactions with Serai's Substrate blockchain.
|
||||||
|
|
||||||
Two event streams are defined:
|
Two event streams are defined:
|
||||||
|
|
||||||
@@ -12,3 +12,9 @@ Two event streams are defined:
|
|||||||
The canonical event stream is available without provision of a validator's public key. The ephemeral
|
The canonical event stream is available without provision of a validator's public key. The ephemeral
|
||||||
event stream requires provision of a validator's public key. Both are ordered within themselves, yet
|
event stream requires provision of a validator's public key. Both are ordered within themselves, yet
|
||||||
there are no ordering guarantees across the two.
|
there are no ordering guarantees across the two.
|
||||||
|
|
||||||
|
Additionally, a collection of tasks are defined to publish data onto Serai:
|
||||||
|
|
||||||
|
- `SetKeysTask`, which sets the keys generated via DKGs onto Serai.
|
||||||
|
- `PublishBatchTask`, which publishes `Batch`s onto Serai.
|
||||||
|
- `PublishSlashReportTask`, which publishes `SlashReport`s onto Serai.
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
use std::future::Future;
|
use core::future::Future;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
use futures::stream::{StreamExt, FuturesOrdered};
|
use futures::stream::{StreamExt, FuturesOrdered};
|
||||||
|
|
||||||
@@ -20,20 +21,22 @@ create_db!(
|
|||||||
/// The event stream for canonical events.
|
/// The event stream for canonical events.
|
||||||
pub struct CanonicalEventStream<D: Db> {
|
pub struct CanonicalEventStream<D: Db> {
|
||||||
db: D,
|
db: D,
|
||||||
serai: Serai,
|
serai: Arc<Serai>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db> CanonicalEventStream<D> {
|
impl<D: Db> CanonicalEventStream<D> {
|
||||||
/// Create a new canonical event stream.
|
/// Create a new canonical event stream.
|
||||||
///
|
///
|
||||||
/// Only one of these may exist over the provided database.
|
/// Only one of these may exist over the provided database.
|
||||||
pub fn new(db: D, serai: Serai) -> Self {
|
pub fn new(db: D, serai: Arc<Serai>) -> Self {
|
||||||
Self { db, serai }
|
Self { db, serai }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = String;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let next_block = NextBlock::get(&self.db).unwrap_or(0);
|
let next_block = NextBlock::get(&self.db).unwrap_or(0);
|
||||||
let latest_finalized_block =
|
let latest_finalized_block =
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
use std::future::Future;
|
use core::future::Future;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
use futures::stream::{StreamExt, FuturesOrdered};
|
use futures::stream::{StreamExt, FuturesOrdered};
|
||||||
|
|
||||||
@@ -24,7 +25,7 @@ create_db!(
|
|||||||
/// The event stream for ephemeral events.
|
/// The event stream for ephemeral events.
|
||||||
pub struct EphemeralEventStream<D: Db> {
|
pub struct EphemeralEventStream<D: Db> {
|
||||||
db: D,
|
db: D,
|
||||||
serai: Serai,
|
serai: Arc<Serai>,
|
||||||
validator: PublicKey,
|
validator: PublicKey,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -32,13 +33,15 @@ impl<D: Db> EphemeralEventStream<D> {
|
|||||||
/// Create a new ephemeral event stream.
|
/// Create a new ephemeral event stream.
|
||||||
///
|
///
|
||||||
/// Only one of these may exist over the provided database.
|
/// Only one of these may exist over the provided database.
|
||||||
pub fn new(db: D, serai: Serai, validator: PublicKey) -> Self {
|
pub fn new(db: D, serai: Arc<Serai>, validator: PublicKey) -> Self {
|
||||||
Self { db, serai, validator }
|
Self { db, serai, validator }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = String;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let next_block = NextBlock::get(&self.db).unwrap_or(0);
|
let next_block = NextBlock::get(&self.db).unwrap_or(0);
|
||||||
let latest_finalized_block =
|
let latest_finalized_block =
|
||||||
@@ -156,8 +159,9 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
|||||||
Err("validator's weight exceeded u16::MAX".to_string())?
|
Err("validator's weight exceeded u16::MAX".to_string())?
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Do the summation in u32 so we don't risk a u16 overflow
|
||||||
let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::<u32>();
|
let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::<u32>();
|
||||||
if total_weight > MAX_KEY_SHARES_PER_SET {
|
if total_weight > u32::from(MAX_KEY_SHARES_PER_SET) {
|
||||||
Err(format!(
|
Err(format!(
|
||||||
"{set:?} has {total_weight} key shares when the max is {MAX_KEY_SHARES_PER_SET}"
|
"{set:?} has {total_weight} key shares when the max is {MAX_KEY_SHARES_PER_SET}"
|
||||||
))?;
|
))?;
|
||||||
@@ -216,7 +220,7 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
|||||||
&NewSetInformation {
|
&NewSetInformation {
|
||||||
set: *set,
|
set: *set,
|
||||||
serai_block: block.block_hash,
|
serai_block: block.block_hash,
|
||||||
start_time: block.time,
|
declaration_time: block.time,
|
||||||
// TODO: Why do we have this as an explicit field here?
|
// TODO: Why do we have this as an explicit field here?
|
||||||
// Shouldn't thiis be inlined into the Processor's key gen code, where it's used?
|
// Shouldn't thiis be inlined into the Processor's key gen code, where it's used?
|
||||||
threshold: ((total_weight * 2) / 3) + 1,
|
threshold: ((total_weight * 2) / 3) + 1,
|
||||||
@@ -233,7 +237,7 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
|||||||
else {
|
else {
|
||||||
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
|
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
|
||||||
};
|
};
|
||||||
crate::SignSlashReport::send(&mut txn, set);
|
crate::SignSlashReport::send(&mut txn, *set);
|
||||||
}
|
}
|
||||||
|
|
||||||
txn.commit();
|
txn.commit();
|
||||||
|
|||||||
@@ -6,14 +6,25 @@ use scale::{Encode, Decode};
|
|||||||
use borsh::{io, BorshSerialize, BorshDeserialize};
|
use borsh::{io, BorshSerialize, BorshDeserialize};
|
||||||
|
|
||||||
use serai_client::{
|
use serai_client::{
|
||||||
primitives::{PublicKey, NetworkId},
|
primitives::{NetworkId, PublicKey, Signature, SeraiAddress},
|
||||||
validator_sets::primitives::ValidatorSet,
|
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
|
||||||
|
in_instructions::primitives::SignedBatch,
|
||||||
|
Transaction,
|
||||||
};
|
};
|
||||||
|
|
||||||
use serai_db::*;
|
use serai_db::*;
|
||||||
|
|
||||||
mod canonical;
|
mod canonical;
|
||||||
|
pub use canonical::CanonicalEventStream;
|
||||||
mod ephemeral;
|
mod ephemeral;
|
||||||
|
pub use ephemeral::EphemeralEventStream;
|
||||||
|
|
||||||
|
mod set_keys;
|
||||||
|
pub use set_keys::SetKeysTask;
|
||||||
|
mod publish_batch;
|
||||||
|
pub use publish_batch::PublishBatchTask;
|
||||||
|
mod publish_slash_report;
|
||||||
|
pub use publish_slash_report::PublishSlashReportTask;
|
||||||
|
|
||||||
fn borsh_serialize_validators<W: io::Write>(
|
fn borsh_serialize_validators<W: io::Write>(
|
||||||
validators: &Vec<(PublicKey, u16)>,
|
validators: &Vec<(PublicKey, u16)>,
|
||||||
@@ -30,26 +41,28 @@ fn borsh_deserialize_validators<R: io::Read>(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// The information for a new set.
|
/// The information for a new set.
|
||||||
#[derive(Debug, BorshSerialize, BorshDeserialize)]
|
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
|
||||||
pub struct NewSetInformation {
|
pub struct NewSetInformation {
|
||||||
set: ValidatorSet,
|
/// The set.
|
||||||
serai_block: [u8; 32],
|
pub set: ValidatorSet,
|
||||||
start_time: u64,
|
/// The Serai block which declared it.
|
||||||
threshold: u16,
|
pub serai_block: [u8; 32],
|
||||||
|
/// The time of the block which declared it, in seconds.
|
||||||
|
pub declaration_time: u64,
|
||||||
|
/// The threshold to use.
|
||||||
|
pub threshold: u16,
|
||||||
|
/// The validators, with the amount of key shares they have.
|
||||||
#[borsh(
|
#[borsh(
|
||||||
serialize_with = "borsh_serialize_validators",
|
serialize_with = "borsh_serialize_validators",
|
||||||
deserialize_with = "borsh_deserialize_validators"
|
deserialize_with = "borsh_deserialize_validators"
|
||||||
)]
|
)]
|
||||||
validators: Vec<(PublicKey, u16)>,
|
pub validators: Vec<(PublicKey, u16)>,
|
||||||
evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
|
/// The eVRF public keys.
|
||||||
|
pub evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
|
||||||
}
|
}
|
||||||
|
|
||||||
mod _public_db {
|
mod _public_db {
|
||||||
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};
|
use super::*;
|
||||||
|
|
||||||
use serai_db::*;
|
|
||||||
|
|
||||||
use crate::NewSetInformation;
|
|
||||||
|
|
||||||
db_channel!(
|
db_channel!(
|
||||||
CoordinatorSubstrate {
|
CoordinatorSubstrate {
|
||||||
@@ -58,8 +71,20 @@ mod _public_db {
|
|||||||
|
|
||||||
// Relevant new set, from an ephemeral event stream
|
// Relevant new set, from an ephemeral event stream
|
||||||
NewSet: () -> NewSetInformation,
|
NewSet: () -> NewSetInformation,
|
||||||
// Relevant sign slash report, from an ephemeral event stream
|
// Potentially relevant sign slash report, from an ephemeral event stream
|
||||||
SignSlashReport: () -> ValidatorSet,
|
SignSlashReport: (set: ValidatorSet) -> (),
|
||||||
|
|
||||||
|
// Signed batches to publish onto the Serai network
|
||||||
|
SignedBatches: (network: NetworkId) -> SignedBatch,
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
create_db!(
|
||||||
|
CoordinatorSubstrate {
|
||||||
|
// Keys to set on the Serai network
|
||||||
|
Keys: (network: NetworkId) -> (Session, Vec<u8>),
|
||||||
|
// Slash reports to publish onto the Serai network
|
||||||
|
SlashReports: (network: NetworkId) -> (Session, Vec<u8>),
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
@@ -101,12 +126,103 @@ impl NewSet {
|
|||||||
/// notifications for all relevant validator sets will be included.
|
/// notifications for all relevant validator sets will be included.
|
||||||
pub struct SignSlashReport;
|
pub struct SignSlashReport;
|
||||||
impl SignSlashReport {
|
impl SignSlashReport {
|
||||||
pub(crate) fn send(txn: &mut impl DbTxn, set: &ValidatorSet) {
|
pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet) {
|
||||||
_public_db::SignSlashReport::send(txn, set);
|
_public_db::SignSlashReport::send(txn, set, &());
|
||||||
}
|
}
|
||||||
/// Try to receive a notification to sign a slash report, returning `None` if there is none to
|
/// Try to receive a notification to sign a slash report, returning `None` if there is none to
|
||||||
/// receive.
|
/// receive.
|
||||||
pub fn try_recv(txn: &mut impl DbTxn) -> Option<ValidatorSet> {
|
pub fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<()> {
|
||||||
_public_db::SignSlashReport::try_recv(txn)
|
_public_db::SignSlashReport::try_recv(txn, set)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The keys to set on Serai.
|
||||||
|
pub struct Keys;
|
||||||
|
impl Keys {
|
||||||
|
/// Set the keys to report for a validator set.
|
||||||
|
///
|
||||||
|
/// This only saves the most recent keys as only a single session is eligible to have its keys
|
||||||
|
/// reported at once.
|
||||||
|
pub fn set(
|
||||||
|
txn: &mut impl DbTxn,
|
||||||
|
set: ValidatorSet,
|
||||||
|
key_pair: KeyPair,
|
||||||
|
signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
|
||||||
|
signature: Signature,
|
||||||
|
) {
|
||||||
|
// If we have a more recent pair of keys, don't write this historic one
|
||||||
|
if let Some((existing_session, _)) = _public_db::Keys::get(txn, set.network) {
|
||||||
|
if existing_session.0 >= set.session.0 {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let tx = serai_client::validator_sets::SeraiValidatorSets::set_keys(
|
||||||
|
set.network,
|
||||||
|
key_pair,
|
||||||
|
signature_participants,
|
||||||
|
signature,
|
||||||
|
);
|
||||||
|
_public_db::Keys::set(txn, set.network, &(set.session, tx.encode()));
|
||||||
|
}
|
||||||
|
pub(crate) fn take(txn: &mut impl DbTxn, network: NetworkId) -> Option<(Session, Transaction)> {
|
||||||
|
let (session, tx) = _public_db::Keys::take(txn, network)?;
|
||||||
|
Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The signed batches to publish onto Serai.
|
||||||
|
pub struct SignedBatches;
|
||||||
|
impl SignedBatches {
|
||||||
|
/// Send a `SignedBatch` to publish onto Serai.
|
||||||
|
///
|
||||||
|
/// These will be published sequentially. Out-of-order sending risks hanging the task.
|
||||||
|
pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) {
|
||||||
|
_public_db::SignedBatches::send(txn, batch.batch.network, batch);
|
||||||
|
}
|
||||||
|
pub(crate) fn try_recv(txn: &mut impl DbTxn, network: NetworkId) -> Option<SignedBatch> {
|
||||||
|
_public_db::SignedBatches::try_recv(txn, network)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The slash report was invalid.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct InvalidSlashReport;
|
||||||
|
|
||||||
|
/// The slash reports to publish onto Serai.
|
||||||
|
pub struct SlashReports;
|
||||||
|
impl SlashReports {
|
||||||
|
/// Set the slashes to report for a validator set.
|
||||||
|
///
|
||||||
|
/// This only saves the most recent slashes as only a single session is eligible to have its
|
||||||
|
/// slashes reported at once.
|
||||||
|
///
|
||||||
|
/// Returns Err if the slashes are invalid. Returns Ok if the slashes weren't detected as
|
||||||
|
/// invalid. Slashes may be considered invalid by the Serai blockchain later even if not detected
|
||||||
|
/// as invalid here.
|
||||||
|
pub fn set(
|
||||||
|
txn: &mut impl DbTxn,
|
||||||
|
set: ValidatorSet,
|
||||||
|
slashes: Vec<(SeraiAddress, u32)>,
|
||||||
|
signature: Signature,
|
||||||
|
) -> Result<(), InvalidSlashReport> {
|
||||||
|
// If we have a more recent slash report, don't write this historic one
|
||||||
|
if let Some((existing_session, _)) = _public_db::SlashReports::get(txn, set.network) {
|
||||||
|
if existing_session.0 >= set.session.0 {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let tx = serai_client::validator_sets::SeraiValidatorSets::report_slashes(
|
||||||
|
set.network,
|
||||||
|
slashes.try_into().map_err(|_| InvalidSlashReport)?,
|
||||||
|
signature,
|
||||||
|
);
|
||||||
|
_public_db::SlashReports::set(txn, set.network, &(set.session, tx.encode()));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
pub(crate) fn take(txn: &mut impl DbTxn, network: NetworkId) -> Option<(Session, Transaction)> {
|
||||||
|
let (session, tx) = _public_db::SlashReports::take(txn, network)?;
|
||||||
|
Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
66
coordinator/substrate/src/publish_batch.rs
Normal file
66
coordinator/substrate/src/publish_batch.rs
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
use core::future::Future;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use serai_db::{DbTxn, Db};
|
||||||
|
|
||||||
|
use serai_client::{primitives::NetworkId, SeraiError, Serai};
|
||||||
|
|
||||||
|
use serai_task::ContinuallyRan;
|
||||||
|
|
||||||
|
use crate::SignedBatches;
|
||||||
|
|
||||||
|
/// Publish `SignedBatch`s from `SignedBatches` onto Serai.
|
||||||
|
pub struct PublishBatchTask<D: Db> {
|
||||||
|
db: D,
|
||||||
|
serai: Arc<Serai>,
|
||||||
|
network: NetworkId,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<D: Db> PublishBatchTask<D> {
|
||||||
|
/// Create a task to publish `SignedBatch`s onto Serai.
|
||||||
|
///
|
||||||
|
/// Returns None if `network == NetworkId::Serai`.
|
||||||
|
// TODO: ExternalNetworkId
|
||||||
|
pub fn new(db: D, serai: Arc<Serai>, network: NetworkId) -> Option<Self> {
|
||||||
|
if network == NetworkId::Serai {
|
||||||
|
None?
|
||||||
|
};
|
||||||
|
Some(Self { db, serai, network })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
|
||||||
|
type Error = SeraiError;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
|
async move {
|
||||||
|
let mut made_progress = false;
|
||||||
|
|
||||||
|
loop {
|
||||||
|
let mut txn = self.db.txn();
|
||||||
|
let Some(batch) = SignedBatches::try_recv(&mut txn, self.network) else {
|
||||||
|
// No batch to publish at this time
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Publish this Batch if it hasn't already been published
|
||||||
|
let serai = self.serai.as_of_latest_finalized_block().await?;
|
||||||
|
let last_batch = serai.in_instructions().last_batch_for_network(self.network).await?;
|
||||||
|
if last_batch < Some(batch.batch.id) {
|
||||||
|
// This stream of Batches *should* be sequential within the larger context of the Serai
|
||||||
|
// coordinator. In this library, we use a more relaxed definition and don't assert
|
||||||
|
// sequence. This does risk hanging the task, if Batch #n+1 is sent before Batch #n, but
|
||||||
|
// that is a documented fault of the `SignedBatches` API.
|
||||||
|
self
|
||||||
|
.serai
|
||||||
|
.publish(&serai_client::in_instructions::SeraiInInstructions::execute_batch(batch))
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
txn.commit();
|
||||||
|
made_progress = true;
|
||||||
|
}
|
||||||
|
Ok(made_progress)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
89
coordinator/substrate/src/publish_slash_report.rs
Normal file
89
coordinator/substrate/src/publish_slash_report.rs
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
use core::future::Future;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use serai_db::{DbTxn, Db};
|
||||||
|
|
||||||
|
use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, Serai};
|
||||||
|
|
||||||
|
use serai_task::ContinuallyRan;
|
||||||
|
|
||||||
|
use crate::SlashReports;
|
||||||
|
|
||||||
|
/// Publish slash reports from `SlashReports` onto Serai.
|
||||||
|
pub struct PublishSlashReportTask<D: Db> {
|
||||||
|
db: D,
|
||||||
|
serai: Arc<Serai>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<D: Db> PublishSlashReportTask<D> {
|
||||||
|
/// Create a task to publish slash reports onto Serai.
|
||||||
|
pub fn new(db: D, serai: Arc<Serai>) -> Self {
|
||||||
|
Self { db, serai }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
|
||||||
|
type Error = String;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
|
async move {
|
||||||
|
let mut made_progress = false;
|
||||||
|
for network in serai_client::primitives::NETWORKS {
|
||||||
|
if network == NetworkId::Serai {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut txn = self.db.txn();
|
||||||
|
let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else {
|
||||||
|
// No slash report to publish
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
let serai =
|
||||||
|
self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
|
||||||
|
let serai = serai.validator_sets();
|
||||||
|
let session_after_slash_report = Session(session.0 + 1);
|
||||||
|
let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
|
||||||
|
let current_session = current_session.map(|session| session.0);
|
||||||
|
// Only attempt to publish the slash report for session #n while session #n+1 is still
|
||||||
|
// active
|
||||||
|
let session_after_slash_report_retired =
|
||||||
|
current_session > Some(session_after_slash_report.0);
|
||||||
|
if session_after_slash_report_retired {
|
||||||
|
// Commit the txn to drain this slash report from the database and not try it again later
|
||||||
|
txn.commit();
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if Some(session_after_slash_report.0) != current_session {
|
||||||
|
// We already checked the current session wasn't greater, and they're not equal
|
||||||
|
assert!(current_session < Some(session_after_slash_report.0));
|
||||||
|
// This would mean the Serai node is resyncing and is behind where it prior was
|
||||||
|
Err("have a slash report for a session Serai has yet to retire".to_string())?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If this session which should publish a slash report already has, move on
|
||||||
|
let key_pending_slash_report =
|
||||||
|
serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
|
||||||
|
if key_pending_slash_report.is_none() {
|
||||||
|
txn.commit();
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
match self.serai.publish(&slash_report).await {
|
||||||
|
Ok(()) => {
|
||||||
|
txn.commit();
|
||||||
|
made_progress = true;
|
||||||
|
}
|
||||||
|
// This could be specific to this TX (such as an already in mempool error) and it may be
|
||||||
|
// worthwhile to continue iteration with the other pending slash reports. We assume this
|
||||||
|
// error ephemeral and that the latency incurred for this ephemeral error to resolve is
|
||||||
|
// miniscule compared to the window available to publish the slash report. That makes
|
||||||
|
// this a non-issue.
|
||||||
|
Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}"))?,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(made_progress)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
88
coordinator/substrate/src/set_keys.rs
Normal file
88
coordinator/substrate/src/set_keys.rs
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
use core::future::Future;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use serai_db::{DbTxn, Db};
|
||||||
|
|
||||||
|
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};
|
||||||
|
|
||||||
|
use serai_task::ContinuallyRan;
|
||||||
|
|
||||||
|
use crate::Keys;
|
||||||
|
|
||||||
|
/// Set keys from `Keys` on Serai.
|
||||||
|
pub struct SetKeysTask<D: Db> {
|
||||||
|
db: D,
|
||||||
|
serai: Arc<Serai>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<D: Db> SetKeysTask<D> {
|
||||||
|
/// Create a task to publish slash reports onto Serai.
|
||||||
|
pub fn new(db: D, serai: Arc<Serai>) -> Self {
|
||||||
|
Self { db, serai }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<D: Db> ContinuallyRan for SetKeysTask<D> {
|
||||||
|
type Error = String;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
|
async move {
|
||||||
|
let mut made_progress = false;
|
||||||
|
for network in serai_client::primitives::NETWORKS {
|
||||||
|
if network == NetworkId::Serai {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut txn = self.db.txn();
|
||||||
|
let Some((session, keys)) = Keys::take(&mut txn, network) else {
|
||||||
|
// No keys to set
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
let serai =
|
||||||
|
self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
|
||||||
|
let serai = serai.validator_sets();
|
||||||
|
let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
|
||||||
|
let current_session = current_session.map(|session| session.0);
|
||||||
|
// Only attempt to set these keys if this isn't a retired session
|
||||||
|
if Some(session.0) < current_session {
|
||||||
|
// Commit the txn to take these keys from the database and not try it again later
|
||||||
|
txn.commit();
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if Some(session.0) != current_session {
|
||||||
|
// We already checked the current session wasn't greater, and they're not equal
|
||||||
|
assert!(current_session < Some(session.0));
|
||||||
|
// This would mean the Serai node is resyncing and is behind where it prior was
|
||||||
|
Err("have a keys for a session Serai has yet to start".to_string())?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If this session already has had its keys set, move on
|
||||||
|
if serai
|
||||||
|
.keys(ValidatorSet { network, session })
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("{e:?}"))?
|
||||||
|
.is_some()
|
||||||
|
{
|
||||||
|
txn.commit();
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
match self.serai.publish(&keys).await {
|
||||||
|
Ok(()) => {
|
||||||
|
txn.commit();
|
||||||
|
made_progress = true;
|
||||||
|
}
|
||||||
|
// This could be specific to this TX (such as an already in mempool error) and it may be
|
||||||
|
// worthwhile to continue iteration with the other pending slash reports. We assume this
|
||||||
|
// error ephemeral and that the latency incurred for this ephemeral error to resolve is
|
||||||
|
// miniscule compared to the window reasonable to set the keys. That makes this a
|
||||||
|
// non-issue.
|
||||||
|
Err(e) => Err(format!("couldn't publish set keys transaction: {e:?}"))?,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(made_progress)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
49
coordinator/tributary-sdk/Cargo.toml
Normal file
49
coordinator/tributary-sdk/Cargo.toml
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
[package]
|
||||||
|
name = "tributary-sdk"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "A micro-blockchain to provide consensus and ordering to P2P communication"
|
||||||
|
license = "AGPL-3.0-only"
|
||||||
|
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary-sdk"
|
||||||
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
|
edition = "2021"
|
||||||
|
rust-version = "1.81"
|
||||||
|
|
||||||
|
[package.metadata.docs.rs]
|
||||||
|
all-features = true
|
||||||
|
rustdoc-args = ["--cfg", "docsrs"]
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
thiserror = { version = "2", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
|
subtle = { version = "^2", default-features = false, features = ["std"] }
|
||||||
|
zeroize = { version = "^1.5", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
|
rand = { version = "0.8", default-features = false, features = ["std"] }
|
||||||
|
rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
|
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
||||||
|
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["std", "recommended"] }
|
||||||
|
|
||||||
|
ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", version = "0.4", default-features = false, features = ["std", "ristretto"] }
|
||||||
|
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", version = "0.5", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
|
hex = { version = "0.4", default-features = false, features = ["std"] }
|
||||||
|
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
|
serai-db = { path = "../../common/db", version = "0.1" }
|
||||||
|
|
||||||
|
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
|
||||||
|
futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "channel"] }
|
||||||
|
futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
|
||||||
|
tendermint = { package = "tendermint-machine", path = "./tendermint", version = "0.2" }
|
||||||
|
|
||||||
|
tokio = { version = "1", default-features = false, features = ["sync", "time", "rt"] }
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
tokio = { version = "1", features = ["macros"] }
|
||||||
|
|
||||||
|
[features]
|
||||||
|
tests = []
|
||||||
15
coordinator/tributary-sdk/LICENSE
Normal file
15
coordinator/tributary-sdk/LICENSE
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
AGPL-3.0-only license
|
||||||
|
|
||||||
|
Copyright (c) 2023 Luke Parker
|
||||||
|
|
||||||
|
This program is free software: you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU Affero General Public License Version 3 as
|
||||||
|
published by the Free Software Foundation.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Affero General Public License
|
||||||
|
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
3
coordinator/tributary-sdk/README.md
Normal file
3
coordinator/tributary-sdk/README.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Tributary
|
||||||
|
|
||||||
|
A verifiable, ordered broadcast layer implemented as a BFT micro-blockchain.
|
||||||
388
coordinator/tributary-sdk/src/lib.rs
Normal file
388
coordinator/tributary-sdk/src/lib.rs
Normal file
@@ -0,0 +1,388 @@
|
|||||||
|
use core::{marker::PhantomData, fmt::Debug, future::Future};
|
||||||
|
use std::{sync::Arc, io};
|
||||||
|
|
||||||
|
use zeroize::Zeroizing;
|
||||||
|
|
||||||
|
use ciphersuite::{Ciphersuite, Ristretto};
|
||||||
|
|
||||||
|
use scale::Decode;
|
||||||
|
use futures_channel::mpsc::UnboundedReceiver;
|
||||||
|
use futures_util::{StreamExt, SinkExt};
|
||||||
|
use ::tendermint::{
|
||||||
|
ext::{BlockNumber, Commit, Block as BlockTrait, Network},
|
||||||
|
SignedMessageFor, SyncedBlock, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender,
|
||||||
|
TendermintMachine, TendermintHandle,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub use ::tendermint::Evidence;
|
||||||
|
|
||||||
|
use serai_db::Db;
|
||||||
|
|
||||||
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
|
mod merkle;
|
||||||
|
pub(crate) use merkle::*;
|
||||||
|
|
||||||
|
pub mod transaction;
|
||||||
|
pub use transaction::{TransactionError, Signed, TransactionKind, Transaction as TransactionTrait};
|
||||||
|
|
||||||
|
use crate::tendermint::tx::TendermintTx;
|
||||||
|
|
||||||
|
mod provided;
|
||||||
|
pub(crate) use provided::*;
|
||||||
|
pub use provided::ProvidedError;
|
||||||
|
|
||||||
|
mod block;
|
||||||
|
pub use block::*;
|
||||||
|
|
||||||
|
mod blockchain;
|
||||||
|
pub(crate) use blockchain::*;
|
||||||
|
|
||||||
|
mod mempool;
|
||||||
|
pub(crate) use mempool::*;
|
||||||
|
|
||||||
|
pub mod tendermint;
|
||||||
|
pub(crate) use crate::tendermint::*;
|
||||||
|
|
||||||
|
#[cfg(any(test, feature = "tests"))]
|
||||||
|
pub mod tests;
|
||||||
|
|
||||||
|
/// Size limit for an individual transaction.
|
||||||
|
// This needs to be big enough to participate in a 101-of-150 eVRF DKG with each element taking
|
||||||
|
// `MAX_KEY_LEN`. This also needs to be big enough to pariticpate in signing 520 Bitcoin inputs
|
||||||
|
// with 49 key shares, and signing 120 Monero inputs with 49 key shares.
|
||||||
|
// TODO: Add a test for these properties
|
||||||
|
pub const TRANSACTION_SIZE_LIMIT: usize = 2_000_000;
|
||||||
|
/// Amount of transactions a single account may have in the mempool.
|
||||||
|
pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
|
||||||
|
/// Block size limit.
|
||||||
|
// This targets a growth limit of roughly 30 GB a day, under load, in order to prevent a malicious
|
||||||
|
// participant from flooding disks and causing out of space errors in order processes.
|
||||||
|
pub const BLOCK_SIZE_LIMIT: usize = 2_001_000;
|
||||||
|
|
||||||
|
pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
|
||||||
|
pub(crate) const TRANSACTION_MESSAGE: u8 = 1;
|
||||||
|
|
||||||
|
#[allow(clippy::large_enum_variant)]
|
||||||
|
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||||
|
pub enum Transaction<T: TransactionTrait> {
|
||||||
|
Tendermint(TendermintTx),
|
||||||
|
Application(T),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: TransactionTrait> ReadWrite for Transaction<T> {
|
||||||
|
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||||
|
let mut kind = [0];
|
||||||
|
reader.read_exact(&mut kind)?;
|
||||||
|
match kind[0] {
|
||||||
|
0 => {
|
||||||
|
let tx = TendermintTx::read(reader)?;
|
||||||
|
Ok(Transaction::Tendermint(tx))
|
||||||
|
}
|
||||||
|
1 => {
|
||||||
|
let tx = T::read(reader)?;
|
||||||
|
Ok(Transaction::Application(tx))
|
||||||
|
}
|
||||||
|
_ => Err(io::Error::other("invalid transaction type")),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||||
|
match self {
|
||||||
|
Transaction::Tendermint(tx) => {
|
||||||
|
writer.write_all(&[0])?;
|
||||||
|
tx.write(writer)
|
||||||
|
}
|
||||||
|
Transaction::Application(tx) => {
|
||||||
|
writer.write_all(&[1])?;
|
||||||
|
tx.write(writer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: TransactionTrait> Transaction<T> {
|
||||||
|
pub fn hash(&self) -> [u8; 32] {
|
||||||
|
match self {
|
||||||
|
Transaction::Tendermint(tx) => tx.hash(),
|
||||||
|
Transaction::Application(tx) => tx.hash(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn kind(&self) -> TransactionKind {
|
||||||
|
match self {
|
||||||
|
Transaction::Tendermint(tx) => tx.kind(),
|
||||||
|
Transaction::Application(tx) => tx.kind(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An item which can be read and written.
|
||||||
|
pub trait ReadWrite: Sized {
|
||||||
|
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;
|
||||||
|
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
|
||||||
|
|
||||||
|
fn serialize(&self) -> Vec<u8> {
|
||||||
|
// BlockHeader is 64 bytes and likely the smallest item in this system
|
||||||
|
let mut buf = Vec::with_capacity(64);
|
||||||
|
self.write(&mut buf).unwrap();
|
||||||
|
buf
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub trait P2p: 'static + Send + Sync + Clone {
|
||||||
|
/// Broadcast a message to all other members of the Tributary with the specified genesis.
|
||||||
|
///
|
||||||
|
/// The Tributary will re-broadcast consensus messages on a fixed interval to ensure they aren't
|
||||||
|
/// prematurely dropped from the P2P layer. THe P2P layer SHOULD perform content-based
|
||||||
|
/// deduplication to ensure a sane amount of load.
|
||||||
|
fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()>;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<P: P2p> P2p for Arc<P> {
|
||||||
|
fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()> {
|
||||||
|
P::broadcast(self, genesis, msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct Tributary<D: Db, T: TransactionTrait, P: P2p> {
|
||||||
|
db: D,
|
||||||
|
|
||||||
|
genesis: [u8; 32],
|
||||||
|
network: TendermintNetwork<D, T, P>,
|
||||||
|
|
||||||
|
synced_block: Arc<RwLock<SyncedBlockSender<TendermintNetwork<D, T, P>>>>,
|
||||||
|
synced_block_result: Arc<RwLock<SyncedBlockResultReceiver>>,
|
||||||
|
messages: Arc<RwLock<MessageSender<TendermintNetwork<D, T, P>>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
|
||||||
|
pub async fn new(
|
||||||
|
db: D,
|
||||||
|
genesis: [u8; 32],
|
||||||
|
start_time: u64,
|
||||||
|
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||||
|
validators: Vec<(<Ristretto as Ciphersuite>::G, u64)>,
|
||||||
|
p2p: P,
|
||||||
|
) -> Option<Self> {
|
||||||
|
log::info!("new Tributary with genesis {}", hex::encode(genesis));
|
||||||
|
|
||||||
|
let validators_vec = validators.iter().map(|validator| validator.0).collect::<Vec<_>>();
|
||||||
|
|
||||||
|
let signer = Arc::new(Signer::new(genesis, key));
|
||||||
|
let validators = Arc::new(Validators::new(genesis, validators)?);
|
||||||
|
|
||||||
|
let mut blockchain = Blockchain::new(db.clone(), genesis, &validators_vec);
|
||||||
|
let block_number = BlockNumber(blockchain.block_number());
|
||||||
|
|
||||||
|
let start_time = if let Some(commit) = blockchain.commit(&blockchain.tip()) {
|
||||||
|
Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time
|
||||||
|
} else {
|
||||||
|
start_time
|
||||||
|
};
|
||||||
|
let proposal = TendermintBlock(
|
||||||
|
blockchain.build_block::<TendermintNetwork<D, T, P>>(&validators).serialize(),
|
||||||
|
);
|
||||||
|
let blockchain = Arc::new(RwLock::new(blockchain));
|
||||||
|
|
||||||
|
let network = TendermintNetwork { genesis, signer, validators, blockchain, p2p };
|
||||||
|
|
||||||
|
let TendermintHandle { synced_block, synced_block_result, messages, machine } =
|
||||||
|
TendermintMachine::new(
|
||||||
|
db.clone(),
|
||||||
|
network.clone(),
|
||||||
|
genesis,
|
||||||
|
block_number,
|
||||||
|
start_time,
|
||||||
|
proposal,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
tokio::spawn(machine.run());
|
||||||
|
|
||||||
|
Some(Self {
|
||||||
|
db,
|
||||||
|
genesis,
|
||||||
|
network,
|
||||||
|
synced_block: Arc::new(RwLock::new(synced_block)),
|
||||||
|
synced_block_result: Arc::new(RwLock::new(synced_block_result)),
|
||||||
|
messages: Arc::new(RwLock::new(messages)),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn block_time() -> u32 {
|
||||||
|
TendermintNetwork::<D, T, P>::block_time()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn genesis(&self) -> [u8; 32] {
|
||||||
|
self.genesis
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn block_number(&self) -> u64 {
|
||||||
|
self.network.blockchain.read().await.block_number()
|
||||||
|
}
|
||||||
|
pub async fn tip(&self) -> [u8; 32] {
|
||||||
|
self.network.blockchain.read().await.tip()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn reader(&self) -> TributaryReader<D, T> {
|
||||||
|
TributaryReader(self.db.clone(), self.genesis, PhantomData)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn provide_transaction(&self, tx: T) -> Result<(), ProvidedError> {
|
||||||
|
self.network.blockchain.write().await.provide_transaction(tx)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn next_nonce(
|
||||||
|
&self,
|
||||||
|
signer: &<Ristretto as Ciphersuite>::G,
|
||||||
|
order: &[u8],
|
||||||
|
) -> Option<u32> {
|
||||||
|
self.network.blockchain.read().await.next_nonce(signer, order)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error.
|
||||||
|
// Safe to be &self since the only meaningful usage of self is self.network.blockchain which
|
||||||
|
// successfully acquires its own write lock
|
||||||
|
pub async fn add_transaction(&self, tx: T) -> Result<bool, TransactionError> {
|
||||||
|
let tx = Transaction::Application(tx);
|
||||||
|
let mut to_broadcast = vec![TRANSACTION_MESSAGE];
|
||||||
|
tx.write(&mut to_broadcast).unwrap();
|
||||||
|
let res = self.network.blockchain.write().await.add_transaction::<TendermintNetwork<D, T, P>>(
|
||||||
|
true,
|
||||||
|
tx,
|
||||||
|
&self.network.signature_scheme(),
|
||||||
|
);
|
||||||
|
if res == Ok(true) {
|
||||||
|
self.network.p2p.broadcast(self.genesis, to_broadcast).await;
|
||||||
|
}
|
||||||
|
res
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn sync_block_internal(
|
||||||
|
&self,
|
||||||
|
block: Block<T>,
|
||||||
|
commit: Vec<u8>,
|
||||||
|
result: &mut UnboundedReceiver<bool>,
|
||||||
|
) -> bool {
|
||||||
|
let (tip, block_number) = {
|
||||||
|
let blockchain = self.network.blockchain.read().await;
|
||||||
|
(blockchain.tip(), blockchain.block_number())
|
||||||
|
};
|
||||||
|
|
||||||
|
if block.header.parent != tip {
|
||||||
|
log::debug!("told to sync a block whose parent wasn't our tip");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
let block = TendermintBlock(block.serialize());
|
||||||
|
let mut commit_ref = commit.as_ref();
|
||||||
|
let Ok(commit) = Commit::<Arc<Validators>>::decode(&mut commit_ref) else {
|
||||||
|
log::error!("sent an invalidly serialized commit");
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
// Storage DoS vector. We *could* truncate to solely the relevant portion, trying to save this,
|
||||||
|
// yet then we'd have to test the truncation was performed correctly.
|
||||||
|
if !commit_ref.is_empty() {
|
||||||
|
log::error!("sent an commit with additional data after it");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if !self.network.verify_commit(block.id(), &commit) {
|
||||||
|
log::error!("sent an invalid commit");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
let number = BlockNumber(block_number + 1);
|
||||||
|
self.synced_block.write().await.send(SyncedBlock { number, block, commit }).await.unwrap();
|
||||||
|
result.next().await.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sync a block.
|
||||||
|
// TODO: Since we have a static validator set, we should only need the tail commit?
|
||||||
|
pub async fn sync_block(&self, block: Block<T>, commit: Vec<u8>) -> bool {
|
||||||
|
let mut result = self.synced_block_result.write().await;
|
||||||
|
self.sync_block_internal(block, commit, &mut result).await
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return true if the message should be rebroadcasted.
|
||||||
|
pub async fn handle_message(&self, msg: &[u8]) -> bool {
|
||||||
|
match msg.first() {
|
||||||
|
Some(&TRANSACTION_MESSAGE) => {
|
||||||
|
let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else {
|
||||||
|
log::error!("received invalid transaction message");
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
|
||||||
|
// TODO: Sync mempools with fellow peers
|
||||||
|
// Can we just rebroadcast transactions not included for at least two blocks?
|
||||||
|
let res =
|
||||||
|
self.network.blockchain.write().await.add_transaction::<TendermintNetwork<D, T, P>>(
|
||||||
|
false,
|
||||||
|
tx,
|
||||||
|
&self.network.signature_scheme(),
|
||||||
|
);
|
||||||
|
log::debug!("received transaction message. valid new transaction: {res:?}");
|
||||||
|
res == Ok(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(&TENDERMINT_MESSAGE) => {
|
||||||
|
let Ok(msg) =
|
||||||
|
SignedMessageFor::<TendermintNetwork<D, T, P>>::decode::<&[u8]>(&mut &msg[1 ..])
|
||||||
|
else {
|
||||||
|
log::error!("received invalid tendermint message");
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
|
||||||
|
self.messages.write().await.send(msg).await.unwrap();
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
|
_ => false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get a Future which will resolve once the next block has been added.
|
||||||
|
pub async fn next_block_notification(
|
||||||
|
&self,
|
||||||
|
) -> impl Send + Sync + core::future::Future<Output = Result<(), impl Send + Sync>> {
|
||||||
|
let (tx, rx) = tokio::sync::oneshot::channel();
|
||||||
|
self.network.blockchain.write().await.next_block_notifications.push_back(tx);
|
||||||
|
rx
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct TributaryReader<D: Db, T: TransactionTrait>(D, [u8; 32], PhantomData<T>);
|
||||||
|
impl<D: Db, T: TransactionTrait> TributaryReader<D, T> {
|
||||||
|
pub fn genesis(&self) -> [u8; 32] {
|
||||||
|
self.1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Since these values are static once set, they can be safely read from the database without lock
|
||||||
|
// acquisition
|
||||||
|
pub fn block(&self, hash: &[u8; 32]) -> Option<Block<T>> {
|
||||||
|
Blockchain::<D, T>::block_from_db(&self.0, self.1, hash)
|
||||||
|
}
|
||||||
|
pub fn commit(&self, hash: &[u8; 32]) -> Option<Vec<u8>> {
|
||||||
|
Blockchain::<D, T>::commit_from_db(&self.0, self.1, hash)
|
||||||
|
}
|
||||||
|
pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option<Commit<Validators>> {
|
||||||
|
self.commit(hash).map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap())
|
||||||
|
}
|
||||||
|
pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> {
|
||||||
|
Blockchain::<D, T>::block_after(&self.0, self.1, hash)
|
||||||
|
}
|
||||||
|
pub fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> {
|
||||||
|
self
|
||||||
|
.commit(hash)
|
||||||
|
.map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str) -> bool {
|
||||||
|
Blockchain::<D, T>::locally_provided_txs_in_block(&self.0, &self.1, hash, order)
|
||||||
|
}
|
||||||
|
|
||||||
|
// This isn't static, yet can be read with only minor discrepancy risks
|
||||||
|
pub fn tip(&self) -> [u8; 32] {
|
||||||
|
Blockchain::<D, T>::tip_from_db(&self.0, self.1)
|
||||||
|
}
|
||||||
|
}
|
||||||
218
coordinator/tributary-sdk/src/transaction.rs
Normal file
218
coordinator/tributary-sdk/src/transaction.rs
Normal file
@@ -0,0 +1,218 @@
|
|||||||
|
use core::fmt::Debug;
|
||||||
|
use std::io;
|
||||||
|
|
||||||
|
use zeroize::Zeroize;
|
||||||
|
use thiserror::Error;
|
||||||
|
|
||||||
|
use blake2::{Digest, Blake2b512};
|
||||||
|
|
||||||
|
use ciphersuite::{
|
||||||
|
group::{Group, GroupEncoding},
|
||||||
|
Ciphersuite, Ristretto,
|
||||||
|
};
|
||||||
|
use schnorr::SchnorrSignature;
|
||||||
|
|
||||||
|
use crate::{TRANSACTION_SIZE_LIMIT, ReadWrite};
|
||||||
|
|
||||||
|
#[derive(Clone, PartialEq, Eq, Debug, Error)]
|
||||||
|
pub enum TransactionError {
|
||||||
|
/// Transaction exceeded the size limit.
|
||||||
|
#[error("transaction is too large")]
|
||||||
|
TooLargeTransaction,
|
||||||
|
/// Transaction's signer isn't a participant.
|
||||||
|
#[error("invalid signer")]
|
||||||
|
InvalidSigner,
|
||||||
|
/// Transaction's nonce isn't the prior nonce plus one.
|
||||||
|
#[error("invalid nonce")]
|
||||||
|
InvalidNonce,
|
||||||
|
/// Transaction's signature is invalid.
|
||||||
|
#[error("invalid signature")]
|
||||||
|
InvalidSignature,
|
||||||
|
/// Transaction's content is invalid.
|
||||||
|
#[error("transaction content is invalid")]
|
||||||
|
InvalidContent,
|
||||||
|
/// Transaction's signer has too many transactions in the mempool.
|
||||||
|
#[error("signer has too many transactions in the mempool")]
|
||||||
|
TooManyInMempool,
|
||||||
|
/// Provided Transaction added to mempool.
|
||||||
|
#[error("provided transaction added to mempool")]
|
||||||
|
ProvidedAddedToMempool,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Data for a signed transaction.
|
||||||
|
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||||
|
pub struct Signed {
|
||||||
|
pub signer: <Ristretto as Ciphersuite>::G,
|
||||||
|
pub nonce: u32,
|
||||||
|
pub signature: SchnorrSignature<Ristretto>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ReadWrite for Signed {
|
||||||
|
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||||
|
let signer = Ristretto::read_G(reader)?;
|
||||||
|
|
||||||
|
let mut nonce = [0; 4];
|
||||||
|
reader.read_exact(&mut nonce)?;
|
||||||
|
let nonce = u32::from_le_bytes(nonce);
|
||||||
|
if nonce >= (u32::MAX - 1) {
|
||||||
|
Err(io::Error::other("nonce exceeded limit"))?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut signature = SchnorrSignature::<Ristretto>::read(reader)?;
|
||||||
|
if signature.R.is_identity().into() {
|
||||||
|
// Anyone malicious could remove this and try to find zero signatures
|
||||||
|
// We should never produce zero signatures though meaning this should never come up
|
||||||
|
// If it does somehow come up, this is a decent courtesy
|
||||||
|
signature.zeroize();
|
||||||
|
Err(io::Error::other("signature nonce was identity"))?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(Signed { signer, nonce, signature })
|
||||||
|
}
|
||||||
|
|
||||||
|
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||||
|
// This is either an invalid signature or a private key leak
|
||||||
|
if self.signature.R.is_identity().into() {
|
||||||
|
Err(io::Error::other("signature nonce was identity"))?;
|
||||||
|
}
|
||||||
|
writer.write_all(&self.signer.to_bytes())?;
|
||||||
|
writer.write_all(&self.nonce.to_le_bytes())?;
|
||||||
|
self.signature.write(writer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Signed {
|
||||||
|
pub fn read_without_nonce<R: io::Read>(reader: &mut R, nonce: u32) -> io::Result<Self> {
|
||||||
|
let signer = Ristretto::read_G(reader)?;
|
||||||
|
|
||||||
|
let mut signature = SchnorrSignature::<Ristretto>::read(reader)?;
|
||||||
|
if signature.R.is_identity().into() {
|
||||||
|
// Anyone malicious could remove this and try to find zero signatures
|
||||||
|
// We should never produce zero signatures though meaning this should never come up
|
||||||
|
// If it does somehow come up, this is a decent courtesy
|
||||||
|
signature.zeroize();
|
||||||
|
Err(io::Error::other("signature nonce was identity"))?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(Signed { signer, nonce, signature })
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn write_without_nonce<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||||
|
// This is either an invalid signature or a private key leak
|
||||||
|
if self.signature.R.is_identity().into() {
|
||||||
|
Err(io::Error::other("signature nonce was identity"))?;
|
||||||
|
}
|
||||||
|
writer.write_all(&self.signer.to_bytes())?;
|
||||||
|
self.signature.write(writer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::large_enum_variant)]
|
||||||
|
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||||
|
pub enum TransactionKind {
|
||||||
|
/// This transaction should be provided by every validator, in an exact order.
|
||||||
|
///
|
||||||
|
/// The contained static string names the orderer to use. This allows two distinct provided
|
||||||
|
/// transaction kinds, without a synchronized order, to be ordered within their own kind without
|
||||||
|
/// requiring ordering with each other.
|
||||||
|
///
|
||||||
|
/// The only malleability is in when this transaction appears on chain. The block producer will
|
||||||
|
/// include it when they have it. Block verification will fail for validators without it.
|
||||||
|
///
|
||||||
|
/// If a supermajority of validators produce a commit for a block with a provided transaction
|
||||||
|
/// which isn't locally held, the block will be added to the local chain. When the transaction is
|
||||||
|
/// locally provided, it will be compared for correctness to the on-chain version
|
||||||
|
///
|
||||||
|
/// In order to ensure TXs aren't accidentally provided multiple times, all provided transactions
|
||||||
|
/// must have a unique hash which is also unique to all Unsigned transactions.
|
||||||
|
Provided(&'static str),
|
||||||
|
|
||||||
|
/// An unsigned transaction, only able to be included by the block producer.
|
||||||
|
///
|
||||||
|
/// Once an Unsigned transaction is included on-chain, it may not be included again. In order to
|
||||||
|
/// have multiple Unsigned transactions with the same values included on-chain, some distinct
|
||||||
|
/// nonce must be included in order to cause a distinct hash.
|
||||||
|
///
|
||||||
|
/// The hash must also be unique with all Provided transactions.
|
||||||
|
Unsigned,
|
||||||
|
|
||||||
|
/// A signed transaction.
|
||||||
|
Signed(Vec<u8>, Signed),
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Should this be renamed TransactionTrait now that a literal Transaction exists?
|
||||||
|
// Or should the literal Transaction be renamed to Event?
|
||||||
|
pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite {
|
||||||
|
/// Return what type of transaction this is.
|
||||||
|
fn kind(&self) -> TransactionKind;
|
||||||
|
|
||||||
|
/// Return the hash of this transaction.
|
||||||
|
///
|
||||||
|
/// The hash must NOT commit to the signature.
|
||||||
|
fn hash(&self) -> [u8; 32];
|
||||||
|
|
||||||
|
/// Perform transaction-specific verification.
|
||||||
|
fn verify(&self) -> Result<(), TransactionError>;
|
||||||
|
|
||||||
|
/// Obtain the challenge for this transaction's signature.
|
||||||
|
///
|
||||||
|
/// Do not override this unless you know what you're doing.
|
||||||
|
///
|
||||||
|
/// Panics if called on non-signed transactions.
|
||||||
|
fn sig_hash(&self, genesis: [u8; 32]) -> <Ristretto as Ciphersuite>::F {
|
||||||
|
match self.kind() {
|
||||||
|
TransactionKind::Signed(order, Signed { signature, .. }) => {
|
||||||
|
<Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(
|
||||||
|
&Blake2b512::digest(
|
||||||
|
[
|
||||||
|
b"Tributary Signed Transaction",
|
||||||
|
genesis.as_ref(),
|
||||||
|
&self.hash(),
|
||||||
|
order.as_ref(),
|
||||||
|
signature.R.to_bytes().as_ref(),
|
||||||
|
]
|
||||||
|
.concat(),
|
||||||
|
)
|
||||||
|
.into(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
_ => panic!("sig_hash called on non-signed transaction"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub trait GAIN: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32> {}
|
||||||
|
impl<F: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32>> GAIN for F {}
|
||||||
|
|
||||||
|
pub(crate) fn verify_transaction<F: GAIN, T: Transaction>(
|
||||||
|
tx: &T,
|
||||||
|
genesis: [u8; 32],
|
||||||
|
get_and_increment_nonce: &mut F,
|
||||||
|
) -> Result<(), TransactionError> {
|
||||||
|
if tx.serialize().len() > TRANSACTION_SIZE_LIMIT {
|
||||||
|
Err(TransactionError::TooLargeTransaction)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
tx.verify()?;
|
||||||
|
|
||||||
|
match tx.kind() {
|
||||||
|
TransactionKind::Provided(_) | TransactionKind::Unsigned => {}
|
||||||
|
TransactionKind::Signed(order, Signed { signer, nonce, signature }) => {
|
||||||
|
if let Some(next_nonce) = get_and_increment_nonce(&signer, &order) {
|
||||||
|
if nonce != next_nonce {
|
||||||
|
Err(TransactionError::InvalidNonce)?;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Not a participant
|
||||||
|
Err(TransactionError::InvalidSigner)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Use a batch verification here
|
||||||
|
if !signature.verify(signer, tx.sig_hash(genesis)) {
|
||||||
|
Err(TransactionError::InvalidSignature)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
@@ -1,11 +1,13 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "tributary-chain"
|
name = "serai-coordinator-tributary"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
description = "A micro-blockchain to provide consensus and ordering to P2P communication"
|
description = "The Tributary used by the Serai Coordinator"
|
||||||
license = "AGPL-3.0-only"
|
license = "AGPL-3.0-only"
|
||||||
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary"
|
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary"
|
||||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
|
keywords = []
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
|
publish = false
|
||||||
rust-version = "1.81"
|
rust-version = "1.81"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
@@ -16,34 +18,29 @@ rustdoc-args = ["--cfg", "docsrs"]
|
|||||||
workspace = true
|
workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
thiserror = { version = "2", default-features = false, features = ["std"] }
|
|
||||||
|
|
||||||
subtle = { version = "^2", default-features = false, features = ["std"] }
|
|
||||||
zeroize = { version = "^1.5", default-features = false, features = ["std"] }
|
zeroize = { version = "^1.5", default-features = false, features = ["std"] }
|
||||||
|
rand_core = { version = "0.6", default-features = false, features = ["std"] }
|
||||||
rand = { version = "0.8", default-features = false, features = ["std"] }
|
|
||||||
rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
|
|
||||||
|
|
||||||
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
||||||
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] }
|
ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }
|
|
||||||
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] }
|
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
hex = { version = "0.4", default-features = false, features = ["std"] }
|
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
|
||||||
log = { version = "0.4", default-features = false, features = ["std"] }
|
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
|
||||||
|
|
||||||
|
serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
|
||||||
|
|
||||||
serai-db = { path = "../../common/db" }
|
serai-db = { path = "../../common/db" }
|
||||||
|
serai-task = { path = "../../common/task", version = "0.1" }
|
||||||
|
|
||||||
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
|
tributary-sdk = { path = "../tributary-sdk" }
|
||||||
futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "channel"] }
|
|
||||||
futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
|
|
||||||
tendermint = { package = "tendermint-machine", path = "./tendermint" }
|
|
||||||
|
|
||||||
tokio = { version = "1", default-features = false, features = ["sync", "time", "rt"] }
|
serai-cosign = { path = "../cosign" }
|
||||||
|
serai-coordinator-substrate = { path = "../substrate" }
|
||||||
|
|
||||||
[dev-dependencies]
|
messages = { package = "serai-processor-messages", path = "../../processor/messages" }
|
||||||
tokio = { version = "1", features = ["macros"] }
|
|
||||||
|
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
tests = []
|
longer-reattempts = []
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
AGPL-3.0-only license
|
AGPL-3.0-only license
|
||||||
|
|
||||||
Copyright (c) 2023 Luke Parker
|
Copyright (c) 2023-2025 Luke Parker
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
This program is free software: you can redistribute it and/or modify
|
||||||
it under the terms of the GNU Affero General Public License Version 3 as
|
it under the terms of the GNU Affero General Public License Version 3 as
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
# Tributary
|
# Serai Coordinator Tributary
|
||||||
|
|
||||||
A verifiable, ordered broadcast layer implemented as a BFT micro-blockchain.
|
The Tributary used by the Serai Coordinator. This includes the `Transaction`
|
||||||
|
definition and the code to handle blocks added on-chain.
|
||||||
|
|||||||
@@ -9,7 +9,9 @@ use messages::sign::{VariantSignId, SignId};
|
|||||||
|
|
||||||
use serai_db::*;
|
use serai_db::*;
|
||||||
|
|
||||||
use crate::tributary::transaction::SigningProtocolRound;
|
use serai_cosign::CosignIntent;
|
||||||
|
|
||||||
|
use crate::transaction::SigningProtocolRound;
|
||||||
|
|
||||||
/// A topic within the database which the group participates in
|
/// A topic within the database which the group participates in
|
||||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
|
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
|
||||||
@@ -167,6 +169,9 @@ impl Topic {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) trait Borshy: BorshSerialize + BorshDeserialize {}
|
||||||
|
impl<T: BorshSerialize + BorshDeserialize> Borshy for T {}
|
||||||
|
|
||||||
/// The resulting data set from an accumulation
|
/// The resulting data set from an accumulation
|
||||||
pub(crate) enum DataSet<D: Borshy> {
|
pub(crate) enum DataSet<D: Borshy> {
|
||||||
/// Accumulating this did not produce a data set to act on
|
/// Accumulating this did not produce a data set to act on
|
||||||
@@ -176,21 +181,25 @@ pub(crate) enum DataSet<D: Borshy> {
|
|||||||
Participating(HashMap<SeraiAddress, D>),
|
Participating(HashMap<SeraiAddress, D>),
|
||||||
}
|
}
|
||||||
|
|
||||||
trait Borshy: BorshSerialize + BorshDeserialize {}
|
|
||||||
impl<T: BorshSerialize + BorshDeserialize> Borshy for T {}
|
|
||||||
|
|
||||||
create_db!(
|
create_db!(
|
||||||
CoordinatorTributary {
|
CoordinatorTributary {
|
||||||
// The last handled tributary block's (number, hash)
|
// The last handled tributary block's (number, hash)
|
||||||
LastHandledTributaryBlock: (set: ValidatorSet) -> (u64, [u8; 32]),
|
LastHandledTributaryBlock: (set: ValidatorSet) -> (u64, [u8; 32]),
|
||||||
|
|
||||||
// The slash points a validator has accrued, with u64::MAX representing a fatal slash.
|
// The slash points a validator has accrued, with u32::MAX representing a fatal slash.
|
||||||
SlashPoints: (set: ValidatorSet, validator: SeraiAddress) -> u64,
|
SlashPoints: (set: ValidatorSet, validator: SeraiAddress) -> u32,
|
||||||
|
|
||||||
|
// The cosign intent for a Substrate block
|
||||||
|
CosignIntents: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> CosignIntent,
|
||||||
// The latest Substrate block to cosign.
|
// The latest Substrate block to cosign.
|
||||||
LatestSubstrateBlockToCosign: (set: ValidatorSet) -> [u8; 32],
|
LatestSubstrateBlockToCosign: (set: ValidatorSet) -> [u8; 32],
|
||||||
// If we're actively cosigning or not.
|
// The hash of the block we're actively cosigning.
|
||||||
ActivelyCosigning: (set: ValidatorSet) -> (),
|
ActivelyCosigning: (set: ValidatorSet) -> [u8; 32],
|
||||||
|
// If this block has already been cosigned.
|
||||||
|
Cosigned: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> (),
|
||||||
|
|
||||||
|
// The plans to whitelist upon a `Transaction::SubstrateBlock` being included on-chain.
|
||||||
|
SubstrateBlockPlans: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> Vec<[u8; 32]>,
|
||||||
|
|
||||||
// The weight accumulated for a topic.
|
// The weight accumulated for a topic.
|
||||||
AccumulatedWeight: (set: ValidatorSet, topic: Topic) -> u64,
|
AccumulatedWeight: (set: ValidatorSet, topic: Topic) -> u64,
|
||||||
@@ -238,19 +247,20 @@ impl TributaryDb {
|
|||||||
) {
|
) {
|
||||||
LatestSubstrateBlockToCosign::set(txn, set, &substrate_block_hash);
|
LatestSubstrateBlockToCosign::set(txn, set, &substrate_block_hash);
|
||||||
}
|
}
|
||||||
pub(crate) fn actively_cosigning(txn: &mut impl DbTxn, set: ValidatorSet) -> bool {
|
pub(crate) fn actively_cosigning(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<[u8; 32]> {
|
||||||
ActivelyCosigning::get(txn, set).is_some()
|
ActivelyCosigning::get(txn, set)
|
||||||
}
|
}
|
||||||
pub(crate) fn start_cosigning(
|
pub(crate) fn start_cosigning(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
set: ValidatorSet,
|
set: ValidatorSet,
|
||||||
|
substrate_block_hash: [u8; 32],
|
||||||
substrate_block_number: u64,
|
substrate_block_number: u64,
|
||||||
) {
|
) {
|
||||||
assert!(
|
assert!(
|
||||||
ActivelyCosigning::get(txn, set).is_none(),
|
ActivelyCosigning::get(txn, set).is_none(),
|
||||||
"starting cosigning while already cosigning"
|
"starting cosigning while already cosigning"
|
||||||
);
|
);
|
||||||
ActivelyCosigning::set(txn, set, &());
|
ActivelyCosigning::set(txn, set, &substrate_block_hash);
|
||||||
|
|
||||||
TributaryDb::recognize_topic(
|
TributaryDb::recognize_topic(
|
||||||
txn,
|
txn,
|
||||||
@@ -265,6 +275,20 @@ impl TributaryDb {
|
|||||||
pub(crate) fn finish_cosigning(txn: &mut impl DbTxn, set: ValidatorSet) {
|
pub(crate) fn finish_cosigning(txn: &mut impl DbTxn, set: ValidatorSet) {
|
||||||
assert!(ActivelyCosigning::take(txn, set).is_some(), "finished cosigning but not cosigning");
|
assert!(ActivelyCosigning::take(txn, set).is_some(), "finished cosigning but not cosigning");
|
||||||
}
|
}
|
||||||
|
pub(crate) fn mark_cosigned(
|
||||||
|
txn: &mut impl DbTxn,
|
||||||
|
set: ValidatorSet,
|
||||||
|
substrate_block_hash: [u8; 32],
|
||||||
|
) {
|
||||||
|
Cosigned::set(txn, set, substrate_block_hash, &());
|
||||||
|
}
|
||||||
|
pub(crate) fn cosigned(
|
||||||
|
txn: &mut impl DbTxn,
|
||||||
|
set: ValidatorSet,
|
||||||
|
substrate_block_hash: [u8; 32],
|
||||||
|
) -> bool {
|
||||||
|
Cosigned::get(txn, set, substrate_block_hash).is_some()
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) fn recognize_topic(txn: &mut impl DbTxn, set: ValidatorSet, topic: Topic) {
|
pub(crate) fn recognize_topic(txn: &mut impl DbTxn, set: ValidatorSet, topic: Topic) {
|
||||||
AccumulatedWeight::set(txn, set, topic, &0);
|
AccumulatedWeight::set(txn, set, topic, &0);
|
||||||
@@ -272,7 +296,19 @@ impl TributaryDb {
|
|||||||
|
|
||||||
pub(crate) fn start_of_block(txn: &mut impl DbTxn, set: ValidatorSet, block_number: u64) {
|
pub(crate) fn start_of_block(txn: &mut impl DbTxn, set: ValidatorSet, block_number: u64) {
|
||||||
for topic in Reattempt::take(txn, set, block_number).unwrap_or(vec![]) {
|
for topic in Reattempt::take(txn, set, block_number).unwrap_or(vec![]) {
|
||||||
// TODO: Slash all people who preprocessed but didn't share
|
/*
|
||||||
|
TODO: Slash all people who preprocessed but didn't share, and add a delay to their
|
||||||
|
participations in future protocols. When we call accumulate, if the participant has no
|
||||||
|
delay, their accumulation occurs immediately. Else, the accumulation occurs after the
|
||||||
|
specified delay.
|
||||||
|
|
||||||
|
This means even if faulty validators are first to preprocess, they won't be selected for
|
||||||
|
the signing set unless there's a lack of less faulty validators available.
|
||||||
|
|
||||||
|
We need to decrease this delay upon successful partipations, and set it to the maximum upon
|
||||||
|
`f + 1` validators voting to fatally slash the validator in question. This won't issue the
|
||||||
|
fatal slash but should still be effective.
|
||||||
|
*/
|
||||||
Self::recognize_topic(txn, set, topic);
|
Self::recognize_topic(txn, set, topic);
|
||||||
if let Some(id) = topic.sign_id(set) {
|
if let Some(id) = topic.sign_id(set) {
|
||||||
Self::send_message(txn, set, messages::sign::CoordinatorMessage::Reattempt { id });
|
Self::send_message(txn, set, messages::sign::CoordinatorMessage::Reattempt { id });
|
||||||
@@ -287,7 +323,7 @@ impl TributaryDb {
|
|||||||
reason: &str,
|
reason: &str,
|
||||||
) {
|
) {
|
||||||
log::warn!("{validator} fatally slashed: {reason}");
|
log::warn!("{validator} fatally slashed: {reason}");
|
||||||
SlashPoints::set(txn, set, validator, &u64::MAX);
|
SlashPoints::set(txn, set, validator, &u32::MAX);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn is_fatally_slashed(
|
pub(crate) fn is_fatally_slashed(
|
||||||
@@ -295,7 +331,7 @@ impl TributaryDb {
|
|||||||
set: ValidatorSet,
|
set: ValidatorSet,
|
||||||
validator: SeraiAddress,
|
validator: SeraiAddress,
|
||||||
) -> bool {
|
) -> bool {
|
||||||
SlashPoints::get(getter, set, validator).unwrap_or(0) == u64::MAX
|
SlashPoints::get(getter, set, validator).unwrap_or(0) == u32::MAX
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(clippy::too_many_arguments)]
|
#[allow(clippy::too_many_arguments)]
|
||||||
@@ -360,12 +396,12 @@ impl TributaryDb {
|
|||||||
// 5 minutes
|
// 5 minutes
|
||||||
#[cfg(not(feature = "longer-reattempts"))]
|
#[cfg(not(feature = "longer-reattempts"))]
|
||||||
const BASE_REATTEMPT_DELAY: u32 =
|
const BASE_REATTEMPT_DELAY: u32 =
|
||||||
(5u32 * 60 * 1000).div_ceil(tributary::tendermint::TARGET_BLOCK_TIME);
|
(5u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME);
|
||||||
|
|
||||||
// 10 minutes, intended for latent environments like the GitHub CI
|
// 10 minutes, intended for latent environments like the GitHub CI
|
||||||
#[cfg(feature = "longer-reattempts")]
|
#[cfg(feature = "longer-reattempts")]
|
||||||
const BASE_REATTEMPT_DELAY: u32 =
|
const BASE_REATTEMPT_DELAY: u32 =
|
||||||
(10u32 * 60 * 1000).div_ceil(tributary::tendermint::TARGET_BLOCK_TIME);
|
(10u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME);
|
||||||
|
|
||||||
// Linearly scale the time for the protocol with the attempt number
|
// Linearly scale the time for the protocol with the attempt number
|
||||||
let blocks_till_reattempt = u64::from(attempt * BASE_REATTEMPT_DELAY);
|
let blocks_till_reattempt = u64::from(attempt * BASE_REATTEMPT_DELAY);
|
||||||
@@ -1,388 +1,584 @@
|
|||||||
use core::{marker::PhantomData, fmt::Debug, future::Future};
|
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||||
use std::{sync::Arc, io};
|
#![doc = include_str!("../README.md")]
|
||||||
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
use zeroize::Zeroizing;
|
use core::{marker::PhantomData, future::Future};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use ciphersuite::{Ciphersuite, Ristretto};
|
use ciphersuite::group::GroupEncoding;
|
||||||
|
|
||||||
use scale::Decode;
|
use serai_client::{
|
||||||
use futures_channel::mpsc::UnboundedReceiver;
|
primitives::SeraiAddress,
|
||||||
use futures_util::{StreamExt, SinkExt};
|
validator_sets::primitives::{ValidatorSet, Slash},
|
||||||
use ::tendermint::{
|
|
||||||
ext::{BlockNumber, Commit, Block as BlockTrait, Network},
|
|
||||||
SignedMessageFor, SyncedBlock, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender,
|
|
||||||
TendermintMachine, TendermintHandle,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
pub use ::tendermint::Evidence;
|
use serai_db::*;
|
||||||
|
use serai_task::ContinuallyRan;
|
||||||
|
|
||||||
use serai_db::Db;
|
use tributary_sdk::{
|
||||||
|
tendermint::{
|
||||||
|
tx::{TendermintTx, Evidence, decode_signed_message},
|
||||||
|
TendermintNetwork,
|
||||||
|
},
|
||||||
|
Signed as TributarySigned, TransactionKind, TransactionTrait,
|
||||||
|
Transaction as TributaryTransaction, Block, TributaryReader, P2p,
|
||||||
|
};
|
||||||
|
|
||||||
use tokio::sync::RwLock;
|
use serai_cosign::CosignIntent;
|
||||||
|
use serai_coordinator_substrate::NewSetInformation;
|
||||||
|
|
||||||
mod merkle;
|
use messages::sign::VariantSignId;
|
||||||
pub(crate) use merkle::*;
|
|
||||||
|
|
||||||
pub mod transaction;
|
mod transaction;
|
||||||
pub use transaction::{TransactionError, Signed, TransactionKind, Transaction as TransactionTrait};
|
pub use transaction::{SigningProtocolRound, Signed, Transaction};
|
||||||
|
|
||||||
use crate::tendermint::tx::TendermintTx;
|
mod db;
|
||||||
|
use db::*;
|
||||||
|
|
||||||
mod provided;
|
/// Messages to send to the Processors.
|
||||||
pub(crate) use provided::*;
|
pub struct ProcessorMessages;
|
||||||
pub use provided::ProvidedError;
|
impl ProcessorMessages {
|
||||||
|
/// Try to receive a message to send to a Processor.
|
||||||
mod block;
|
pub fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<messages::CoordinatorMessage> {
|
||||||
pub use block::*;
|
db::ProcessorMessages::try_recv(txn, set)
|
||||||
|
|
||||||
mod blockchain;
|
|
||||||
pub(crate) use blockchain::*;
|
|
||||||
|
|
||||||
mod mempool;
|
|
||||||
pub(crate) use mempool::*;
|
|
||||||
|
|
||||||
pub mod tendermint;
|
|
||||||
pub(crate) use crate::tendermint::*;
|
|
||||||
|
|
||||||
#[cfg(any(test, feature = "tests"))]
|
|
||||||
pub mod tests;
|
|
||||||
|
|
||||||
/// Size limit for an individual transaction.
|
|
||||||
// This needs to be big enough to participate in a 101-of-150 eVRF DKG with each element taking
|
|
||||||
// `MAX_KEY_LEN`. This also needs to be big enough to pariticpate in signing 520 Bitcoin inputs
|
|
||||||
// with 49 key shares, and signing 120 Monero inputs with 49 key shares.
|
|
||||||
// TODO: Add a test for these properties
|
|
||||||
pub const TRANSACTION_SIZE_LIMIT: usize = 2_000_000;
|
|
||||||
/// Amount of transactions a single account may have in the mempool.
|
|
||||||
pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
|
|
||||||
/// Block size limit.
|
|
||||||
// This targets a growth limit of roughly 30 GB a day, under load, in order to prevent a malicious
|
|
||||||
// participant from flooding disks and causing out of space errors in order processes.
|
|
||||||
pub const BLOCK_SIZE_LIMIT: usize = 2_001_000;
|
|
||||||
|
|
||||||
pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
|
|
||||||
pub(crate) const TRANSACTION_MESSAGE: u8 = 1;
|
|
||||||
|
|
||||||
#[allow(clippy::large_enum_variant)]
|
|
||||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
|
||||||
pub enum Transaction<T: TransactionTrait> {
|
|
||||||
Tendermint(TendermintTx),
|
|
||||||
Application(T),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: TransactionTrait> ReadWrite for Transaction<T> {
|
|
||||||
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
|
||||||
let mut kind = [0];
|
|
||||||
reader.read_exact(&mut kind)?;
|
|
||||||
match kind[0] {
|
|
||||||
0 => {
|
|
||||||
let tx = TendermintTx::read(reader)?;
|
|
||||||
Ok(Transaction::Tendermint(tx))
|
|
||||||
}
|
|
||||||
1 => {
|
|
||||||
let tx = T::read(reader)?;
|
|
||||||
Ok(Transaction::Application(tx))
|
|
||||||
}
|
|
||||||
_ => Err(io::Error::other("invalid transaction type")),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
|
||||||
match self {
|
|
||||||
Transaction::Tendermint(tx) => {
|
|
||||||
writer.write_all(&[0])?;
|
|
||||||
tx.write(writer)
|
|
||||||
}
|
|
||||||
Transaction::Application(tx) => {
|
|
||||||
writer.write_all(&[1])?;
|
|
||||||
tx.write(writer)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: TransactionTrait> Transaction<T> {
|
/// The cosign intents.
|
||||||
pub fn hash(&self) -> [u8; 32] {
|
pub struct CosignIntents;
|
||||||
match self {
|
impl CosignIntents {
|
||||||
Transaction::Tendermint(tx) => tx.hash(),
|
/// Provide a CosignIntent for this Tributary.
|
||||||
Transaction::Application(tx) => tx.hash(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn kind(&self) -> TransactionKind {
|
|
||||||
match self {
|
|
||||||
Transaction::Tendermint(tx) => tx.kind(),
|
|
||||||
Transaction::Application(tx) => tx.kind(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// An item which can be read and written.
|
|
||||||
pub trait ReadWrite: Sized {
|
|
||||||
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;
|
|
||||||
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
|
|
||||||
|
|
||||||
fn serialize(&self) -> Vec<u8> {
|
|
||||||
// BlockHeader is 64 bytes and likely the smallest item in this system
|
|
||||||
let mut buf = Vec::with_capacity(64);
|
|
||||||
self.write(&mut buf).unwrap();
|
|
||||||
buf
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub trait P2p: 'static + Send + Sync + Clone {
|
|
||||||
/// Broadcast a message to all other members of the Tributary with the specified genesis.
|
|
||||||
///
|
///
|
||||||
/// The Tributary will re-broadcast consensus messages on a fixed interval to ensure they aren't
|
/// This must be done before the associated `Transaction::Cosign` is provided.
|
||||||
/// prematurely dropped from the P2P layer. THe P2P layer SHOULD perform content-based
|
pub fn provide(txn: &mut impl DbTxn, set: ValidatorSet, intent: &CosignIntent) {
|
||||||
/// deduplication to ensure a sane amount of load.
|
db::CosignIntents::set(txn, set, intent.block_hash, intent);
|
||||||
fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()>;
|
}
|
||||||
}
|
fn take(
|
||||||
|
txn: &mut impl DbTxn,
|
||||||
impl<P: P2p> P2p for Arc<P> {
|
set: ValidatorSet,
|
||||||
fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()> {
|
substrate_block_hash: [u8; 32],
|
||||||
P::broadcast(self, genesis, msg)
|
) -> Option<CosignIntent> {
|
||||||
|
db::CosignIntents::take(txn, set, substrate_block_hash)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
/// The plans to whitelist upon a `Transaction::SubstrateBlock` being included on-chain.
|
||||||
pub struct Tributary<D: Db, T: TransactionTrait, P: P2p> {
|
pub struct SubstrateBlockPlans;
|
||||||
db: D,
|
impl SubstrateBlockPlans {
|
||||||
|
/// Set the plans to whitelist upon the associated `Transaction::SubstrateBlock` being included
|
||||||
genesis: [u8; 32],
|
/// on-chain.
|
||||||
network: TendermintNetwork<D, T, P>,
|
///
|
||||||
|
/// This must be done before the associated `Transaction::Cosign` is provided.
|
||||||
synced_block: Arc<RwLock<SyncedBlockSender<TendermintNetwork<D, T, P>>>>,
|
pub fn set(
|
||||||
synced_block_result: Arc<RwLock<SyncedBlockResultReceiver>>,
|
txn: &mut impl DbTxn,
|
||||||
messages: Arc<RwLock<MessageSender<TendermintNetwork<D, T, P>>>>,
|
set: ValidatorSet,
|
||||||
|
substrate_block_hash: [u8; 32],
|
||||||
|
plans: &Vec<[u8; 32]>,
|
||||||
|
) {
|
||||||
|
db::SubstrateBlockPlans::set(txn, set, substrate_block_hash, &plans);
|
||||||
|
}
|
||||||
|
fn take(
|
||||||
|
txn: &mut impl DbTxn,
|
||||||
|
set: ValidatorSet,
|
||||||
|
substrate_block_hash: [u8; 32],
|
||||||
|
) -> Option<Vec<[u8; 32]>> {
|
||||||
|
db::SubstrateBlockPlans::take(txn, set, substrate_block_hash)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
|
struct ScanBlock<'a, TD: Db, TDT: DbTxn, P: P2p> {
|
||||||
pub async fn new(
|
_td: PhantomData<TD>,
|
||||||
db: D,
|
_p2p: PhantomData<P>,
|
||||||
genesis: [u8; 32],
|
tributary_txn: &'a mut TDT,
|
||||||
start_time: u64,
|
set: ValidatorSet,
|
||||||
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
|
validators: &'a [SeraiAddress],
|
||||||
validators: Vec<(<Ristretto as Ciphersuite>::G, u64)>,
|
total_weight: u64,
|
||||||
p2p: P,
|
validator_weights: &'a HashMap<SeraiAddress, u64>,
|
||||||
) -> Option<Self> {
|
}
|
||||||
log::info!("new Tributary with genesis {}", hex::encode(genesis));
|
impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
|
||||||
|
fn potentially_start_cosign(&mut self) {
|
||||||
|
// Don't start a new cosigning instance if we're actively running one
|
||||||
|
if TributaryDb::actively_cosigning(self.tributary_txn, self.set).is_some() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
let validators_vec = validators.iter().map(|validator| validator.0).collect::<Vec<_>>();
|
// Fetch the latest intended-to-be-cosigned block
|
||||||
|
let Some(latest_substrate_block_to_cosign) =
|
||||||
let signer = Arc::new(Signer::new(genesis, key));
|
TributaryDb::latest_substrate_block_to_cosign(self.tributary_txn, self.set)
|
||||||
let validators = Arc::new(Validators::new(genesis, validators)?);
|
else {
|
||||||
|
return;
|
||||||
let mut blockchain = Blockchain::new(db.clone(), genesis, &validators_vec);
|
|
||||||
let block_number = BlockNumber(blockchain.block_number());
|
|
||||||
|
|
||||||
let start_time = if let Some(commit) = blockchain.commit(&blockchain.tip()) {
|
|
||||||
Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time
|
|
||||||
} else {
|
|
||||||
start_time
|
|
||||||
};
|
};
|
||||||
let proposal = TendermintBlock(
|
|
||||||
blockchain.build_block::<TendermintNetwork<D, T, P>>(&validators).serialize(),
|
// If it was already cosigned, return
|
||||||
|
if TributaryDb::cosigned(self.tributary_txn, self.set, latest_substrate_block_to_cosign) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let intent =
|
||||||
|
CosignIntents::take(self.tributary_txn, self.set, latest_substrate_block_to_cosign)
|
||||||
|
.expect("Transaction::Cosign locally provided but CosignIntents wasn't populated");
|
||||||
|
assert_eq!(
|
||||||
|
intent.block_hash, latest_substrate_block_to_cosign,
|
||||||
|
"provided CosignIntent wasn't saved by its block hash"
|
||||||
);
|
);
|
||||||
let blockchain = Arc::new(RwLock::new(blockchain));
|
|
||||||
|
|
||||||
let network = TendermintNetwork { genesis, signer, validators, blockchain, p2p };
|
// Mark us as actively cosigning
|
||||||
|
TributaryDb::start_cosigning(
|
||||||
let TendermintHandle { synced_block, synced_block_result, messages, machine } =
|
self.tributary_txn,
|
||||||
TendermintMachine::new(
|
self.set,
|
||||||
db.clone(),
|
latest_substrate_block_to_cosign,
|
||||||
network.clone(),
|
intent.block_number,
|
||||||
genesis,
|
);
|
||||||
block_number,
|
// Send the message for the processor to start signing
|
||||||
start_time,
|
TributaryDb::send_message(
|
||||||
proposal,
|
self.tributary_txn,
|
||||||
)
|
self.set,
|
||||||
.await;
|
messages::coordinator::CoordinatorMessage::CosignSubstrateBlock {
|
||||||
tokio::spawn(machine.run());
|
session: self.set.session,
|
||||||
|
intent,
|
||||||
Some(Self {
|
},
|
||||||
db,
|
|
||||||
genesis,
|
|
||||||
network,
|
|
||||||
synced_block: Arc::new(RwLock::new(synced_block)),
|
|
||||||
synced_block_result: Arc::new(RwLock::new(synced_block_result)),
|
|
||||||
messages: Arc::new(RwLock::new(messages)),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn block_time() -> u32 {
|
|
||||||
TendermintNetwork::<D, T, P>::block_time()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn genesis(&self) -> [u8; 32] {
|
|
||||||
self.genesis
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn block_number(&self) -> u64 {
|
|
||||||
self.network.blockchain.read().await.block_number()
|
|
||||||
}
|
|
||||||
pub async fn tip(&self) -> [u8; 32] {
|
|
||||||
self.network.blockchain.read().await.tip()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn reader(&self) -> TributaryReader<D, T> {
|
|
||||||
TributaryReader(self.db.clone(), self.genesis, PhantomData)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn provide_transaction(&self, tx: T) -> Result<(), ProvidedError> {
|
|
||||||
self.network.blockchain.write().await.provide_transaction(tx)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn next_nonce(
|
|
||||||
&self,
|
|
||||||
signer: &<Ristretto as Ciphersuite>::G,
|
|
||||||
order: &[u8],
|
|
||||||
) -> Option<u32> {
|
|
||||||
self.network.blockchain.read().await.next_nonce(signer, order)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error.
|
|
||||||
// Safe to be &self since the only meaningful usage of self is self.network.blockchain which
|
|
||||||
// successfully acquires its own write lock
|
|
||||||
pub async fn add_transaction(&self, tx: T) -> Result<bool, TransactionError> {
|
|
||||||
let tx = Transaction::Application(tx);
|
|
||||||
let mut to_broadcast = vec![TRANSACTION_MESSAGE];
|
|
||||||
tx.write(&mut to_broadcast).unwrap();
|
|
||||||
let res = self.network.blockchain.write().await.add_transaction::<TendermintNetwork<D, T, P>>(
|
|
||||||
true,
|
|
||||||
tx,
|
|
||||||
&self.network.signature_scheme(),
|
|
||||||
);
|
);
|
||||||
if res == Ok(true) {
|
|
||||||
self.network.p2p.broadcast(self.genesis, to_broadcast).await;
|
|
||||||
}
|
|
||||||
res
|
|
||||||
}
|
}
|
||||||
|
fn handle_application_tx(&mut self, block_number: u64, tx: Transaction) {
|
||||||
|
let signer = |signed: Signed| SeraiAddress(signed.signer().to_bytes());
|
||||||
|
|
||||||
async fn sync_block_internal(
|
if let TransactionKind::Signed(_, TributarySigned { signer, .. }) = tx.kind() {
|
||||||
&self,
|
// Don't handle transactions from those fatally slashed
|
||||||
block: Block<T>,
|
// TODO: The fact they can publish these TXs makes this a notable spam vector
|
||||||
commit: Vec<u8>,
|
if TributaryDb::is_fatally_slashed(
|
||||||
result: &mut UnboundedReceiver<bool>,
|
self.tributary_txn,
|
||||||
) -> bool {
|
self.set,
|
||||||
let (tip, block_number) = {
|
SeraiAddress(signer.to_bytes()),
|
||||||
let blockchain = self.network.blockchain.read().await;
|
) {
|
||||||
(blockchain.tip(), blockchain.block_number())
|
return;
|
||||||
};
|
}
|
||||||
|
|
||||||
if block.header.parent != tip {
|
|
||||||
log::debug!("told to sync a block whose parent wasn't our tip");
|
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let block = TendermintBlock(block.serialize());
|
match tx {
|
||||||
let mut commit_ref = commit.as_ref();
|
// Accumulate this vote and fatally slash the participant if past the threshold
|
||||||
let Ok(commit) = Commit::<Arc<Validators>>::decode(&mut commit_ref) else {
|
Transaction::RemoveParticipant { participant, signed } => {
|
||||||
log::error!("sent an invalidly serialized commit");
|
let signer = signer(signed);
|
||||||
return false;
|
|
||||||
};
|
|
||||||
// Storage DoS vector. We *could* truncate to solely the relevant portion, trying to save this,
|
|
||||||
// yet then we'd have to test the truncation was performed correctly.
|
|
||||||
if !commit_ref.is_empty() {
|
|
||||||
log::error!("sent an commit with additional data after it");
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
if !self.network.verify_commit(block.id(), &commit) {
|
|
||||||
log::error!("sent an invalid commit");
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
let number = BlockNumber(block_number + 1);
|
// Check the participant voted to be removed actually exists
|
||||||
self.synced_block.write().await.send(SyncedBlock { number, block, commit }).await.unwrap();
|
if !self.validators.iter().any(|validator| *validator == participant) {
|
||||||
result.next().await.unwrap()
|
TributaryDb::fatal_slash(
|
||||||
}
|
self.tributary_txn,
|
||||||
|
self.set,
|
||||||
// Sync a block.
|
signer,
|
||||||
// TODO: Since we have a static validator set, we should only need the tail commit?
|
"voted to remove non-existent participant",
|
||||||
pub async fn sync_block(&self, block: Block<T>, commit: Vec<u8>) -> bool {
|
|
||||||
let mut result = self.synced_block_result.write().await;
|
|
||||||
self.sync_block_internal(block, commit, &mut result).await
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return true if the message should be rebroadcasted.
|
|
||||||
pub async fn handle_message(&self, msg: &[u8]) -> bool {
|
|
||||||
match msg.first() {
|
|
||||||
Some(&TRANSACTION_MESSAGE) => {
|
|
||||||
let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else {
|
|
||||||
log::error!("received invalid transaction message");
|
|
||||||
return false;
|
|
||||||
};
|
|
||||||
|
|
||||||
// TODO: Sync mempools with fellow peers
|
|
||||||
// Can we just rebroadcast transactions not included for at least two blocks?
|
|
||||||
let res =
|
|
||||||
self.network.blockchain.write().await.add_transaction::<TendermintNetwork<D, T, P>>(
|
|
||||||
false,
|
|
||||||
tx,
|
|
||||||
&self.network.signature_scheme(),
|
|
||||||
);
|
);
|
||||||
log::debug!("received transaction message. valid new transaction: {res:?}");
|
return;
|
||||||
res == Ok(true)
|
}
|
||||||
}
|
|
||||||
|
|
||||||
Some(&TENDERMINT_MESSAGE) => {
|
match TributaryDb::accumulate(
|
||||||
let Ok(msg) =
|
self.tributary_txn,
|
||||||
SignedMessageFor::<TendermintNetwork<D, T, P>>::decode::<&[u8]>(&mut &msg[1 ..])
|
self.set,
|
||||||
else {
|
self.validators,
|
||||||
log::error!("received invalid tendermint message");
|
self.total_weight,
|
||||||
return false;
|
block_number,
|
||||||
|
Topic::RemoveParticipant { participant },
|
||||||
|
signer,
|
||||||
|
self.validator_weights[&signer],
|
||||||
|
&(),
|
||||||
|
) {
|
||||||
|
DataSet::None => {}
|
||||||
|
DataSet::Participating(_) => {
|
||||||
|
TributaryDb::fatal_slash(self.tributary_txn, self.set, participant, "voted to remove");
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
self.messages.write().await.send(msg).await.unwrap();
|
|
||||||
false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
_ => false,
|
// Send the participation to the processor
|
||||||
|
Transaction::DkgParticipation { participation, signed } => {
|
||||||
|
TributaryDb::send_message(
|
||||||
|
self.tributary_txn,
|
||||||
|
self.set,
|
||||||
|
messages::key_gen::CoordinatorMessage::Participation {
|
||||||
|
session: self.set.session,
|
||||||
|
participant: todo!("TODO"),
|
||||||
|
participation,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed } => {
|
||||||
|
// Accumulate the preprocesses into our own FROST attempt manager
|
||||||
|
todo!("TODO")
|
||||||
|
}
|
||||||
|
Transaction::DkgConfirmationShare { attempt, share, signed } => {
|
||||||
|
// Accumulate the shares into our own FROST attempt manager
|
||||||
|
todo!("TODO: SetKeysTask")
|
||||||
|
}
|
||||||
|
|
||||||
|
Transaction::Cosign { substrate_block_hash } => {
|
||||||
|
// Update the latest intended-to-be-cosigned Substrate block
|
||||||
|
TributaryDb::set_latest_substrate_block_to_cosign(
|
||||||
|
self.tributary_txn,
|
||||||
|
self.set,
|
||||||
|
substrate_block_hash,
|
||||||
|
);
|
||||||
|
// Start a new cosign if we aren't already working on one
|
||||||
|
self.potentially_start_cosign();
|
||||||
|
}
|
||||||
|
Transaction::Cosigned { substrate_block_hash } => {
|
||||||
|
/*
|
||||||
|
We provide one Cosigned per Cosign transaction, but they have independent orders. This
|
||||||
|
means we may receive Cosigned before Cosign. In order to ensure we only start work on
|
||||||
|
not-yet-Cosigned cosigns, we flag all cosigned blocks as cosigned. Then, when we choose
|
||||||
|
the next block to work on, we won't if it's already been cosigned.
|
||||||
|
*/
|
||||||
|
TributaryDb::mark_cosigned(self.tributary_txn, self.set, substrate_block_hash);
|
||||||
|
|
||||||
|
// If we aren't actively cosigning this block, return
|
||||||
|
// This occurs when we have Cosign TXs A, B, C, we received Cosigned for A and start on C,
|
||||||
|
// and then receive Cosigned for B
|
||||||
|
if TributaryDb::actively_cosigning(self.tributary_txn, self.set) !=
|
||||||
|
Some(substrate_block_hash)
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Since this is the block we were cosigning, mark us as having finished cosigning
|
||||||
|
TributaryDb::finish_cosigning(self.tributary_txn, self.set);
|
||||||
|
|
||||||
|
// Start working on the next cosign
|
||||||
|
self.potentially_start_cosign();
|
||||||
|
}
|
||||||
|
Transaction::SubstrateBlock { hash } => {
|
||||||
|
// Whitelist all of the IDs this Substrate block causes to be signed
|
||||||
|
let plans = SubstrateBlockPlans::take(self.tributary_txn, self.set, hash).expect(
|
||||||
|
"Transaction::SubstrateBlock locally provided but SubstrateBlockPlans wasn't populated",
|
||||||
|
);
|
||||||
|
for plan in plans {
|
||||||
|
TributaryDb::recognize_topic(
|
||||||
|
self.tributary_txn,
|
||||||
|
self.set,
|
||||||
|
Topic::Sign {
|
||||||
|
id: VariantSignId::Transaction(plan),
|
||||||
|
attempt: 0,
|
||||||
|
round: SigningProtocolRound::Preprocess,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Transaction::Batch { hash } => {
|
||||||
|
// Whitelist the signing of this batch
|
||||||
|
TributaryDb::recognize_topic(
|
||||||
|
self.tributary_txn,
|
||||||
|
self.set,
|
||||||
|
Topic::Sign {
|
||||||
|
id: VariantSignId::Batch(hash),
|
||||||
|
attempt: 0,
|
||||||
|
round: SigningProtocolRound::Preprocess,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
Transaction::SlashReport { slash_points, signed } => {
|
||||||
|
let signer = signer(signed);
|
||||||
|
|
||||||
|
if slash_points.len() != self.validators.len() {
|
||||||
|
TributaryDb::fatal_slash(
|
||||||
|
self.tributary_txn,
|
||||||
|
self.set,
|
||||||
|
signer,
|
||||||
|
"slash report was for a distinct amount of signers",
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accumulate, and if past the threshold, calculate *the* slash report and start signing it
|
||||||
|
match TributaryDb::accumulate(
|
||||||
|
self.tributary_txn,
|
||||||
|
self.set,
|
||||||
|
self.validators,
|
||||||
|
self.total_weight,
|
||||||
|
block_number,
|
||||||
|
Topic::SlashReport,
|
||||||
|
signer,
|
||||||
|
self.validator_weights[&signer],
|
||||||
|
&slash_points,
|
||||||
|
) {
|
||||||
|
DataSet::None => {}
|
||||||
|
DataSet::Participating(data_set) => {
|
||||||
|
// Find the median reported slashes for this validator
|
||||||
|
/*
|
||||||
|
TODO: This lets 34% perform a fatal slash. That shouldn't be allowed. We need
|
||||||
|
to accept slash reports for a period past the threshold, and only fatally slash if we
|
||||||
|
have a supermajority agree the slash should be fatal. If there isn't a supermajority,
|
||||||
|
but the median believe the slash should be fatal, we need to fallback to a large
|
||||||
|
constant.
|
||||||
|
|
||||||
|
Also, TODO, each slash point should probably be considered as
|
||||||
|
`MAX_KEY_SHARES_PER_SET * BLOCK_TIME` seconds of downtime. As this time crosses
|
||||||
|
various thresholds (1 day, 3 days, etc), a multiplier should be attached.
|
||||||
|
*/
|
||||||
|
let mut median_slash_report = Vec::with_capacity(self.validators.len());
|
||||||
|
for i in 0 .. self.validators.len() {
|
||||||
|
let mut this_validator =
|
||||||
|
data_set.values().map(|report| report[i]).collect::<Vec<_>>();
|
||||||
|
this_validator.sort_unstable();
|
||||||
|
// Choose the median, where if there are two median values, the lower one is chosen
|
||||||
|
let median_index = if (this_validator.len() % 2) == 1 {
|
||||||
|
this_validator.len() / 2
|
||||||
|
} else {
|
||||||
|
(this_validator.len() / 2) - 1
|
||||||
|
};
|
||||||
|
median_slash_report.push(this_validator[median_index]);
|
||||||
|
}
|
||||||
|
|
||||||
|
// We only publish slashes for the `f` worst performers to:
|
||||||
|
// 1) Effect amnesty if there were network disruptions which affected everyone
|
||||||
|
// 2) Ensure the signing threshold doesn't have a disincentive to do their job
|
||||||
|
|
||||||
|
// Find the worst performer within the signing threshold's slash points
|
||||||
|
let f = (self.validators.len() - 1) / 3;
|
||||||
|
let worst_validator_in_supermajority_slash_points = {
|
||||||
|
let mut sorted_slash_points = median_slash_report.clone();
|
||||||
|
sorted_slash_points.sort_unstable();
|
||||||
|
// This won't be a valid index if `f == 0`, which means we don't have any validators
|
||||||
|
// to slash
|
||||||
|
let index_of_first_validator_to_slash = self.validators.len() - f;
|
||||||
|
let index_of_worst_validator_in_supermajority = index_of_first_validator_to_slash - 1;
|
||||||
|
sorted_slash_points[index_of_worst_validator_in_supermajority]
|
||||||
|
};
|
||||||
|
|
||||||
|
// Perform the amortization
|
||||||
|
for slash_points in &mut median_slash_report {
|
||||||
|
*slash_points =
|
||||||
|
slash_points.saturating_sub(worst_validator_in_supermajority_slash_points)
|
||||||
|
}
|
||||||
|
let amortized_slash_report = median_slash_report;
|
||||||
|
|
||||||
|
// Create the resulting slash report
|
||||||
|
let mut slash_report = vec![];
|
||||||
|
for (validator, points) in self.validators.iter().copied().zip(amortized_slash_report) {
|
||||||
|
// TODO: Natively store this as a `Slash`
|
||||||
|
if points == u32::MAX {
|
||||||
|
slash_report.push(Slash::Fatal);
|
||||||
|
} else {
|
||||||
|
slash_report.push(Slash::Points(points));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert!(slash_report.len() <= f);
|
||||||
|
|
||||||
|
// Recognize the topic for signing the slash report
|
||||||
|
TributaryDb::recognize_topic(
|
||||||
|
self.tributary_txn,
|
||||||
|
self.set,
|
||||||
|
Topic::Sign {
|
||||||
|
id: VariantSignId::SlashReport,
|
||||||
|
attempt: 0,
|
||||||
|
round: SigningProtocolRound::Preprocess,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
// Send the message for the processor to start signing
|
||||||
|
TributaryDb::send_message(
|
||||||
|
self.tributary_txn,
|
||||||
|
self.set,
|
||||||
|
messages::coordinator::CoordinatorMessage::SignSlashReport {
|
||||||
|
session: self.set.session,
|
||||||
|
report: slash_report,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
Transaction::Sign { id, attempt, round, data, signed } => {
|
||||||
|
let topic = Topic::Sign { id, attempt, round };
|
||||||
|
let signer = signer(signed);
|
||||||
|
|
||||||
|
if u64::try_from(data.len()).unwrap() != self.validator_weights[&signer] {
|
||||||
|
TributaryDb::fatal_slash(
|
||||||
|
self.tributary_txn,
|
||||||
|
self.set,
|
||||||
|
signer,
|
||||||
|
"signer signed with a distinct amount of key shares than they had key shares",
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
match TributaryDb::accumulate(
|
||||||
|
self.tributary_txn,
|
||||||
|
self.set,
|
||||||
|
self.validators,
|
||||||
|
self.total_weight,
|
||||||
|
block_number,
|
||||||
|
topic,
|
||||||
|
signer,
|
||||||
|
self.validator_weights[&signer],
|
||||||
|
&data,
|
||||||
|
) {
|
||||||
|
DataSet::None => {}
|
||||||
|
DataSet::Participating(data_set) => {
|
||||||
|
let id = topic.sign_id(self.set).expect("Topic::Sign didn't have SignId");
|
||||||
|
let flatten_data_set = |data_set| todo!("TODO");
|
||||||
|
let data_set = flatten_data_set(data_set);
|
||||||
|
TributaryDb::send_message(
|
||||||
|
self.tributary_txn,
|
||||||
|
self.set,
|
||||||
|
match round {
|
||||||
|
SigningProtocolRound::Preprocess => {
|
||||||
|
messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set }
|
||||||
|
}
|
||||||
|
SigningProtocolRound::Share => {
|
||||||
|
messages::sign::CoordinatorMessage::Shares { id, shares: data_set }
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get a Future which will resolve once the next block has been added.
|
fn handle_block(mut self, block_number: u64, block: Block<Transaction>) {
|
||||||
pub async fn next_block_notification(
|
TributaryDb::start_of_block(self.tributary_txn, self.set, block_number);
|
||||||
&self,
|
|
||||||
) -> impl Send + Sync + core::future::Future<Output = Result<(), impl Send + Sync>> {
|
for tx in block.transactions {
|
||||||
let (tx, rx) = tokio::sync::oneshot::channel();
|
match tx {
|
||||||
self.network.blockchain.write().await.next_block_notifications.push_back(tx);
|
TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => {
|
||||||
rx
|
// Since the evidence is on the chain, it will have already been validated
|
||||||
|
// We can just punish the signer
|
||||||
|
let data = match ev {
|
||||||
|
Evidence::ConflictingMessages(first, second) => (first, Some(second)),
|
||||||
|
Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None),
|
||||||
|
};
|
||||||
|
let msgs = (
|
||||||
|
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(&data.0).unwrap(),
|
||||||
|
if data.1.is_some() {
|
||||||
|
Some(
|
||||||
|
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(&data.1.unwrap())
|
||||||
|
.unwrap(),
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
// Since anything with evidence is fundamentally faulty behavior, not just temporal
|
||||||
|
// errors, mark the node as fatally slashed
|
||||||
|
TributaryDb::fatal_slash(
|
||||||
|
self.tributary_txn,
|
||||||
|
self.set,
|
||||||
|
SeraiAddress(msgs.0.msg.sender),
|
||||||
|
&format!("invalid tendermint messages: {msgs:?}"),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
TributaryTransaction::Application(tx) => {
|
||||||
|
self.handle_application_tx(block_number, tx);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
/// The task to scan the Tributary, populating `ProcessorMessages`.
|
||||||
pub struct TributaryReader<D: Db, T: TransactionTrait>(D, [u8; 32], PhantomData<T>);
|
pub struct ScanTributaryTask<TD: Db, P: P2p> {
|
||||||
impl<D: Db, T: TransactionTrait> TributaryReader<D, T> {
|
tributary_db: TD,
|
||||||
pub fn genesis(&self) -> [u8; 32] {
|
set: ValidatorSet,
|
||||||
self.1
|
validators: Vec<SeraiAddress>,
|
||||||
}
|
total_weight: u64,
|
||||||
|
validator_weights: HashMap<SeraiAddress, u64>,
|
||||||
|
tributary: TributaryReader<TD, Transaction>,
|
||||||
|
_p2p: PhantomData<P>,
|
||||||
|
}
|
||||||
|
|
||||||
// Since these values are static once set, they can be safely read from the database without lock
|
impl<TD: Db, P: P2p> ScanTributaryTask<TD, P> {
|
||||||
// acquisition
|
/// Create a new instance of this task.
|
||||||
pub fn block(&self, hash: &[u8; 32]) -> Option<Block<T>> {
|
pub fn new(
|
||||||
Blockchain::<D, T>::block_from_db(&self.0, self.1, hash)
|
tributary_db: TD,
|
||||||
}
|
new_set: &NewSetInformation,
|
||||||
pub fn commit(&self, hash: &[u8; 32]) -> Option<Vec<u8>> {
|
tributary: TributaryReader<TD, Transaction>,
|
||||||
Blockchain::<D, T>::commit_from_db(&self.0, self.1, hash)
|
) -> Self {
|
||||||
}
|
let mut validators = Vec::with_capacity(new_set.validators.len());
|
||||||
pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option<Commit<Validators>> {
|
let mut total_weight = 0;
|
||||||
self.commit(hash).map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap())
|
let mut validator_weights = HashMap::with_capacity(new_set.validators.len());
|
||||||
}
|
for (validator, weight) in new_set.validators.iter().copied() {
|
||||||
pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> {
|
let validator = SeraiAddress::from(validator);
|
||||||
Blockchain::<D, T>::block_after(&self.0, self.1, hash)
|
let weight = u64::from(weight);
|
||||||
}
|
validators.push(validator);
|
||||||
pub fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> {
|
total_weight += weight;
|
||||||
self
|
validator_weights.insert(validator, weight);
|
||||||
.commit(hash)
|
}
|
||||||
.map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str) -> bool {
|
ScanTributaryTask {
|
||||||
Blockchain::<D, T>::locally_provided_txs_in_block(&self.0, &self.1, hash, order)
|
tributary_db,
|
||||||
}
|
set: new_set.set,
|
||||||
|
validators,
|
||||||
// This isn't static, yet can be read with only minor discrepancy risks
|
total_weight,
|
||||||
pub fn tip(&self) -> [u8; 32] {
|
validator_weights,
|
||||||
Blockchain::<D, T>::tip_from_db(&self.0, self.1)
|
tributary,
|
||||||
|
_p2p: PhantomData,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
|
||||||
|
type Error = String;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
|
async move {
|
||||||
|
let (mut last_block_number, mut last_block_hash) =
|
||||||
|
TributaryDb::last_handled_tributary_block(&self.tributary_db, self.set)
|
||||||
|
.unwrap_or((0, self.tributary.genesis()));
|
||||||
|
|
||||||
|
let mut made_progress = false;
|
||||||
|
while let Some(next) = self.tributary.block_after(&last_block_hash) {
|
||||||
|
let block = self.tributary.block(&next).unwrap();
|
||||||
|
let block_number = last_block_number + 1;
|
||||||
|
let block_hash = block.hash();
|
||||||
|
|
||||||
|
// Make sure we have all of the provided transactions for this block
|
||||||
|
for tx in &block.transactions {
|
||||||
|
let TransactionKind::Provided(order) = tx.kind() else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
// make sure we have all the provided txs in this block locally
|
||||||
|
if !self.tributary.locally_provided_txs_in_block(&block_hash, order) {
|
||||||
|
return Err(format!(
|
||||||
|
"didn't have the provided Transactions on-chain for set (ephemeral error): {:?}",
|
||||||
|
self.set
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut tributary_txn = self.tributary_db.txn();
|
||||||
|
(ScanBlock {
|
||||||
|
_td: PhantomData::<TD>,
|
||||||
|
_p2p: PhantomData::<P>,
|
||||||
|
tributary_txn: &mut tributary_txn,
|
||||||
|
set: self.set,
|
||||||
|
validators: &self.validators,
|
||||||
|
total_weight: self.total_weight,
|
||||||
|
validator_weights: &self.validator_weights,
|
||||||
|
})
|
||||||
|
.handle_block(block_number, block);
|
||||||
|
TributaryDb::set_last_handled_tributary_block(
|
||||||
|
&mut tributary_txn,
|
||||||
|
self.set,
|
||||||
|
block_number,
|
||||||
|
block_hash,
|
||||||
|
);
|
||||||
|
last_block_number = block_number;
|
||||||
|
last_block_hash = block_hash;
|
||||||
|
tributary_txn.commit();
|
||||||
|
|
||||||
|
made_progress = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(made_progress)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create the Transaction::SlashReport to publish per the local view.
|
||||||
|
pub fn slash_report_transaction(getter: &impl Get, set: &NewSetInformation) -> Transaction {
|
||||||
|
let mut slash_points = Vec::with_capacity(set.validators.len());
|
||||||
|
for (validator, _weight) in set.validators.iter().copied() {
|
||||||
|
let validator = SeraiAddress::from(validator);
|
||||||
|
slash_points.push(SlashPoints::get(getter, set.set, validator).unwrap_or(0));
|
||||||
|
}
|
||||||
|
Transaction::SlashReport { slash_points, signed: Signed::default() }
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,218 +1,365 @@
|
|||||||
use core::fmt::Debug;
|
use core::{ops::Deref, fmt::Debug};
|
||||||
use std::io;
|
use std::io;
|
||||||
|
|
||||||
use zeroize::Zeroize;
|
use zeroize::Zeroizing;
|
||||||
use thiserror::Error;
|
use rand_core::{RngCore, CryptoRng};
|
||||||
|
|
||||||
use blake2::{Digest, Blake2b512};
|
|
||||||
|
|
||||||
|
use blake2::{digest::typenum::U32, Digest, Blake2b};
|
||||||
use ciphersuite::{
|
use ciphersuite::{
|
||||||
group::{Group, GroupEncoding},
|
group::{ff::Field, Group, GroupEncoding},
|
||||||
Ciphersuite, Ristretto,
|
Ciphersuite, Ristretto,
|
||||||
};
|
};
|
||||||
use schnorr::SchnorrSignature;
|
use schnorr::SchnorrSignature;
|
||||||
|
|
||||||
use crate::{TRANSACTION_SIZE_LIMIT, ReadWrite};
|
use scale::Encode;
|
||||||
|
use borsh::{BorshSerialize, BorshDeserialize};
|
||||||
|
|
||||||
#[derive(Clone, PartialEq, Eq, Debug, Error)]
|
use serai_client::{primitives::SeraiAddress, validator_sets::primitives::MAX_KEY_SHARES_PER_SET};
|
||||||
pub enum TransactionError {
|
|
||||||
/// Transaction exceeded the size limit.
|
use messages::sign::VariantSignId;
|
||||||
#[error("transaction is too large")]
|
|
||||||
TooLargeTransaction,
|
use tributary_sdk::{
|
||||||
/// Transaction's signer isn't a participant.
|
ReadWrite,
|
||||||
#[error("invalid signer")]
|
transaction::{
|
||||||
InvalidSigner,
|
Signed as TributarySigned, TransactionError, TransactionKind, Transaction as TransactionTrait,
|
||||||
/// Transaction's nonce isn't the prior nonce plus one.
|
},
|
||||||
#[error("invalid nonce")]
|
};
|
||||||
InvalidNonce,
|
|
||||||
/// Transaction's signature is invalid.
|
/// The round this data is for, within a signing protocol.
|
||||||
#[error("invalid signature")]
|
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
|
||||||
InvalidSignature,
|
pub enum SigningProtocolRound {
|
||||||
/// Transaction's content is invalid.
|
/// A preprocess.
|
||||||
#[error("transaction content is invalid")]
|
Preprocess,
|
||||||
InvalidContent,
|
/// A signature share.
|
||||||
/// Transaction's signer has too many transactions in the mempool.
|
Share,
|
||||||
#[error("signer has too many transactions in the mempool")]
|
|
||||||
TooManyInMempool,
|
|
||||||
/// Provided Transaction added to mempool.
|
|
||||||
#[error("provided transaction added to mempool")]
|
|
||||||
ProvidedAddedToMempool,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Data for a signed transaction.
|
impl SigningProtocolRound {
|
||||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
fn nonce(&self) -> u32 {
|
||||||
pub struct Signed {
|
match self {
|
||||||
pub signer: <Ristretto as Ciphersuite>::G,
|
SigningProtocolRound::Preprocess => 0,
|
||||||
pub nonce: u32,
|
SigningProtocolRound::Share => 1,
|
||||||
pub signature: SchnorrSignature<Ristretto>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ReadWrite for Signed {
|
|
||||||
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
|
||||||
let signer = Ristretto::read_G(reader)?;
|
|
||||||
|
|
||||||
let mut nonce = [0; 4];
|
|
||||||
reader.read_exact(&mut nonce)?;
|
|
||||||
let nonce = u32::from_le_bytes(nonce);
|
|
||||||
if nonce >= (u32::MAX - 1) {
|
|
||||||
Err(io::Error::other("nonce exceeded limit"))?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut signature = SchnorrSignature::<Ristretto>::read(reader)?;
|
|
||||||
if signature.R.is_identity().into() {
|
|
||||||
// Anyone malicious could remove this and try to find zero signatures
|
|
||||||
// We should never produce zero signatures though meaning this should never come up
|
|
||||||
// If it does somehow come up, this is a decent courtesy
|
|
||||||
signature.zeroize();
|
|
||||||
Err(io::Error::other("signature nonce was identity"))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Signed { signer, nonce, signature })
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
/// `tributary::Signed` but without the nonce.
|
||||||
// This is either an invalid signature or a private key leak
|
///
|
||||||
if self.signature.R.is_identity().into() {
|
/// All of our nonces are deterministic to the type of transaction and fields within.
|
||||||
Err(io::Error::other("signature nonce was identity"))?;
|
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
||||||
}
|
pub struct Signed {
|
||||||
writer.write_all(&self.signer.to_bytes())?;
|
/// The signer.
|
||||||
writer.write_all(&self.nonce.to_le_bytes())?;
|
signer: <Ristretto as Ciphersuite>::G,
|
||||||
|
/// The signature.
|
||||||
|
signature: SchnorrSignature<Ristretto>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl BorshSerialize for Signed {
|
||||||
|
fn serialize<W: io::Write>(&self, writer: &mut W) -> Result<(), io::Error> {
|
||||||
|
writer.write_all(self.signer.to_bytes().as_ref())?;
|
||||||
self.signature.write(writer)
|
self.signature.write(writer)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
impl BorshDeserialize for Signed {
|
||||||
|
fn deserialize_reader<R: io::Read>(reader: &mut R) -> Result<Self, io::Error> {
|
||||||
|
let signer = Ristretto::read_G(reader)?;
|
||||||
|
let signature = SchnorrSignature::read(reader)?;
|
||||||
|
Ok(Self { signer, signature })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl Signed {
|
impl Signed {
|
||||||
pub fn read_without_nonce<R: io::Read>(reader: &mut R, nonce: u32) -> io::Result<Self> {
|
/// Fetch the signer.
|
||||||
let signer = Ristretto::read_G(reader)?;
|
pub(crate) fn signer(&self) -> <Ristretto as Ciphersuite>::G {
|
||||||
|
self.signer
|
||||||
let mut signature = SchnorrSignature::<Ristretto>::read(reader)?;
|
|
||||||
if signature.R.is_identity().into() {
|
|
||||||
// Anyone malicious could remove this and try to find zero signatures
|
|
||||||
// We should never produce zero signatures though meaning this should never come up
|
|
||||||
// If it does somehow come up, this is a decent courtesy
|
|
||||||
signature.zeroize();
|
|
||||||
Err(io::Error::other("signature nonce was identity"))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Signed { signer, nonce, signature })
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn write_without_nonce<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
/// Provide a nonce to convert a `Signed` into a `tributary::Signed`.
|
||||||
// This is either an invalid signature or a private key leak
|
fn to_tributary_signed(self, nonce: u32) -> TributarySigned {
|
||||||
if self.signature.R.is_identity().into() {
|
TributarySigned { signer: self.signer, nonce, signature: self.signature }
|
||||||
Err(io::Error::other("signature nonce was identity"))?;
|
|
||||||
}
|
|
||||||
writer.write_all(&self.signer.to_bytes())?;
|
|
||||||
self.signature.write(writer)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(clippy::large_enum_variant)]
|
impl Default for Signed {
|
||||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
fn default() -> Self {
|
||||||
pub enum TransactionKind {
|
Self {
|
||||||
/// This transaction should be provided by every validator, in an exact order.
|
signer: <Ristretto as Ciphersuite>::G::identity(),
|
||||||
///
|
signature: SchnorrSignature {
|
||||||
/// The contained static string names the orderer to use. This allows two distinct provided
|
R: <Ristretto as Ciphersuite>::G::identity(),
|
||||||
/// transaction kinds, without a synchronized order, to be ordered within their own kind without
|
s: <Ristretto as Ciphersuite>::F::ZERO,
|
||||||
/// requiring ordering with each other.
|
},
|
||||||
///
|
}
|
||||||
/// The only malleability is in when this transaction appears on chain. The block producer will
|
}
|
||||||
/// include it when they have it. Block verification will fail for validators without it.
|
|
||||||
///
|
|
||||||
/// If a supermajority of validators produce a commit for a block with a provided transaction
|
|
||||||
/// which isn't locally held, the block will be added to the local chain. When the transaction is
|
|
||||||
/// locally provided, it will be compared for correctness to the on-chain version
|
|
||||||
///
|
|
||||||
/// In order to ensure TXs aren't accidentally provided multiple times, all provided transactions
|
|
||||||
/// must have a unique hash which is also unique to all Unsigned transactions.
|
|
||||||
Provided(&'static str),
|
|
||||||
|
|
||||||
/// An unsigned transaction, only able to be included by the block producer.
|
|
||||||
///
|
|
||||||
/// Once an Unsigned transaction is included on-chain, it may not be included again. In order to
|
|
||||||
/// have multiple Unsigned transactions with the same values included on-chain, some distinct
|
|
||||||
/// nonce must be included in order to cause a distinct hash.
|
|
||||||
///
|
|
||||||
/// The hash must also be unique with all Provided transactions.
|
|
||||||
Unsigned,
|
|
||||||
|
|
||||||
/// A signed transaction.
|
|
||||||
Signed(Vec<u8>, Signed),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Should this be renamed TransactionTrait now that a literal Transaction exists?
|
/// The Tributary transaction definition used by Serai
|
||||||
// Or should the literal Transaction be renamed to Event?
|
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||||
pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite {
|
pub enum Transaction {
|
||||||
/// Return what type of transaction this is.
|
/// A vote to remove a participant for invalid behavior
|
||||||
fn kind(&self) -> TransactionKind;
|
RemoveParticipant {
|
||||||
|
/// The participant to remove
|
||||||
|
participant: SeraiAddress,
|
||||||
|
/// The transaction's signer and signature
|
||||||
|
signed: Signed,
|
||||||
|
},
|
||||||
|
|
||||||
/// Return the hash of this transaction.
|
/// A participation in the DKG
|
||||||
///
|
DkgParticipation {
|
||||||
/// The hash must NOT commit to the signature.
|
/// The serialized participation
|
||||||
fn hash(&self) -> [u8; 32];
|
participation: Vec<u8>,
|
||||||
|
/// The transaction's signer and signature
|
||||||
|
signed: Signed,
|
||||||
|
},
|
||||||
|
/// The preprocess to confirm the DKG results on-chain
|
||||||
|
DkgConfirmationPreprocess {
|
||||||
|
/// The attempt number of this signing protocol
|
||||||
|
attempt: u32,
|
||||||
|
/// The preprocess
|
||||||
|
preprocess: [u8; 64],
|
||||||
|
/// The transaction's signer and signature
|
||||||
|
signed: Signed,
|
||||||
|
},
|
||||||
|
/// The signature share to confirm the DKG results on-chain
|
||||||
|
DkgConfirmationShare {
|
||||||
|
/// The attempt number of this signing protocol
|
||||||
|
attempt: u32,
|
||||||
|
/// The signature share
|
||||||
|
share: [u8; 32],
|
||||||
|
/// The transaction's signer and signature
|
||||||
|
signed: Signed,
|
||||||
|
},
|
||||||
|
|
||||||
/// Perform transaction-specific verification.
|
/// Intend to cosign a finalized Substrate block
|
||||||
fn verify(&self) -> Result<(), TransactionError>;
|
///
|
||||||
|
/// When the time comes to start a new cosigning protocol, the most recent Substrate block will
|
||||||
|
/// be the one selected to be cosigned.
|
||||||
|
Cosign {
|
||||||
|
/// The hash of the Substrate block to cosign
|
||||||
|
substrate_block_hash: [u8; 32],
|
||||||
|
},
|
||||||
|
|
||||||
/// Obtain the challenge for this transaction's signature.
|
/// Note an intended-to-be-cosigned Substrate block as cosigned
|
||||||
///
|
///
|
||||||
/// Do not override this unless you know what you're doing.
|
/// After producing this cosign, we need to start work on the latest intended-to-be cosigned
|
||||||
|
/// block. That requires agreement on when this cosign was produced, which we solve by noting
|
||||||
|
/// this cosign on-chain.
|
||||||
///
|
///
|
||||||
/// Panics if called on non-signed transactions.
|
/// We ideally don't have this transaction at all. The coordinator, without access to any of the
|
||||||
fn sig_hash(&self, genesis: [u8; 32]) -> <Ristretto as Ciphersuite>::F {
|
/// key shares, could observe the FROST signing session and determine a successful completion.
|
||||||
match self.kind() {
|
/// Unfortunately, that functionality is not present in modular-frost, so we do need to support
|
||||||
TransactionKind::Signed(order, Signed { signature, .. }) => {
|
/// *some* asynchronous flow (where the processor or P2P network informs us of the successful
|
||||||
<Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(
|
/// completion).
|
||||||
&Blake2b512::digest(
|
///
|
||||||
[
|
/// If we use a `Provided` transaction, that requires everyone observe this cosign.
|
||||||
b"Tributary Signed Transaction",
|
///
|
||||||
genesis.as_ref(),
|
/// If we use an `Unsigned` transaction, we can't verify the cosign signature inside
|
||||||
&self.hash(),
|
/// `Transaction::verify` unless we embedded the full `SignedCosign` on-chain. The issue is since
|
||||||
order.as_ref(),
|
/// a Tributary is stateless with regards to the on-chain logic, including `Transaction::verify`,
|
||||||
signature.R.to_bytes().as_ref(),
|
/// we can't verify the signature against the group's public key unless we also include that (but
|
||||||
]
|
/// then we open a DoS where arbitrary group keys are specified to cause inclusion of arbitrary
|
||||||
.concat(),
|
/// blobs on chain).
|
||||||
)
|
///
|
||||||
.into(),
|
/// If we use a `Signed` transaction, we mitigate the DoS risk by having someone to fatally
|
||||||
)
|
/// slash. We have horrible performance though as for 100 validators, all 100 will publish this
|
||||||
|
/// transaction.
|
||||||
|
///
|
||||||
|
/// We could use a signed `Unsigned` transaction, where it includes a signer and signature but
|
||||||
|
/// isn't technically a Signed transaction. This lets us de-duplicate the transaction premised on
|
||||||
|
/// its contents.
|
||||||
|
///
|
||||||
|
/// The optimal choice is likely to use a `Provided` transaction. We don't actually need to
|
||||||
|
/// observe the produced cosign (which is ephemeral). As long as it's agreed the cosign in
|
||||||
|
/// question no longer needs to produced, which would mean the cosigning protocol at-large
|
||||||
|
/// cosigning the block in question, it'd be safe to provide this and move on to the next cosign.
|
||||||
|
Cosigned {
|
||||||
|
/// The hash of the Substrate block which was cosigned
|
||||||
|
substrate_block_hash: [u8; 32],
|
||||||
|
},
|
||||||
|
|
||||||
|
/// Acknowledge a Substrate block
|
||||||
|
///
|
||||||
|
/// This is provided after the block has been cosigned.
|
||||||
|
///
|
||||||
|
/// With the acknowledgement of a Substrate block, we can whitelist all the `VariantSignId`s
|
||||||
|
/// resulting from its handling.
|
||||||
|
SubstrateBlock {
|
||||||
|
/// The hash of the Substrate block
|
||||||
|
hash: [u8; 32],
|
||||||
|
},
|
||||||
|
|
||||||
|
/// Acknowledge a Batch
|
||||||
|
///
|
||||||
|
/// Once everyone has acknowledged the Batch, we can begin signing it.
|
||||||
|
Batch {
|
||||||
|
/// The hash of the Batch's serialization.
|
||||||
|
///
|
||||||
|
/// Generally, we refer to a Batch by its ID/the hash of its instructions. Here, we want to
|
||||||
|
/// ensure consensus on the Batch, and achieving consensus on its hash is the most effective
|
||||||
|
/// way to do that.
|
||||||
|
hash: [u8; 32],
|
||||||
|
},
|
||||||
|
|
||||||
|
/// Data from a signing protocol.
|
||||||
|
Sign {
|
||||||
|
/// The ID of the object being signed
|
||||||
|
id: VariantSignId,
|
||||||
|
/// The attempt number of this signing protocol
|
||||||
|
attempt: u32,
|
||||||
|
/// The round this data is for, within the signing protocol
|
||||||
|
round: SigningProtocolRound,
|
||||||
|
/// The data itself
|
||||||
|
///
|
||||||
|
/// There will be `n` blobs of data where `n` is the amount of key shares the validator sending
|
||||||
|
/// this transaction has.
|
||||||
|
data: Vec<Vec<u8>>,
|
||||||
|
/// The transaction's signer and signature
|
||||||
|
signed: Signed,
|
||||||
|
},
|
||||||
|
|
||||||
|
/// The local view of slashes observed by the transaction's sender
|
||||||
|
SlashReport {
|
||||||
|
/// The slash points accrued by each validator
|
||||||
|
slash_points: Vec<u32>,
|
||||||
|
/// The transaction's signer and signature
|
||||||
|
signed: Signed,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ReadWrite for Transaction {
|
||||||
|
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||||
|
borsh::from_reader(reader)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||||
|
borsh::to_writer(writer, self)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TransactionTrait for Transaction {
|
||||||
|
fn kind(&self) -> TransactionKind {
|
||||||
|
match self {
|
||||||
|
Transaction::RemoveParticipant { participant, signed } => TransactionKind::Signed(
|
||||||
|
(b"RemoveParticipant", participant).encode(),
|
||||||
|
signed.to_tributary_signed(0),
|
||||||
|
),
|
||||||
|
|
||||||
|
Transaction::DkgParticipation { signed, .. } => {
|
||||||
|
TransactionKind::Signed(b"DkgParticipation".encode(), signed.to_tributary_signed(0))
|
||||||
|
}
|
||||||
|
Transaction::DkgConfirmationPreprocess { attempt, signed, .. } => TransactionKind::Signed(
|
||||||
|
(b"DkgConfirmation", attempt).encode(),
|
||||||
|
signed.to_tributary_signed(0),
|
||||||
|
),
|
||||||
|
Transaction::DkgConfirmationShare { attempt, signed, .. } => TransactionKind::Signed(
|
||||||
|
(b"DkgConfirmation", attempt).encode(),
|
||||||
|
signed.to_tributary_signed(1),
|
||||||
|
),
|
||||||
|
|
||||||
|
Transaction::Cosign { .. } => TransactionKind::Provided("Cosign"),
|
||||||
|
Transaction::Cosigned { .. } => TransactionKind::Provided("Cosigned"),
|
||||||
|
// TODO: Provide this
|
||||||
|
Transaction::SubstrateBlock { .. } => TransactionKind::Provided("SubstrateBlock"),
|
||||||
|
// TODO: Provide this
|
||||||
|
Transaction::Batch { .. } => TransactionKind::Provided("Batch"),
|
||||||
|
|
||||||
|
Transaction::Sign { id, attempt, round, signed, .. } => TransactionKind::Signed(
|
||||||
|
(b"Sign", id, attempt).encode(),
|
||||||
|
signed.to_tributary_signed(round.nonce()),
|
||||||
|
),
|
||||||
|
|
||||||
|
Transaction::SlashReport { signed, .. } => {
|
||||||
|
TransactionKind::Signed(b"SlashReport".encode(), signed.to_tributary_signed(0))
|
||||||
}
|
}
|
||||||
_ => panic!("sig_hash called on non-signed transaction"),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
pub trait GAIN: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32> {}
|
fn hash(&self) -> [u8; 32] {
|
||||||
impl<F: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32>> GAIN for F {}
|
let mut tx = ReadWrite::serialize(self);
|
||||||
|
if let TransactionKind::Signed(_, signed) = self.kind() {
|
||||||
pub(crate) fn verify_transaction<F: GAIN, T: Transaction>(
|
// Make sure the part we're cutting off is the signature
|
||||||
tx: &T,
|
assert_eq!(tx.drain((tx.len() - 64) ..).collect::<Vec<_>>(), signed.signature.serialize());
|
||||||
genesis: [u8; 32],
|
}
|
||||||
get_and_increment_nonce: &mut F,
|
Blake2b::<U32>::digest(&tx).into()
|
||||||
) -> Result<(), TransactionError> {
|
|
||||||
if tx.serialize().len() > TRANSACTION_SIZE_LIMIT {
|
|
||||||
Err(TransactionError::TooLargeTransaction)?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
tx.verify()?;
|
// This is a stateless verification which we use to enforce some size limits.
|
||||||
|
fn verify(&self) -> Result<(), TransactionError> {
|
||||||
|
#[allow(clippy::match_same_arms)]
|
||||||
|
match self {
|
||||||
|
// Fixed-length TX
|
||||||
|
Transaction::RemoveParticipant { .. } => {}
|
||||||
|
|
||||||
match tx.kind() {
|
// TODO: MAX_DKG_PARTICIPATION_LEN
|
||||||
TransactionKind::Provided(_) | TransactionKind::Unsigned => {}
|
Transaction::DkgParticipation { .. } => {}
|
||||||
TransactionKind::Signed(order, Signed { signer, nonce, signature }) => {
|
// These are fixed-length TXs
|
||||||
if let Some(next_nonce) = get_and_increment_nonce(&signer, &order) {
|
Transaction::DkgConfirmationPreprocess { .. } | Transaction::DkgConfirmationShare { .. } => {}
|
||||||
if nonce != next_nonce {
|
|
||||||
Err(TransactionError::InvalidNonce)?;
|
// Provided TXs
|
||||||
|
Transaction::Cosign { .. } |
|
||||||
|
Transaction::Cosigned { .. } |
|
||||||
|
Transaction::SubstrateBlock { .. } |
|
||||||
|
Transaction::Batch { .. } => {}
|
||||||
|
|
||||||
|
Transaction::Sign { data, .. } => {
|
||||||
|
if data.len() > usize::from(MAX_KEY_SHARES_PER_SET) {
|
||||||
|
Err(TransactionError::InvalidContent)?
|
||||||
}
|
}
|
||||||
} else {
|
// TODO: MAX_SIGN_LEN
|
||||||
// Not a participant
|
|
||||||
Err(TransactionError::InvalidSigner)?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Use a batch verification here
|
Transaction::SlashReport { slash_points, .. } => {
|
||||||
if !signature.verify(signer, tx.sig_hash(genesis)) {
|
if slash_points.len() > usize::from(MAX_KEY_SHARES_PER_SET) {
|
||||||
Err(TransactionError::InvalidSignature)?;
|
Err(TransactionError::InvalidContent)?
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Transaction {
|
||||||
|
/// Sign a transaction.
|
||||||
|
///
|
||||||
|
/// Panics if signing a transaction whose type isn't `TransactionKind::Signed`.
|
||||||
|
pub fn sign<R: RngCore + CryptoRng>(
|
||||||
|
&mut self,
|
||||||
|
rng: &mut R,
|
||||||
|
genesis: [u8; 32],
|
||||||
|
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||||
|
) {
|
||||||
|
fn signed(tx: &mut Transaction) -> &mut Signed {
|
||||||
|
#[allow(clippy::match_same_arms)] // This doesn't make semantic sense here
|
||||||
|
match tx {
|
||||||
|
Transaction::RemoveParticipant { ref mut signed, .. } |
|
||||||
|
Transaction::DkgParticipation { ref mut signed, .. } |
|
||||||
|
Transaction::DkgConfirmationPreprocess { ref mut signed, .. } => signed,
|
||||||
|
Transaction::DkgConfirmationShare { ref mut signed, .. } => signed,
|
||||||
|
|
||||||
|
Transaction::Cosign { .. } => panic!("signing CosignSubstrateBlock"),
|
||||||
|
Transaction::Cosigned { .. } => panic!("signing Cosigned"),
|
||||||
|
Transaction::SubstrateBlock { .. } => panic!("signing SubstrateBlock"),
|
||||||
|
Transaction::Batch { .. } => panic!("signing Batch"),
|
||||||
|
|
||||||
|
Transaction::Sign { ref mut signed, .. } => signed,
|
||||||
|
|
||||||
|
Transaction::SlashReport { ref mut signed, .. } => signed,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
// Decide the nonce to sign with
|
||||||
|
let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));
|
||||||
|
|
||||||
|
{
|
||||||
|
// Set the signer and the nonce
|
||||||
|
let signed = signed(self);
|
||||||
|
signed.signer = Ristretto::generator() * key.deref();
|
||||||
|
signed.signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the signature hash (which now includes `R || A` making it valid as the challenge)
|
||||||
|
let sig_hash = self.sig_hash(genesis);
|
||||||
|
|
||||||
|
// Sign the signature
|
||||||
|
signed(self).signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -72,9 +72,10 @@ exceptions = [
|
|||||||
{ allow = ["AGPL-3.0"], name = "serai-ethereum-processor" },
|
{ allow = ["AGPL-3.0"], name = "serai-ethereum-processor" },
|
||||||
{ allow = ["AGPL-3.0"], name = "serai-monero-processor" },
|
{ allow = ["AGPL-3.0"], name = "serai-monero-processor" },
|
||||||
|
|
||||||
{ allow = ["AGPL-3.0"], name = "tributary-chain" },
|
{ allow = ["AGPL-3.0"], name = "tributary-sdk" },
|
||||||
{ allow = ["AGPL-3.0"], name = "serai-cosign" },
|
{ allow = ["AGPL-3.0"], name = "serai-cosign" },
|
||||||
{ allow = ["AGPL-3.0"], name = "serai-coordinator-substrate" },
|
{ allow = ["AGPL-3.0"], name = "serai-coordinator-substrate" },
|
||||||
|
{ allow = ["AGPL-3.0"], name = "serai-coordinator-tributary" },
|
||||||
{ allow = ["AGPL-3.0"], name = "serai-coordinator-p2p" },
|
{ allow = ["AGPL-3.0"], name = "serai-coordinator-p2p" },
|
||||||
{ allow = ["AGPL-3.0"], name = "serai-coordinator-libp2p-p2p" },
|
{ allow = ["AGPL-3.0"], name = "serai-coordinator-libp2p-p2p" },
|
||||||
{ allow = ["AGPL-3.0"], name = "serai-coordinator" },
|
{ allow = ["AGPL-3.0"], name = "serai-coordinator" },
|
||||||
|
|||||||
@@ -64,22 +64,20 @@ impl MessageQueue {
|
|||||||
Self::new(service, url, priv_key)
|
Self::new(service, url, priv_key)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[must_use]
|
async fn send(socket: &mut TcpStream, msg: MessageQueueRequest) -> Result<(), String> {
|
||||||
async fn send(socket: &mut TcpStream, msg: MessageQueueRequest) -> bool {
|
|
||||||
let msg = borsh::to_vec(&msg).unwrap();
|
let msg = borsh::to_vec(&msg).unwrap();
|
||||||
let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else {
|
match socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await {
|
||||||
log::warn!("couldn't send the message len");
|
Ok(()) => {}
|
||||||
return false;
|
Err(e) => Err(format!("couldn't send the message len: {e:?}"))?,
|
||||||
};
|
};
|
||||||
let Ok(()) = socket.write_all(&msg).await else {
|
match socket.write_all(&msg).await {
|
||||||
log::warn!("couldn't write the message");
|
Ok(()) => {}
|
||||||
return false;
|
Err(e) => Err(format!("couldn't write the message: {e:?}"))?,
|
||||||
};
|
}
|
||||||
true
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn queue(&self, metadata: Metadata, msg: Vec<u8>) {
|
pub async fn queue(&self, metadata: Metadata, msg: Vec<u8>) -> Result<(), String> {
|
||||||
// TODO: Should this use OsRng? Deterministic or deterministic + random may be better.
|
|
||||||
let nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
|
let nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
|
||||||
let nonce_pub = Ristretto::generator() * nonce.deref();
|
let nonce_pub = Ristretto::generator() * nonce.deref();
|
||||||
let sig = SchnorrSignature::<Ristretto>::sign(
|
let sig = SchnorrSignature::<Ristretto>::sign(
|
||||||
@@ -97,6 +95,21 @@ impl MessageQueue {
|
|||||||
.serialize();
|
.serialize();
|
||||||
|
|
||||||
let msg = MessageQueueRequest::Queue { meta: metadata, msg, sig };
|
let msg = MessageQueueRequest::Queue { meta: metadata, msg, sig };
|
||||||
|
|
||||||
|
let mut socket = match TcpStream::connect(&self.url).await {
|
||||||
|
Ok(socket) => socket,
|
||||||
|
Err(e) => Err(format!("failed to connect to the message-queue service: {e:?}"))?,
|
||||||
|
};
|
||||||
|
Self::send(&mut socket, msg.clone()).await?;
|
||||||
|
match socket.read_u8().await {
|
||||||
|
Ok(1) => {}
|
||||||
|
Ok(b) => Err(format!("message-queue didn't return for 1 for its ack, recieved: {b}"))?,
|
||||||
|
Err(e) => Err(format!("failed to read the response from the message-queue service: {e:?}"))?,
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn queue_with_retry(&self, metadata: Metadata, msg: Vec<u8>) {
|
||||||
let mut first = true;
|
let mut first = true;
|
||||||
loop {
|
loop {
|
||||||
// Sleep, so we don't hammer re-attempts
|
// Sleep, so we don't hammer re-attempts
|
||||||
@@ -105,14 +118,9 @@ impl MessageQueue {
|
|||||||
}
|
}
|
||||||
first = false;
|
first = false;
|
||||||
|
|
||||||
let Ok(mut socket) = TcpStream::connect(&self.url).await else { continue };
|
if self.queue(metadata.clone(), msg.clone()).await.is_ok() {
|
||||||
if !Self::send(&mut socket, msg.clone()).await {
|
break;
|
||||||
continue;
|
|
||||||
}
|
}
|
||||||
if socket.read_u8().await.ok() != Some(1) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -136,7 +144,7 @@ impl MessageQueue {
|
|||||||
log::trace!("opened socket for next");
|
log::trace!("opened socket for next");
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
if !Self::send(&mut socket, msg.clone()).await {
|
if Self::send(&mut socket, msg.clone()).await.is_err() {
|
||||||
continue 'outer;
|
continue 'outer;
|
||||||
}
|
}
|
||||||
let status = match socket.read_u8().await {
|
let status = match socket.read_u8().await {
|
||||||
@@ -224,7 +232,7 @@ impl MessageQueue {
|
|||||||
first = false;
|
first = false;
|
||||||
|
|
||||||
let Ok(mut socket) = TcpStream::connect(&self.url).await else { continue };
|
let Ok(mut socket) = TcpStream::connect(&self.url).await else { continue };
|
||||||
if !Self::send(&mut socket, msg.clone()).await {
|
if Self::send(&mut socket, msg.clone()).await.is_err() {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if socket.read_u8().await.ok() != Some(1) {
|
if socket.read_u8().await.ok() != Some(1) {
|
||||||
|
|||||||
@@ -95,6 +95,7 @@ impl Coordinator {
|
|||||||
message_queue.ack(Service::Coordinator, msg.id).await;
|
message_queue.ack(Service::Coordinator, msg.id).await;
|
||||||
|
|
||||||
// Fire that there's a new message
|
// Fire that there's a new message
|
||||||
|
// This assumes the success path, not the just-rebooted-path
|
||||||
received_message_send
|
received_message_send
|
||||||
.send(())
|
.send(())
|
||||||
.expect("failed to tell the Coordinator there's a new message");
|
.expect("failed to tell the Coordinator there's a new message");
|
||||||
@@ -103,6 +104,7 @@ impl Coordinator {
|
|||||||
});
|
});
|
||||||
|
|
||||||
// Spawn a task to send messages to the message-queue
|
// Spawn a task to send messages to the message-queue
|
||||||
|
// TODO: Define a proper task for this and remove use of queue_with_retry
|
||||||
tokio::spawn({
|
tokio::spawn({
|
||||||
let mut db = db.clone();
|
let mut db = db.clone();
|
||||||
async move {
|
async move {
|
||||||
@@ -115,12 +117,12 @@ impl Coordinator {
|
|||||||
to: Service::Coordinator,
|
to: Service::Coordinator,
|
||||||
intent: borsh::from_slice::<messages::ProcessorMessage>(&msg).unwrap().intent(),
|
intent: borsh::from_slice::<messages::ProcessorMessage>(&msg).unwrap().intent(),
|
||||||
};
|
};
|
||||||
message_queue.queue(metadata, msg).await;
|
message_queue.queue_with_retry(metadata, msg).await;
|
||||||
txn.commit();
|
txn.commit();
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
let _ =
|
let _ =
|
||||||
tokio::time::timeout(core::time::Duration::from_secs(60), sent_message_recv.recv())
|
tokio::time::timeout(core::time::Duration::from_secs(6), sent_message_recv.recv())
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -39,7 +39,9 @@ pub(crate) fn script_pubkey_for_on_chain_output(
|
|||||||
pub(crate) struct TxIndexTask<D: Db>(pub(crate) Rpc<D>);
|
pub(crate) struct TxIndexTask<D: Db>(pub(crate) Rpc<D>);
|
||||||
|
|
||||||
impl<D: Db> ContinuallyRan for TxIndexTask<D> {
|
impl<D: Db> ContinuallyRan for TxIndexTask<D> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = String;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let latest_block_number = self
|
let latest_block_number = self
|
||||||
.0
|
.0
|
||||||
|
|||||||
@@ -29,3 +29,5 @@ serai-primitives = { path = "../../substrate/primitives", default-features = fal
|
|||||||
in-instructions-primitives = { package = "serai-in-instructions-primitives", path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std", "borsh"] }
|
in-instructions-primitives = { package = "serai-in-instructions-primitives", path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std", "borsh"] }
|
||||||
coins-primitives = { package = "serai-coins-primitives", path = "../../substrate/coins/primitives", default-features = false, features = ["std", "borsh"] }
|
coins-primitives = { package = "serai-coins-primitives", path = "../../substrate/coins/primitives", default-features = false, features = ["std", "borsh"] }
|
||||||
validator-sets-primitives = { package = "serai-validator-sets-primitives", path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std", "borsh"] }
|
validator-sets-primitives = { package = "serai-validator-sets-primitives", path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std", "borsh"] }
|
||||||
|
|
||||||
|
serai-cosign = { path = "../../coordinator/cosign", default-features = false }
|
||||||
|
|||||||
@@ -11,6 +11,8 @@ use validator_sets_primitives::{Session, KeyPair, Slash};
|
|||||||
use coins_primitives::OutInstructionWithBalance;
|
use coins_primitives::OutInstructionWithBalance;
|
||||||
use in_instructions_primitives::SignedBatch;
|
use in_instructions_primitives::SignedBatch;
|
||||||
|
|
||||||
|
use serai_cosign::{CosignIntent, SignedCosign};
|
||||||
|
|
||||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||||
pub struct SubstrateContext {
|
pub struct SubstrateContext {
|
||||||
pub serai_time: u64,
|
pub serai_time: u64,
|
||||||
@@ -24,7 +26,7 @@ pub mod key_gen {
|
|||||||
pub enum CoordinatorMessage {
|
pub enum CoordinatorMessage {
|
||||||
/// Instructs the Processor to begin the key generation process.
|
/// Instructs the Processor to begin the key generation process.
|
||||||
///
|
///
|
||||||
/// This is sent by the Coordinator when it creates the Tributary (TODO).
|
/// This is sent by the Coordinator when it creates the Tributary.
|
||||||
GenerateKey { session: Session, threshold: u16, evrf_public_keys: Vec<([u8; 32], Vec<u8>)> },
|
GenerateKey { session: Session, threshold: u16, evrf_public_keys: Vec<([u8; 32], Vec<u8>)> },
|
||||||
/// Received participations for the specified key generation protocol.
|
/// Received participations for the specified key generation protocol.
|
||||||
///
|
///
|
||||||
@@ -50,7 +52,8 @@ pub mod key_gen {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
|
// This set of messages is sent entirely and solely by serai-processor-key-gen.
|
||||||
|
#[derive(Clone, BorshSerialize, BorshDeserialize)]
|
||||||
pub enum ProcessorMessage {
|
pub enum ProcessorMessage {
|
||||||
// Participated in the specified key generation protocol.
|
// Participated in the specified key generation protocol.
|
||||||
Participation { session: Session, participation: Vec<u8> },
|
Participation { session: Session, participation: Vec<u8> },
|
||||||
@@ -141,7 +144,8 @@ pub mod sign {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
// This set of messages is sent entirely and solely by serai-processor-frost-attempt-manager.
|
||||||
|
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
|
||||||
pub enum ProcessorMessage {
|
pub enum ProcessorMessage {
|
||||||
// Participant sent an invalid message during the sign protocol.
|
// Participant sent an invalid message during the sign protocol.
|
||||||
InvalidParticipant { session: Session, participant: Participant },
|
InvalidParticipant { session: Session, participant: Participant },
|
||||||
@@ -155,39 +159,25 @@ pub mod sign {
|
|||||||
pub mod coordinator {
|
pub mod coordinator {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
// TODO: Remove this for the one defined in serai-cosign
|
|
||||||
pub fn cosign_block_msg(block_number: u64, block: [u8; 32]) -> Vec<u8> {
|
|
||||||
const DST: &[u8] = b"Cosign";
|
|
||||||
let mut res = vec![u8::try_from(DST.len()).unwrap()];
|
|
||||||
res.extend(DST);
|
|
||||||
res.extend(block_number.to_le_bytes());
|
|
||||||
res.extend(block);
|
|
||||||
res
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||||
pub enum CoordinatorMessage {
|
pub enum CoordinatorMessage {
|
||||||
/// Cosign the specified Substrate block.
|
/// Cosign the specified Substrate block.
|
||||||
///
|
///
|
||||||
/// This is sent by the Coordinator's Tributary scanner.
|
/// This is sent by the Coordinator's Tributary scanner.
|
||||||
CosignSubstrateBlock { session: Session, block_number: u64, block: [u8; 32] },
|
CosignSubstrateBlock { session: Session, intent: CosignIntent },
|
||||||
/// Sign the slash report for this session.
|
/// Sign the slash report for this session.
|
||||||
///
|
///
|
||||||
/// This is sent by the Coordinator's Tributary scanner.
|
/// This is sent by the Coordinator's Tributary scanner.
|
||||||
SignSlashReport { session: Session, report: Vec<Slash> },
|
SignSlashReport { session: Session, report: Vec<Slash> },
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
// This set of messages is sent entirely and solely by serai-processor-bin's implementation of
|
||||||
pub struct PlanMeta {
|
// the signers::Coordinator trait.
|
||||||
pub session: Session,
|
// TODO: Move message creation into serai-processor-signers
|
||||||
pub id: [u8; 32],
|
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
|
||||||
pub enum ProcessorMessage {
|
pub enum ProcessorMessage {
|
||||||
CosignedBlock { block_number: u64, block: [u8; 32], signature: Vec<u8> },
|
CosignedBlock { cosign: SignedCosign },
|
||||||
SignedBatch { batch: SignedBatch },
|
SignedBatch { batch: SignedBatch },
|
||||||
SubstrateBlockAck { block: u64, plans: Vec<PlanMeta> },
|
|
||||||
SignedSlashReport { session: Session, signature: Vec<u8> },
|
SignedSlashReport { session: Session, signature: Vec<u8> },
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -213,17 +203,17 @@ pub mod substrate {
|
|||||||
pub enum CoordinatorMessage {
|
pub enum CoordinatorMessage {
|
||||||
/// Keys set on the Serai blockchain.
|
/// Keys set on the Serai blockchain.
|
||||||
///
|
///
|
||||||
/// This is set by the Coordinator's Substrate canonical event stream.
|
/// This is sent by the Coordinator's Substrate canonical event stream.
|
||||||
SetKeys { serai_time: u64, session: Session, key_pair: KeyPair },
|
SetKeys { serai_time: u64, session: Session, key_pair: KeyPair },
|
||||||
/// Slashes reported on the Serai blockchain OR the process timed out.
|
/// Slashes reported on the Serai blockchain OR the process timed out.
|
||||||
///
|
///
|
||||||
/// This is the final message for a session,
|
/// This is the final message for a session,
|
||||||
///
|
///
|
||||||
/// This is set by the Coordinator's Substrate canonical event stream.
|
/// This is sent by the Coordinator's Substrate canonical event stream.
|
||||||
SlashesReported { session: Session },
|
SlashesReported { session: Session },
|
||||||
/// A block from Serai with relevance to this processor.
|
/// A block from Serai with relevance to this processor.
|
||||||
///
|
///
|
||||||
/// This is set by the Coordinator's Substrate canonical event stream.
|
/// This is sent by the Coordinator's Substrate canonical event stream.
|
||||||
Block {
|
Block {
|
||||||
serai_block_number: u64,
|
serai_block_number: u64,
|
||||||
batch: Option<ExecutedBatch>,
|
batch: Option<ExecutedBatch>,
|
||||||
@@ -231,17 +221,16 @@ pub mod substrate {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||||
pub enum ProcessorMessage {}
|
pub struct PlanMeta {
|
||||||
impl BorshSerialize for ProcessorMessage {
|
pub session: Session,
|
||||||
fn serialize<W: borsh::io::Write>(&self, _writer: &mut W) -> borsh::io::Result<()> {
|
pub transaction_plan_id: [u8; 32],
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
impl BorshDeserialize for ProcessorMessage {
|
|
||||||
fn deserialize_reader<R: borsh::io::Read>(_reader: &mut R) -> borsh::io::Result<Self> {
|
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
|
||||||
unimplemented!()
|
pub enum ProcessorMessage {
|
||||||
}
|
// TODO: Have the processor send this
|
||||||
|
SubstrateBlockAck { block: [u8; 32], plans: Vec<PlanMeta> },
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -268,7 +257,7 @@ impl_from!(sign, CoordinatorMessage, Sign);
|
|||||||
impl_from!(coordinator, CoordinatorMessage, Coordinator);
|
impl_from!(coordinator, CoordinatorMessage, Coordinator);
|
||||||
impl_from!(substrate, CoordinatorMessage, Substrate);
|
impl_from!(substrate, CoordinatorMessage, Substrate);
|
||||||
|
|
||||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
|
||||||
pub enum ProcessorMessage {
|
pub enum ProcessorMessage {
|
||||||
KeyGen(key_gen::ProcessorMessage),
|
KeyGen(key_gen::ProcessorMessage),
|
||||||
Sign(sign::ProcessorMessage),
|
Sign(sign::ProcessorMessage),
|
||||||
@@ -331,8 +320,8 @@ impl CoordinatorMessage {
|
|||||||
CoordinatorMessage::Coordinator(msg) => {
|
CoordinatorMessage::Coordinator(msg) => {
|
||||||
let (sub, id) = match msg {
|
let (sub, id) = match msg {
|
||||||
// We only cosign a block once, and Reattempt is a separate message
|
// We only cosign a block once, and Reattempt is a separate message
|
||||||
coordinator::CoordinatorMessage::CosignSubstrateBlock { block_number, .. } => {
|
coordinator::CoordinatorMessage::CosignSubstrateBlock { intent, .. } => {
|
||||||
(0, block_number.encode())
|
(0, intent.block_number.encode())
|
||||||
}
|
}
|
||||||
// We only sign one slash report, and Reattempt is a separate message
|
// We only sign one slash report, and Reattempt is a separate message
|
||||||
coordinator::CoordinatorMessage::SignSlashReport { session, .. } => (1, session.encode()),
|
coordinator::CoordinatorMessage::SignSlashReport { session, .. } => (1, session.encode()),
|
||||||
@@ -404,17 +393,26 @@ impl ProcessorMessage {
|
|||||||
}
|
}
|
||||||
ProcessorMessage::Coordinator(msg) => {
|
ProcessorMessage::Coordinator(msg) => {
|
||||||
let (sub, id) = match msg {
|
let (sub, id) = match msg {
|
||||||
coordinator::ProcessorMessage::CosignedBlock { block, .. } => (0, block.encode()),
|
coordinator::ProcessorMessage::CosignedBlock { cosign } => {
|
||||||
|
(0, cosign.cosign.block_hash.encode())
|
||||||
|
}
|
||||||
coordinator::ProcessorMessage::SignedBatch { batch, .. } => (1, batch.batch.id.encode()),
|
coordinator::ProcessorMessage::SignedBatch { batch, .. } => (1, batch.batch.id.encode()),
|
||||||
coordinator::ProcessorMessage::SubstrateBlockAck { block, .. } => (2, block.encode()),
|
coordinator::ProcessorMessage::SignedSlashReport { session, .. } => (2, session.encode()),
|
||||||
coordinator::ProcessorMessage::SignedSlashReport { session, .. } => (3, session.encode()),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut res = vec![PROCESSOR_UID, TYPE_COORDINATOR_UID, sub];
|
let mut res = vec![PROCESSOR_UID, TYPE_COORDINATOR_UID, sub];
|
||||||
res.extend(&id);
|
res.extend(&id);
|
||||||
res
|
res
|
||||||
}
|
}
|
||||||
ProcessorMessage::Substrate(_) => panic!("requesting intent for empty message type"),
|
ProcessorMessage::Substrate(msg) => {
|
||||||
|
let (sub, id) = match msg {
|
||||||
|
substrate::ProcessorMessage::SubstrateBlockAck { block, .. } => (0, block.encode()),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut res = vec![PROCESSOR_UID, TYPE_SUBSTRATE_UID, sub];
|
||||||
|
res.extend(&id);
|
||||||
|
res
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,7 +7,10 @@ use serai_db::{DbTxn, Db};
|
|||||||
|
|
||||||
use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch};
|
use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch};
|
||||||
|
|
||||||
use primitives::{EncodableG, task::ContinuallyRan};
|
use primitives::{
|
||||||
|
EncodableG,
|
||||||
|
task::{DoesNotError, ContinuallyRan},
|
||||||
|
};
|
||||||
use crate::{
|
use crate::{
|
||||||
db::{Returnable, ScannerGlobalDb, InInstructionData, ScanToBatchDb, BatchData, BatchToReportDb},
|
db::{Returnable, ScannerGlobalDb, InInstructionData, ScanToBatchDb, BatchData, BatchToReportDb},
|
||||||
index,
|
index,
|
||||||
@@ -60,7 +63,9 @@ impl<D: Db, S: ScannerFeed> BatchTask<D, S> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db, S: ScannerFeed> ContinuallyRan for BatchTask<D, S> {
|
impl<D: Db, S: ScannerFeed> ContinuallyRan for BatchTask<D, S> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = DoesNotError;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let highest_batchable = {
|
let highest_batchable = {
|
||||||
// Fetch the next to scan block
|
// Fetch the next to scan block
|
||||||
|
|||||||
@@ -190,7 +190,9 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> EventualityTask<D, S, Sch> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTask<D, S, Sch> {
|
impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTask<D, S, Sch> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = String;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
// Fetch the highest acknowledged block
|
// Fetch the highest acknowledged block
|
||||||
let Some(highest_acknowledged) = ScannerGlobalDb::<S>::highest_acknowledged_block(&self.db)
|
let Some(highest_acknowledged) = ScannerGlobalDb::<S>::highest_acknowledged_block(&self.db)
|
||||||
|
|||||||
@@ -58,7 +58,9 @@ impl<D: Db, S: ScannerFeed> IndexTask<D, S> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db, S: ScannerFeed> ContinuallyRan for IndexTask<D, S> {
|
impl<D: Db, S: ScannerFeed> ContinuallyRan for IndexTask<D, S> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = String;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
// Fetch the latest finalized block
|
// Fetch the latest finalized block
|
||||||
let our_latest_finalized = IndexDb::latest_finalized_block(&self.db)
|
let our_latest_finalized = IndexDb::latest_finalized_block(&self.db)
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ use serai_db::{DbTxn, Db};
|
|||||||
|
|
||||||
use serai_validator_sets_primitives::Session;
|
use serai_validator_sets_primitives::Session;
|
||||||
|
|
||||||
use primitives::task::ContinuallyRan;
|
use primitives::task::{DoesNotError, ContinuallyRan};
|
||||||
use crate::{
|
use crate::{
|
||||||
db::{BatchData, BatchToReportDb, BatchesToSign},
|
db::{BatchData, BatchToReportDb, BatchesToSign},
|
||||||
substrate, ScannerFeed,
|
substrate, ScannerFeed,
|
||||||
@@ -27,7 +27,9 @@ impl<D: Db, S: ScannerFeed> ReportTask<D, S> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db, S: ScannerFeed> ContinuallyRan for ReportTask<D, S> {
|
impl<D: Db, S: ScannerFeed> ContinuallyRan for ReportTask<D, S> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = DoesNotError;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let mut made_progress = false;
|
let mut made_progress = false;
|
||||||
loop {
|
loop {
|
||||||
|
|||||||
@@ -98,7 +98,9 @@ impl<D: Db, S: ScannerFeed> ScanTask<D, S> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanTask<D, S> {
|
impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanTask<D, S> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = String;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
// Fetch the safe to scan block
|
// Fetch the safe to scan block
|
||||||
let latest_scannable =
|
let latest_scannable =
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ use serai_db::{Get, DbTxn, Db};
|
|||||||
use serai_coins_primitives::{OutInstruction, OutInstructionWithBalance};
|
use serai_coins_primitives::{OutInstruction, OutInstructionWithBalance};
|
||||||
|
|
||||||
use messages::substrate::ExecutedBatch;
|
use messages::substrate::ExecutedBatch;
|
||||||
use primitives::task::ContinuallyRan;
|
use primitives::task::{DoesNotError, ContinuallyRan};
|
||||||
use crate::{
|
use crate::{
|
||||||
db::{ScannerGlobalDb, SubstrateToEventualityDb, AcknowledgedBatches},
|
db::{ScannerGlobalDb, SubstrateToEventualityDb, AcknowledgedBatches},
|
||||||
index, batch, ScannerFeed, KeyFor,
|
index, batch, ScannerFeed, KeyFor,
|
||||||
@@ -50,7 +50,9 @@ impl<D: Db, S: ScannerFeed> SubstrateTask<D, S> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db, S: ScannerFeed> ContinuallyRan for SubstrateTask<D, S> {
|
impl<D: Db, S: ScannerFeed> ContinuallyRan for SubstrateTask<D, S> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = DoesNotError;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let mut made_progress = false;
|
let mut made_progress = false;
|
||||||
loop {
|
loop {
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ use serai_db::{Get, DbTxn, Db};
|
|||||||
|
|
||||||
use messages::sign::VariantSignId;
|
use messages::sign::VariantSignId;
|
||||||
|
|
||||||
use primitives::task::ContinuallyRan;
|
use primitives::task::{DoesNotError, ContinuallyRan};
|
||||||
use scanner::{BatchesToSign, AcknowledgedBatches};
|
use scanner::{BatchesToSign, AcknowledgedBatches};
|
||||||
|
|
||||||
use frost_attempt_manager::*;
|
use frost_attempt_manager::*;
|
||||||
@@ -79,7 +79,9 @@ impl<D: Db, E: GroupEncoding> BatchSignerTask<D, E> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db, E: Send + GroupEncoding> ContinuallyRan for BatchSignerTask<D, E> {
|
impl<D: Db, E: Send + GroupEncoding> ContinuallyRan for BatchSignerTask<D, E> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = DoesNotError;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let mut iterated = false;
|
let mut iterated = false;
|
||||||
|
|
||||||
|
|||||||
@@ -22,7 +22,9 @@ impl<D: Db, C: Coordinator> CoordinatorTask<D, C> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db, C: Coordinator> ContinuallyRan for CoordinatorTask<D, C> {
|
impl<D: Db, C: Coordinator> ContinuallyRan for CoordinatorTask<D, C> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = String;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let mut iterated = false;
|
let mut iterated = false;
|
||||||
|
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ use serai_db::{DbTxn, Db};
|
|||||||
|
|
||||||
use messages::{sign::VariantSignId, coordinator::cosign_block_msg};
|
use messages::{sign::VariantSignId, coordinator::cosign_block_msg};
|
||||||
|
|
||||||
use primitives::task::ContinuallyRan;
|
use primitives::task::{DoesNotError, ContinuallyRan};
|
||||||
|
|
||||||
use frost_attempt_manager::*;
|
use frost_attempt_manager::*;
|
||||||
|
|
||||||
@@ -51,7 +51,9 @@ impl<D: Db> CosignerTask<D> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db> ContinuallyRan for CosignerTask<D> {
|
impl<D: Db> ContinuallyRan for CosignerTask<D> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = DoesNotError;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, DoesNotError>> {
|
||||||
async move {
|
async move {
|
||||||
let mut iterated = false;
|
let mut iterated = false;
|
||||||
|
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ use serai_db::{DbTxn, Db};
|
|||||||
|
|
||||||
use messages::sign::VariantSignId;
|
use messages::sign::VariantSignId;
|
||||||
|
|
||||||
use primitives::task::ContinuallyRan;
|
use primitives::task::{DoesNotError, ContinuallyRan};
|
||||||
use scanner::ScannerFeed;
|
use scanner::ScannerFeed;
|
||||||
|
|
||||||
use frost_attempt_manager::*;
|
use frost_attempt_manager::*;
|
||||||
@@ -52,7 +52,9 @@ impl<D: Db, S: ScannerFeed> SlashReportSignerTask<D, S> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db, S: ScannerFeed> ContinuallyRan for SlashReportSignerTask<D, S> {
|
impl<D: Db, S: ScannerFeed> ContinuallyRan for SlashReportSignerTask<D, S> {
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = DoesNotError;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let mut iterated = false;
|
let mut iterated = false;
|
||||||
|
|
||||||
|
|||||||
@@ -92,7 +92,9 @@ impl<D: Db, ST: SignableTransaction, P: TransactionPublisher<TransactionFor<ST>>
|
|||||||
impl<D: Db, ST: SignableTransaction, P: TransactionPublisher<TransactionFor<ST>>> ContinuallyRan
|
impl<D: Db, ST: SignableTransaction, P: TransactionPublisher<TransactionFor<ST>>> ContinuallyRan
|
||||||
for TransactionSignerTask<D, ST, P>
|
for TransactionSignerTask<D, ST, P>
|
||||||
{
|
{
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
type Error = P::EphemeralError;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async {
|
async {
|
||||||
let mut iterated = false;
|
let mut iterated = false;
|
||||||
|
|
||||||
@@ -222,11 +224,7 @@ impl<D: Db, ST: SignableTransaction, P: TransactionPublisher<TransactionFor<ST>>
|
|||||||
let tx = TransactionFor::<ST>::read(&mut tx_buf).unwrap();
|
let tx = TransactionFor::<ST>::read(&mut tx_buf).unwrap();
|
||||||
assert!(tx_buf.is_empty());
|
assert!(tx_buf.is_empty());
|
||||||
|
|
||||||
self
|
self.publisher.publish(tx).await?;
|
||||||
.publisher
|
|
||||||
.publish(tx)
|
|
||||||
.await
|
|
||||||
.map_err(|e| format!("couldn't re-broadcast transactions: {e:?}"))?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
self.last_publication = Instant::now();
|
self.last_publication = Instant::now();
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ pub enum Call {
|
|||||||
},
|
},
|
||||||
report_slashes {
|
report_slashes {
|
||||||
network: NetworkId,
|
network: NetworkId,
|
||||||
slashes: BoundedVec<(SeraiAddress, u32), ConstU32<{ MAX_KEY_SHARES_PER_SET / 3 }>>,
|
slashes: BoundedVec<(SeraiAddress, u32), ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 / 3 }>>,
|
||||||
signature: Signature,
|
signature: Signature,
|
||||||
},
|
},
|
||||||
allocate {
|
allocate {
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user