Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-12 14:09:25 +00:00)

Compare commits: 1 commit, be2098d2e1 ("undroppabl…"), SHA1 ce3b90541e

Cargo.lock (generated)
@@ -8318,13 +8318,14 @@ dependencies = [
  "blake2",
  "borsh",
  "ciphersuite",
- "dkg",
  "env_logger",
  "frost-schnorrkel",
  "hex",
  "log",
+ "modular-frost",
  "parity-scale-codec",
  "rand_core",
+ "schnorr-signatures",
  "schnorrkel",
  "serai-client",
  "serai-coordinator-libp2p-p2p",
@@ -8386,7 +8387,6 @@ version = "0.1.0"
 dependencies = [
  "bitvec",
  "borsh",
- "dkg",
  "futures",
  "log",
  "parity-scale-codec",
@@ -8429,7 +8429,6 @@ dependencies = [
  "blake2",
  "borsh",
  "ciphersuite",
- "dkg",
  "log",
  "parity-scale-codec",
  "rand_core",
@@ -30,13 +30,53 @@ pub trait Get {
 /// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
 /// randomly, or any other action, at time of write or at time of commit.
 #[must_use]
-pub trait DbTxn: Send + Get {
+pub trait DbTxn: Sized + Send + Get {
   /// Write a value to this key.
   fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
   /// Delete the value from this key.
   fn del(&mut self, key: impl AsRef<[u8]>);
   /// Commit this transaction.
   fn commit(self);
+  /// Close this transaction.
+  ///
+  /// This is equivalent to `Drop` on transactions which can be dropped. This is explicit and works
+  /// with transactions which can't be dropped.
+  fn close(self) {
+    drop(self);
+  }
+}
+
+// Credit for the idea goes to https://jack.wrenn.fyi/blog/undroppable
+pub struct Undroppable<T>(Option<T>);
+impl<T> Drop for Undroppable<T> {
+  fn drop(&mut self) {
+    // Use an assertion at compile time to prevent this code from compiling if generated
+    #[allow(clippy::assertions_on_constants)]
+    const {
+      assert!(false, "Undroppable DbTxn was dropped. Ensure all code paths call commit or close");
+    }
+  }
+}
+impl<T: DbTxn> Get for Undroppable<T> {
+  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
+    self.0.as_ref().unwrap().get(key)
+  }
+}
+impl<T: DbTxn> DbTxn for Undroppable<T> {
+  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
+    self.0.as_mut().unwrap().put(key, value);
+  }
+  fn del(&mut self, key: impl AsRef<[u8]>) {
+    self.0.as_mut().unwrap().del(key);
+  }
+  fn commit(mut self) {
+    self.0.take().unwrap().commit();
+    let _ = core::mem::ManuallyDrop::new(self);
+  }
+  fn close(mut self) {
+    drop(self.0.take().unwrap());
+    let _ = core::mem::ManuallyDrop::new(self);
+  }
 }

 /// A database supporting atomic transaction.
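The `Undroppable` wrapper above turns an accidental drop into a build failure: the `const { assert!(false, ..) }` block inside `Drop::drop` is only evaluated when the compiler instantiates drop glue for a concrete `Undroppable<T>`, and `commit`/`close` avoid instantiating that glue by extracting the inner transaction and then handing `self` to `core::mem::ManuallyDrop`. A minimal self-contained sketch of the same trick, using a hypothetical `MustConsume` type rather than anything from serai-db:

// Drop-bomb sketch (after the linked blog post); every value must flow into
// `finish`, or compilation fails wherever drop glue for `MustConsume<T>`
// would be emitted.
pub struct MustConsume<T>(Option<T>);

impl<T> Drop for MustConsume<T> {
  fn drop(&mut self) {
    // Evaluated per monomorphization, and only if drop glue is generated
    #[allow(clippy::assertions_on_constants)]
    const {
      assert!(false, "MustConsume was dropped; call finish instead");
    }
  }
}

impl<T> MustConsume<T> {
  pub fn new(value: T) -> Self {
    Self(Some(value))
  }

  // Consuming `self` through ManuallyDrop means `drop` is never instantiated.
  // The payload was already taken out, so nothing is leaked.
  pub fn finish(mut self) -> T {
    let value = self.0.take().unwrap();
    let _ = core::mem::ManuallyDrop::new(self);
    value
  }
}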
@@ -51,6 +91,10 @@ pub trait Db: 'static + Send + Sync + Clone + Get {
     let dst_len = u8::try_from(item_dst.len()).unwrap();
     [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
   }
-  /// Open a new transaction.
-  fn txn(&mut self) -> Self::Transaction<'_>;
+  /// Open a new transaction which may be dropped.
+  fn unsafe_txn(&mut self) -> Self::Transaction<'_>;
+  /// Open a new transaction which must be committed or closed.
+  fn txn(&mut self) -> Undroppable<Self::Transaction<'_>> {
+    Undroppable(Some(self.unsafe_txn()))
+  }
 }
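For callers, the split in this hunk is: implementors provide only `unsafe_txn`, while the default `txn` wraps it in `Undroppable` so every code path must end in `commit` or `close`. A hedged sketch of the resulting call-site contract (the key and value bytes are illustrative):

// Assuming the serai-db traits above.
fn record(db: &mut impl Db) {
  let mut txn = db.txn(); // Undroppable<_>
  txn.put(b"key", b"value");
  txn.commit(); // consumes the wrapper without ever emitting its drop glue
}

fn peek(db: &mut impl Db) -> Option<Vec<u8>> {
  let txn = db.txn();
  let value = txn.get(b"key"); // Undroppable also implements Get
  // Nothing was written, so end the transaction explicitly; letting `txn`
  // fall out of scope would fail to compile via the const assertion.
  txn.close();
  value
}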
@@ -74,7 +74,7 @@ impl Get for MemDb {
 }
 impl Db for MemDb {
   type Transaction<'a> = MemDbTxn<'a>;
-  fn txn(&mut self) -> MemDbTxn<'_> {
+  fn unsafe_txn(&mut self) -> MemDbTxn<'_> {
     MemDbTxn(self, HashMap::new(), HashSet::new())
   }
 }
@@ -37,7 +37,7 @@ impl Get for Arc<ParityDb> {
 }
 impl Db for Arc<ParityDb> {
   type Transaction<'a> = Transaction<'a>;
-  fn txn(&mut self) -> Self::Transaction<'_> {
+  fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
     Transaction(self, vec![])
   }
 }
@@ -39,7 +39,7 @@ impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
 }
 impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
   type Transaction<'a> = Transaction<'a, T>;
-  fn txn(&mut self) -> Self::Transaction<'_> {
+  fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
     let mut opts = WriteOptions::default();
     opts.set_sync(true);
     Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
@@ -25,13 +25,12 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
 blake2 = { version = "0.10", default-features = false, features = ["std"] }
 schnorrkel = { version = "0.11", default-features = false, features = ["std"] }

-ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }
-dkg = { path = "../crypto/dkg", default-features = false, features = ["std"] }
+ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
+schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
+frost = { package = "modular-frost", path = "../crypto/frost" }
 frost-schnorrkel = { path = "../crypto/schnorrkel" }

-hex = { version = "0.4", default-features = false, features = ["std"] }
 scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
-borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

 zalloc = { path = "../common/zalloc" }
 serai-db = { path = "../common/db" }
@@ -44,6 +43,9 @@ tributary-sdk = { path = "./tributary-sdk" }

 serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }

+hex = { version = "0.4", default-features = false, features = ["std"] }
+borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
+
 log = { version = "0.4", default-features = false, features = ["std"] }
 env_logger = { version = "0.10", default-features = false, features = ["humantime"] }

@@ -24,6 +24,15 @@ pub(crate) struct CosignDelayTask<D: Db> {
   pub(crate) db: D,
 }

+struct AwaitUndroppable<T: DbTxn>(Option<core::mem::ManuallyDrop<Undroppable<T>>>);
+impl<T: DbTxn> Drop for AwaitUndroppable<T> {
+  fn drop(&mut self) {
+    if let Some(mut txn) = self.0.take() {
+      (unsafe { core::mem::ManuallyDrop::take(&mut txn) }).close();
+    }
+  }
+}
+
 impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
   type Error = DoesNotError;

@@ -35,14 +44,18 @@ impl<D: Db> ContinuallyRan for CosignDelayTask<D> {

       // Receive the next block to mark as cosigned
       let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
+        txn.close();
         break;
       };

       // Calculate when we should mark it as valid
       let time_valid =
         SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
       // Sleep until then
+      let mut txn = AwaitUndroppable(Some(core::mem::ManuallyDrop::new(txn)));
       tokio::time::sleep(SystemTime::now().duration_since(time_valid).unwrap_or(Duration::ZERO))
         .await;
+      let mut txn = core::mem::ManuallyDrop::into_inner(txn.0.take().unwrap());
+
       // Set the cosigned block
       LatestCosignedBlockNumber::set(&mut txn, &block_number);
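The `AwaitUndroppable` dance in this hunk exists because an `Undroppable` transaction held across an `.await` becomes a field of the future's state machine, and the future's drop glue (run on cancellation) would then have to drop it, which is precisely what the `const` assertion rejects at compile time. Storing the transaction in `ManuallyDrop` keeps that glue out of the future, while the wrapper's own `Drop` recovers and closes the transaction deliberately if the future is dropped mid-sleep. The same pattern, sketched generically (the `CancelGuard` name and `release` hook are illustrative, not serai-db items):

// Generic cancellation guard in the style of AwaitUndroppable.
struct CancelGuard<T> {
  value: Option<core::mem::ManuallyDrop<T>>,
  release: fn(T),
}

impl<T> Drop for CancelGuard<T> {
  fn drop(&mut self) {
    // Runs if the enclosing future is dropped while the guard is live:
    // recover the value from ManuallyDrop and release it on purpose.
    if let Some(mut value) = self.value.take() {
      (self.release)(unsafe { core::mem::ManuallyDrop::take(&mut value) });
    }
  }
}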
@@ -1,5 +1,5 @@
 use core::future::Future;
-use std::time::{Duration, Instant, SystemTime};
+use std::time::{Duration, SystemTime};

 use serai_db::*;
 use serai_task::ContinuallyRan;
@@ -77,27 +77,17 @@ pub(crate) fn currently_evaluated_global_session(getter: &impl Get) -> Option<[u
 pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
   pub(crate) db: D,
   pub(crate) request: R,
-  pub(crate) last_request_for_cosigns: Instant,
 }

 impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
   type Error = String;

   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
-    let should_request_cosigns = |last_request_for_cosigns: &mut Instant| {
-      const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(60);
-      if Instant::now() < (*last_request_for_cosigns + REQUEST_COSIGNS_SPACING) {
-        return false;
-      }
-      *last_request_for_cosigns = Instant::now();
-      true
-    };
-
     async move {
       let mut known_cosign = None;
       let mut made_progress = false;
       loop {
-        let mut txn = self.db.txn();
+        let mut txn = self.db.unsafe_txn();
         let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
         else {
           break;
@@ -128,13 +118,12 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
           // Check if the sum weight doesn't cross the required threshold
           if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
             // Request the necessary cosigns over the network
-            if should_request_cosigns(&mut self.last_request_for_cosigns) {
-              self
-                .request
-                .request_notable_cosigns(global_session)
-                .await
-                .map_err(|e| format!("{e:?}"))?;
-            }
+            // TODO: Add a timer to ensure this isn't called too often
+            self
+              .request
+              .request_notable_cosigns(global_session)
+              .await
+              .map_err(|e| format!("{e:?}"))?;
             // We return an error so the delay before this task is run again increases
             return Err(format!(
               "notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
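The threshold check in this hunk demands strictly more than 83% of the total stake before a block counts as cosigned. A quick numeric check of the expression, with an illustrative total stake of 100:

// (100 * 83) / 100 + 1 = 84: 83 units of cosigning weight are insufficient,
// while 84 cross the threshold.
let total_stake: u64 = 100;
let required = ((total_stake * 83) / 100) + 1;
assert_eq!(required, 84);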
@@ -191,13 +180,11 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
             // If this session hasn't yet produced notable cosigns, then we presume we'll see
             // the desired non-notable cosigns as part of normal operations, without needing to
             // explicitly request them
-            if should_request_cosigns(&mut self.last_request_for_cosigns) {
-              self
-                .request
-                .request_notable_cosigns(global_session)
-                .await
-                .map_err(|e| format!("{e:?}"))?;
-            }
+            self
+              .request
+              .request_notable_cosigns(global_session)
+              .await
+              .map_err(|e| format!("{e:?}"))?;
             // We return an error so the delay before this task is run again increases
             return Err(format!(
               "block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
@@ -70,7 +70,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
       self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();

     for block_number in start_block_number ..= latest_block_number {
-      let mut txn = self.db.txn();
+      let mut txn = self.db.unsafe_txn();

       let (block, mut has_events) =
         block_has_events_justifying_a_cosign(&self.serai, block_number)
@@ -3,7 +3,7 @@
 #![deny(missing_docs)]

 use core::{fmt::Debug, future::Future};
-use std::{sync::Arc, collections::HashMap, time::Instant};
+use std::{sync::Arc, collections::HashMap};

 use blake2::{Digest, Blake2s256};

@@ -288,12 +288,8 @@ impl<D: Db> Cosigning<D> {
         .continually_run(intend_task, vec![evaluator_task_handle]),
     );
     tokio::spawn(
-      (evaluator::CosignEvaluatorTask {
-        db: db.clone(),
-        request,
-        last_request_for_cosigns: Instant::now(),
-      })
-        .continually_run(evaluator_task, vec![delay_task_handle]),
+      (evaluator::CosignEvaluatorTask { db: db.clone(), request })
+        .continually_run(evaluator_task, vec![delay_task_handle]),
     );
     tokio::spawn(
       (delay::CosignDelayTask { db: db.clone() })
@@ -428,7 +424,7 @@ impl<D: Db> Cosigning<D> {
     // Since we verified this cosign's signature, and have a chain sufficiently long, handle the
     // cosign

-    let mut txn = self.db.txn();
+    let mut txn = self.db.unsafe_txn();

     if !faulty {
       // If this is for a future global session, we don't acknowledge this cosign at this time
@@ -484,3 +480,30 @@ impl<D: Db> Cosigning<D> {
     res
   }
 }
+
+mod tests {
+  use super::*;
+
+  struct RNC;
+  impl RequestNotableCosigns for RNC {
+    /// The error type which may be encountered when requesting notable cosigns.
+    type Error = ();
+
+    /// Request the notable cosigns for this global session.
+    fn request_notable_cosigns(
+      &self,
+      global_session: [u8; 32],
+    ) -> impl Send + Future<Output = Result<(), Self::Error>> {
+      async move { Ok(()) }
+    }
+  }
+  #[tokio::test]
+  async fn test() {
+    let db: serai_db::MemDb = serai_db::MemDb::new();
+    let serai = unsafe { core::mem::transmute(0u64) };
+    let request = RNC;
+    let tasks = vec![];
+    let _ = Cosigning::spawn(db, serai, request, tasks);
+    core::future::pending().await
+  }
+}
@@ -51,14 +51,6 @@ impl Validators {
     serai: impl Borrow<Serai>,
     sessions: impl Borrow<HashMap<NetworkId, Session>>,
   ) -> Result<Vec<(NetworkId, Session, HashSet<PeerId>)>, SeraiError> {
-    /*
-      This uses the latest finalized block, not the latest cosigned block, which should be fine as
-      in the worst case, we'd connect to unexpected validators. They still shouldn't be able to
-      bypass the cosign protocol unless a historical global session was malicious, in which case
-      the cosign protocol already breaks.
-
-      Besides, we can't connect to historical validators, only the current validators.
-    */
     let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
     let temporal_serai = temporal_serai.validator_sets();

@@ -3,11 +3,9 @@ use std::{path::Path, fs};
 pub(crate) use serai_db::{Get, DbTxn, Db as DbTrait};
 use serai_db::{create_db, db_channel};

-use dkg::Participant;
-
 use serai_client::{
   primitives::NetworkId,
-  validator_sets::primitives::{Session, ValidatorSet, KeyPair},
+  validator_sets::primitives::{Session, ValidatorSet},
 };

 use serai_cosign::SignedCosign;
@@ -15,7 +13,7 @@ use serai_coordinator_substrate::NewSetInformation;
 use serai_coordinator_tributary::Transaction;

 #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
-pub(crate) type Db = std::sync::Arc<serai_db::ParityDb>;
+pub(crate) type Db = serai_db::ParityDb;
 #[cfg(feature = "rocksdb")]
 pub(crate) type Db = serai_db::RocksDB;

@@ -78,10 +76,6 @@ create_db! {
     LastProcessorMessage: (network: NetworkId) -> u64,
     // Cosigns we produced and tried to intake yet incurred an error while doing so
     ErroneousCosigns: () -> Vec<SignedCosign>,
-    // The keys to confirm and set on the Serai network
-    KeysToConfirm: (set: ValidatorSet) -> KeyPair,
-    // The key was set on the Serai network
-    KeySet: (set: ValidatorSet) -> (),
   }
 }

@@ -99,51 +93,21 @@ mod _internal_db {

   db_channel! {
     Coordinator {
-      // Tributary transactions to publish from the Processor messages
-      TributaryTransactionsFromProcessorMessages: (set: ValidatorSet) -> Transaction,
-      // Tributary transactions to publish from the DKG confirmation task
-      TributaryTransactionsFromDkgConfirmation: (set: ValidatorSet) -> Transaction,
-      // Participants to remove
-      RemoveParticipant: (set: ValidatorSet) -> Participant,
+      // Tributary transactions to publish
+      TributaryTransactions: (set: ValidatorSet) -> Transaction,
     }
   }
 }

-pub(crate) struct TributaryTransactionsFromProcessorMessages;
-impl TributaryTransactionsFromProcessorMessages {
+pub(crate) struct TributaryTransactions;
+impl TributaryTransactions {
   pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, tx: &Transaction) {
     // If this set has yet to be retired, send this transaction
     if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
-      _internal_db::TributaryTransactionsFromProcessorMessages::send(txn, set, tx);
+      _internal_db::TributaryTransactions::send(txn, set, tx);
     }
   }
   pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<Transaction> {
-    _internal_db::TributaryTransactionsFromProcessorMessages::try_recv(txn, set)
-  }
-}
-
-pub(crate) struct TributaryTransactionsFromDkgConfirmation;
-impl TributaryTransactionsFromDkgConfirmation {
-  pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, tx: &Transaction) {
-    // If this set has yet to be retired, send this transaction
-    if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
-      _internal_db::TributaryTransactionsFromDkgConfirmation::send(txn, set, tx);
-    }
-  }
-  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<Transaction> {
-    _internal_db::TributaryTransactionsFromDkgConfirmation::try_recv(txn, set)
-  }
-}
-
-pub(crate) struct RemoveParticipant;
-impl RemoveParticipant {
-  pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, participant: Participant) {
-    // If this set has yet to be retired, send this transaction
-    if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
-      _internal_db::RemoveParticipant::send(txn, set, &participant);
-    }
-  }
-  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<Participant> {
-    _internal_db::RemoveParticipant::try_recv(txn, set)
+    _internal_db::TributaryTransactions::try_recv(txn, set)
   }
 }
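`TributaryTransactions` is thus a single `db_channel!` channel with a retirement guard in front of it: `send` silently discards transactions destined for a validator set whose Tributary has already been retired, so no queue accumulates for dead sets. A hedged usage sketch (the `set`, `participation`, and draining loop are illustrative):

// Queue a transaction for a Tributary, atomically with the rest of the txn.
let mut txn = db.txn();
TributaryTransactions::send(
  &mut txn,
  set,
  &Transaction::DkgParticipation { participation, signed: Signed::default() },
);
txn.commit();

// Elsewhere, drain the channel for publication.
let mut txn = db.txn();
while let Some(tx) = TributaryTransactions::try_recv(&mut txn, set) {
  // publish `tx` to the Tributary for `set`
}
txn.commit();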
@@ -1,434 +0,0 @@
-use core::{ops::Deref, future::Future};
-use std::{boxed::Box, collections::HashMap};
-
-use zeroize::Zeroizing;
-use rand_core::OsRng;
-use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
-use frost_schnorrkel::{
-  frost::{
-    dkg::{Participant, musig::musig},
-    FrostError,
-    sign::*,
-  },
-  Schnorrkel,
-};
-
-use serai_db::{DbTxn, Db as DbTrait};
-
-use serai_client::{
-  primitives::SeraiAddress,
-  validator_sets::primitives::{ValidatorSet, musig_context, set_keys_message},
-};
-
-use serai_task::{DoesNotError, ContinuallyRan};
-
-use serai_coordinator_substrate::{NewSetInformation, Keys};
-use serai_coordinator_tributary::{Transaction, DkgConfirmationMessages};
-
-use crate::{KeysToConfirm, KeySet, TributaryTransactionsFromDkgConfirmation};
-
-fn schnorrkel() -> Schnorrkel {
-  Schnorrkel::new(b"substrate") // TODO: Pull the constant for this
-}
-
-fn our_i(
-  set: &NewSetInformation,
-  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  data: &HashMap<Participant, Vec<u8>>,
-) -> Participant {
-  let public = SeraiAddress((Ristretto::generator() * key.deref()).to_bytes());
-
-  let mut our_i = None;
-  for participant in data.keys() {
-    let validator_index = usize::from(u16::from(*participant) - 1);
-    let (validator, _weight) = set.validators[validator_index];
-    if validator == public {
-      our_i = Some(*participant);
-    }
-  }
-  our_i.unwrap()
-}
-
-// Take a HashMap of participations with non-contiguous Participants and convert them to a
-// contiguous sequence.
-//
-// The input data is expected to not include our own data, which also won't be in the output data.
-//
-// Returns the mapping from the contiguous Participants to the original Participants.
-fn make_contiguous<T>(
-  our_i: Participant,
-  mut data: HashMap<Participant, Vec<u8>>,
-  transform: impl Fn(Vec<u8>) -> std::io::Result<T>,
-) -> Result<HashMap<Participant, T>, Participant> {
-  assert!(!data.contains_key(&our_i));
-
-  let mut ordered_participants = data.keys().copied().collect::<Vec<_>>();
-  ordered_participants.sort_by_key(|participant| u16::from(*participant));
-
-  let mut our_i = Some(our_i);
-  let mut contiguous = HashMap::new();
-  let mut i = 1;
-  for participant in ordered_participants {
-    // If this is the first participant after our own index, increment to account for our index
-    if let Some(our_i_value) = our_i {
-      if u16::from(participant) > u16::from(our_i_value) {
-        i += 1;
-        our_i = None;
-      }
-    }
-
-    let contiguous_index = Participant::new(i).unwrap();
-    let data = match transform(data.remove(&participant).unwrap()) {
-      Ok(data) => data,
-      Err(_) => Err(participant)?,
-    };
-    contiguous.insert(contiguous_index, data);
-    i += 1;
-  }
-  Ok(contiguous)
-}
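`make_contiguous` renumbers the sparse MuSig participants into a dense 1..=n range, leaving a hole where our own index belongs. A worked trace with hypothetical indices:

// Suppose our_i = Participant #3 and data holds #1, #4, and #6 (our own
// entry is excluded, per the assert):
//
//   #1 -> contiguous #1   (below ours, unaffected)
//   #4 -> contiguous #3   (first index past ours, so the counter first
//                          skips #2, the slot reserved for us)
//   #6 -> contiguous #4
//
// The dense set is #1 ..= #4 with us implicitly at #2. On failure, the Err
// carries the *original* index so the caller can blame the right validator.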
-
-fn handle_frost_error<T>(result: Result<T, FrostError>) -> Result<T, Participant> {
-  match &result {
-    Ok(_) => Ok(result.unwrap()),
-    Err(FrostError::InvalidPreprocess(participant) | FrostError::InvalidShare(participant)) => {
-      Err(*participant)
-    }
-    // All of these should be unreachable
-    Err(
-      FrostError::InternalError(_) |
-      FrostError::InvalidParticipant(_, _) |
-      FrostError::InvalidSigningSet(_) |
-      FrostError::InvalidParticipantQuantity(_, _) |
-      FrostError::DuplicatedParticipant(_) |
-      FrostError::MissingParticipant(_),
-    ) => {
-      result.unwrap();
-      unreachable!("continued execution after unwrapping Result::Err");
-    }
-  }
-}
-
-#[rustfmt::skip]
-enum Signer {
-  Preprocess { attempt: u32, seed: CachedPreprocess, preprocess: [u8; 64] },
-  Share {
-    attempt: u32,
-    musig_validators: Vec<SeraiAddress>,
-    share: [u8; 32],
-    machine: Box<AlgorithmSignatureMachine<Ristretto, Schnorrkel>>,
-  },
-}
-
-/// Performs the DKG Confirmation protocol.
-pub(crate) struct ConfirmDkgTask<CD: DbTrait, TD: DbTrait> {
-  db: CD,
-
-  set: NewSetInformation,
-  tributary_db: TD,
-
-  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
-  signer: Option<Signer>,
-}
-
-impl<CD: DbTrait, TD: DbTrait> ConfirmDkgTask<CD, TD> {
-  pub(crate) fn new(
-    db: CD,
-    set: NewSetInformation,
-    tributary_db: TD,
-    key: Zeroizing<<Ristretto as Ciphersuite>::F>,
-  ) -> Self {
-    Self { db, set, tributary_db, key, signer: None }
-  }
-
-  fn slash(db: &mut CD, set: ValidatorSet, validator: SeraiAddress) {
-    let mut txn = db.txn();
-    TributaryTransactionsFromDkgConfirmation::send(
-      &mut txn,
-      set,
-      &Transaction::RemoveParticipant { participant: validator, signed: Default::default() },
-    );
-    txn.commit();
-  }
-
-  fn preprocess(
-    db: &mut CD,
-    set: ValidatorSet,
-    attempt: u32,
-    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-    signer: &mut Option<Signer>,
-  ) {
-    // Perform the preprocess
-    let (machine, preprocess) = AlgorithmMachine::new(
-      schnorrkel(),
-      // We use a 1-of-1 Musig here as we don't know who will actually be in this Musig yet
-      musig(&musig_context(set), key, &[Ristretto::generator() * key.deref()]).unwrap().into(),
-    )
-    .preprocess(&mut OsRng);
-    // We take the preprocess so we can use it in a distinct machine with the actual Musig
-    // parameters
-    let seed = machine.cache();
-
-    let mut preprocess_bytes = [0u8; 64];
-    preprocess_bytes.copy_from_slice(&preprocess.serialize());
-    let preprocess = preprocess_bytes;
-
-    let mut txn = db.txn();
-    // If this attempt has already been preprocessed for, the Tributary will de-duplicate it
-    // This may mean the Tributary preprocess is distinct from ours, but we check for that later
-    TributaryTransactionsFromDkgConfirmation::send(
-      &mut txn,
-      set,
-      &Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed: Default::default() },
-    );
-    txn.commit();
-
-    *signer = Some(Signer::Preprocess { attempt, seed, preprocess });
-  }
-}
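`preprocess` must commit to a nonce before the actual MuSig signer set is known, so it builds the machine under a placeholder 1-of-1 MuSig of our own key, caches the machine's state, and publishes the serialized preprocess. When `Preprocesses` later reveals the real signer set, the handler rebuilds a machine from that cache with the genuine MuSig keys and asserts the regenerated preprocess matches. Condensed from the surrounding code (`placeholder_keys` and `real_keys` are stand-ins):

// Step 1: commit before the signer set is known.
let (machine, preprocess) = AlgorithmMachine::new(schnorrkel(), placeholder_keys)
  .preprocess(&mut OsRng);
let seed = machine.cache(); // persisted inside Signer::Preprocess

// Step 2: once the signer set is known, rebuild with the real keys.
let (machine, regenerated) = AlgorithmSignMachine::from_cache(schnorrkel(), real_keys, seed);
assert_eq!(preprocess.serialize(), regenerated.serialize());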
-
-impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
-  type Error = DoesNotError;
-
-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
-    async move {
-      let mut made_progress = false;
-
-      // If we were sent a key to set, create the signer for it
-      if self.signer.is_none() && KeysToConfirm::get(&self.db, self.set.set).is_some() {
-        // Create and publish the initial preprocess
-        Self::preprocess(&mut self.db, self.set.set, 0, &self.key, &mut self.signer);
-
-        made_progress = true;
-      }
-
-      // If we have keys to confirm, handle all messages from the tributary
-      if let Some(key_pair) = KeysToConfirm::get(&self.db, self.set.set) {
-        // Handle all messages from the Tributary
-        loop {
-          let mut tributary_txn = self.tributary_db.txn();
-          let Some(msg) = DkgConfirmationMessages::try_recv(&mut tributary_txn, self.set.set)
-          else {
-            break;
-          };
-
-          match msg {
-            messages::sign::CoordinatorMessage::Reattempt {
-              id: messages::sign::SignId { attempt, .. },
-            } => {
-              // Create and publish the preprocess for the specified attempt
-              Self::preprocess(&mut self.db, self.set.set, attempt, &self.key, &mut self.signer);
-            }
-            messages::sign::CoordinatorMessage::Preprocesses {
-              id: messages::sign::SignId { attempt, .. },
-              mut preprocesses,
-            } => {
-              // Confirm the preprocess we're expected to sign with is the one we locally have
-              // It may be different if we rebooted and made a second preprocess for this attempt
-              let Some(Signer::Preprocess { attempt: our_attempt, seed, preprocess }) =
-                self.signer.take()
-              else {
-                // If this message is not expected, commit the txn to drop it and move on
-                // At some point, we'll get a Reattempt and reset
-                tributary_txn.commit();
-                break;
-              };
-
-              // Determine the MuSig key signed with
-              let musig_validators = {
-                let mut ordered_participants = preprocesses.keys().copied().collect::<Vec<_>>();
-                ordered_participants.sort_by_key(|participant| u16::from(*participant));
-
-                let mut res = vec![];
-                for participant in ordered_participants {
-                  let (validator, _weight) =
-                    self.set.validators[usize::from(u16::from(participant) - 1)];
-                  res.push(validator);
-                }
-                res
-              };
-
-              let musig_public_keys = musig_validators
-                .iter()
-                .map(|key| {
-                  Ristretto::read_G(&mut key.0.as_slice())
-                    .expect("Serai validator had invalid public key")
-                })
-                .collect::<Vec<_>>();
-
-              let keys =
-                musig(&musig_context(self.set.set), &self.key, &musig_public_keys).unwrap().into();
-
-              // Rebuild the machine
-              let (machine, preprocess_from_cache) =
-                AlgorithmSignMachine::from_cache(schnorrkel(), keys, seed);
-              assert_eq!(preprocess.as_slice(), preprocess_from_cache.serialize().as_slice());
-
-              // Ensure this is a consistent signing session
-              let our_i = our_i(&self.set, &self.key, &preprocesses);
-              let consistent = (attempt == our_attempt) &&
-                (preprocesses.remove(&our_i).unwrap().as_slice() == preprocess.as_slice());
-              if !consistent {
-                tributary_txn.commit();
-                break;
-              }
-
-              // Reformat the preprocesses into the expected format for Musig
-              let preprocesses = match make_contiguous(our_i, preprocesses, |preprocess| {
-                machine.read_preprocess(&mut preprocess.as_slice())
-              }) {
-                Ok(preprocesses) => preprocesses,
-                // This yields the *original participant index*
-                Err(participant) => {
-                  Self::slash(
-                    &mut self.db,
-                    self.set.set,
-                    self.set.validators[usize::from(u16::from(participant) - 1)].0,
-                  );
-                  tributary_txn.commit();
-                  break;
-                }
-              };
-
-              // Calculate our share
-              let (machine, share) = match handle_frost_error(
-                machine.sign(preprocesses, &set_keys_message(&self.set.set, &key_pair)),
-              ) {
-                Ok((machine, share)) => (machine, share),
-                // This yields the *musig participant index*
-                Err(participant) => {
-                  Self::slash(
-                    &mut self.db,
-                    self.set.set,
-                    musig_validators[usize::from(u16::from(participant) - 1)],
-                  );
-                  tributary_txn.commit();
-                  break;
-                }
-              };
-
-              // Send our share
-              let share = <[u8; 32]>::try_from(share.serialize()).unwrap();
-              let mut txn = self.db.txn();
-              TributaryTransactionsFromDkgConfirmation::send(
-                &mut txn,
-                self.set.set,
-                &Transaction::DkgConfirmationShare { attempt, share, signed: Default::default() },
-              );
-              txn.commit();
-
-              self.signer = Some(Signer::Share {
-                attempt,
-                musig_validators,
-                share,
-                machine: Box::new(machine),
-              });
-            }
-            messages::sign::CoordinatorMessage::Shares {
-              id: messages::sign::SignId { attempt, .. },
-              mut shares,
-            } => {
-              let Some(Signer::Share { attempt: our_attempt, musig_validators, share, machine }) =
-                self.signer.take()
-              else {
-                tributary_txn.commit();
-                break;
-              };
-
-              // Ensure this is a consistent signing session
-              let our_i = our_i(&self.set, &self.key, &shares);
-              let consistent = (attempt == our_attempt) &&
-                (shares.remove(&our_i).unwrap().as_slice() == share.as_slice());
-              if !consistent {
-                tributary_txn.commit();
-                break;
-              }
-
-              // Reformat the shares into the expected format for Musig
-              let shares = match make_contiguous(our_i, shares, |share| {
-                machine.read_share(&mut share.as_slice())
-              }) {
-                Ok(shares) => shares,
-                // This yields the *original participant index*
-                Err(participant) => {
-                  Self::slash(
-                    &mut self.db,
-                    self.set.set,
-                    self.set.validators[usize::from(u16::from(participant) - 1)].0,
-                  );
-                  tributary_txn.commit();
-                  break;
-                }
-              };
-
-              match handle_frost_error(machine.complete(shares)) {
-                Ok(signature) => {
-                  // Create the bitvec of the participants
-                  let mut signature_participants;
-                  {
-                    use bitvec::prelude::*;
-                    signature_participants = bitvec![u8, Lsb0; 0; 0];
-                    let mut i = 0;
-                    for (validator, _) in &self.set.validators {
-                      if Some(validator) == musig_validators.get(i) {
-                        signature_participants.push(true);
-                        i += 1;
-                      } else {
-                        signature_participants.push(false);
-                      }
-                    }
-                  }
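The bitvec block walks the full validator list in set order, pushing one bit per validator; because `musig_validators` preserves that order, a single cursor suffices. A worked trace with a hypothetical four-validator set:

// validators       = [A, B, C, D] (in set order)
// musig_validators = [B, D]       (also in set order)
//
//   A: musig_validators.get(0) == Some(B)? no -> push(false)
//   B: matches                                -> push(true), i = 1
//   C: musig_validators.get(1) == Some(D)? no -> push(false)
//   D: matches                                -> push(true), i = 2
//
// Result (Lsb0): [false, true, false, true], flagging B and D as the
// validators whose keys participated in the MuSig signature.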
-
-                  // This is safe to call multiple times as it'll just change which *valid*
-                  // signature to publish
-                  let mut txn = self.db.txn();
-                  Keys::set(
-                    &mut txn,
-                    self.set.set,
-                    key_pair.clone(),
-                    signature_participants,
-                    signature.into(),
-                  );
-                  txn.commit();
-                }
-                // This yields the *musig participant index*
-                Err(participant) => {
-                  Self::slash(
-                    &mut self.db,
-                    self.set.set,
-                    musig_validators[usize::from(u16::from(participant) - 1)],
-                  );
-                  tributary_txn.commit();
-                  break;
-                }
-              }
-            }
-          }
-
-          // Because we successfully handled this message, note we made progress
-          made_progress = true;
-          tributary_txn.commit();
-        }
-      }
-
-      // Check if the key has been set on Serai
-      if KeysToConfirm::get(&self.db, self.set.set).is_some() &&
-        KeySet::get(&self.db, self.set.set).is_some()
-      {
-        // Take the keys to confirm so we never instantiate the signer again
-        let mut txn = self.db.txn();
-        KeysToConfirm::take(&mut txn, self.set.set);
-        KeySet::take(&mut txn, self.set.set);
-        txn.commit();
-
-        // Drop our own signer
-        // The task won't die until the Tributary does, but now it'll never do anything again
-        self.signer = None;
-
-        made_progress = true;
-      }
-
-      Ok(made_progress)
-    }
-  }
-}
@@ -14,8 +14,8 @@ use borsh::BorshDeserialize;
 use tokio::sync::mpsc;

 use serai_client::{
-  primitives::{NetworkId, PublicKey, SeraiAddress, Signature},
-  validator_sets::primitives::{ValidatorSet, KeyPair},
+  primitives::{NetworkId, PublicKey},
+  validator_sets::primitives::ValidatorSet,
   Serai,
 };
 use message_queue::{Service, client::MessageQueue};
@@ -23,17 +23,13 @@ use message_queue::{Service, client::MessageQueue};
 use serai_task::{Task, TaskHandle, ContinuallyRan};

 use serai_cosign::{Faulted, SignedCosign, Cosigning};
-use serai_coordinator_substrate::{
-  CanonicalEventStream, EphemeralEventStream, SignSlashReport, SetKeysTask, SignedBatches,
-  PublishBatchTask, SlashReports, PublishSlashReportTask,
-};
-use serai_coordinator_tributary::{SigningProtocolRound, Signed, Transaction, SubstrateBlockPlans};
+use serai_coordinator_substrate::{CanonicalEventStream, EphemeralEventStream, SignSlashReport};
+use serai_coordinator_tributary::{Signed, Transaction, SubstrateBlockPlans};

 mod db;
 use db::*;

 mod tributary;
-mod dkg_confirmation;

 mod substrate;
 use substrate::SubstrateTask;
@@ -149,25 +145,11 @@ fn spawn_cosigning<D: serai_db::Db>(
   });
 }

-async fn handle_network(
+async fn handle_processor_messages(
   mut db: impl serai_db::Db,
   message_queue: Arc<MessageQueue>,
-  serai: Arc<Serai>,
   network: NetworkId,
 ) {
-  // Spawn the task to publish batches for this network
-  {
-    let (publish_batch_task_def, publish_batch_task) = Task::new();
-    tokio::spawn(
-      PublishBatchTask::new(db.clone(), serai.clone(), network)
-        .unwrap()
-        .continually_run(publish_batch_task_def, vec![]),
-    );
-    // Forget its handle so it always runs in the background
-    core::mem::forget(publish_batch_task);
-  }
-
-  // Handle Processor messages
   loop {
     let (msg_id, msg) = {
       let msg = message_queue.next(Service::Processor(network)).await;
@@ -198,7 +180,7 @@ async fn handle_network(
       messages::ProcessorMessage::KeyGen(msg) => match msg {
         messages::key_gen::ProcessorMessage::Participation { session, participation } => {
           let set = ValidatorSet { network, session };
-          TributaryTransactionsFromProcessorMessages::send(
+          TributaryTransactions::send(
             &mut txn,
             set,
             &Transaction::DkgParticipation { participation, signed: Signed::default() },
@@ -208,84 +190,45 @@ async fn handle_network(
           session,
           substrate_key,
           network_key,
-        } => {
-          KeysToConfirm::set(
-            &mut txn,
-            ValidatorSet { network, session },
-            &KeyPair(
-              PublicKey::from_raw(substrate_key),
-              network_key
-                .try_into()
-                .expect("generated a network key which exceeds the maximum key length"),
-            ),
-          );
-        }
+        } => todo!("TODO Transaction::DkgConfirmationPreprocess"),
         messages::key_gen::ProcessorMessage::Blame { session, participant } => {
-          RemoveParticipant::send(&mut txn, ValidatorSet { network, session }, participant);
+          let set = ValidatorSet { network, session };
+          TributaryTransactions::send(
+            &mut txn,
+            set,
+            &Transaction::RemoveParticipant {
+              participant: todo!("TODO"),
+              signed: Signed::default(),
+            },
+          );
         }
       },
       messages::ProcessorMessage::Sign(msg) => match msg {
         messages::sign::ProcessorMessage::InvalidParticipant { session, participant } => {
-          RemoveParticipant::send(&mut txn, ValidatorSet { network, session }, participant);
+          let set = ValidatorSet { network, session };
+          TributaryTransactions::send(
+            &mut txn,
+            set,
+            &Transaction::RemoveParticipant {
+              participant: todo!("TODO"),
+              signed: Signed::default(),
+            },
+          );
         }
         messages::sign::ProcessorMessage::Preprocesses { id, preprocesses } => {
-          let set = ValidatorSet { network, session: id.session };
-          if id.attempt == 0 {
-            // Batches are declared by their intent to be signed
-            if let messages::sign::VariantSignId::Batch(hash) = id.id {
-              TributaryTransactionsFromProcessorMessages::send(
-                &mut txn,
-                set,
-                &Transaction::Batch { hash },
-              );
-            }
-          }
-
-          TributaryTransactionsFromProcessorMessages::send(
-            &mut txn,
-            set,
-            &Transaction::Sign {
-              id: id.id,
-              attempt: id.attempt,
-              round: SigningProtocolRound::Preprocess,
-              data: preprocesses,
-              signed: Signed::default(),
-            },
-          );
-        }
-        messages::sign::ProcessorMessage::Shares { id, shares } => {
-          let set = ValidatorSet { network, session: id.session };
-          TributaryTransactionsFromProcessorMessages::send(
-            &mut txn,
-            set,
-            &Transaction::Sign {
-              id: id.id,
-              attempt: id.attempt,
-              round: SigningProtocolRound::Share,
-              data: shares,
-              signed: Signed::default(),
-            },
-          );
+          todo!("TODO Transaction::Batch + Transaction::Sign")
         }
+        messages::sign::ProcessorMessage::Shares { id, shares } => todo!("TODO Transaction::Sign"),
       },
       messages::ProcessorMessage::Coordinator(msg) => match msg {
         messages::coordinator::ProcessorMessage::CosignedBlock { cosign } => {
           SignedCosigns::send(&mut txn, &cosign);
         }
         messages::coordinator::ProcessorMessage::SignedBatch { batch } => {
-          SignedBatches::send(&mut txn, &batch);
+          todo!("TODO PublishBatchTask")
         }
-        messages::coordinator::ProcessorMessage::SignedSlashReport {
-          session,
-          slash_report,
-          signature,
-        } => {
-          SlashReports::set(
-            &mut txn,
-            ValidatorSet { network, session },
-            slash_report,
-            Signature(signature),
-          );
+        messages::coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
+          todo!("TODO PublishSlashReportTask")
         }
       },
       messages::ProcessorMessage::Substrate(msg) => match msg {
@@ -300,7 +243,7 @@ async fn handle_network(
         for (session, plans) in by_session {
           let set = ValidatorSet { network, session };
           SubstrateBlockPlans::set(&mut txn, set, block, &plans);
-          TributaryTransactionsFromProcessorMessages::send(
+          TributaryTransactions::send(
             &mut txn,
             set,
             &Transaction::SubstrateBlock { hash: block },
@@ -366,16 +309,10 @@ async fn main() {
     // Cleanup all historic Tributaries
     while let Some(to_cleanup) = TributaryCleanup::try_recv(&mut txn) {
       prune_tributary_db(to_cleanup);
-      // Remove the keys to confirm for this network
-      KeysToConfirm::take(&mut txn, to_cleanup);
-      KeySet::take(&mut txn, to_cleanup);
       // Drain the cosign intents created for this set
       while !Cosigning::<Db>::intended_cosigns(&mut txn, to_cleanup).is_empty() {}
       // Drain the transactions to publish for this set
-      while TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, to_cleanup).is_some() {}
-      while TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, to_cleanup).is_some() {}
-      // Drain the participants to remove for this set
-      while RemoveParticipant::try_recv(&mut txn, to_cleanup).is_some() {}
+      while TributaryTransactions::try_recv(&mut txn, to_cleanup).is_some() {}
       // Remove the SignSlashReport notification
       SignSlashReport::try_recv(&mut txn, to_cleanup);
     }
@@ -439,7 +376,7 @@ async fn main() {
       EphemeralEventStream::new(
         db.clone(),
         serai.clone(),
-        SeraiAddress((<Ristretto as Ciphersuite>::generator() * serai_key.deref()).to_bytes()),
+        PublicKey::from_raw((<Ristretto as Ciphersuite>::generator() * serai_key.deref()).to_bytes()),
       )
       .continually_run(substrate_ephemeral_task_def, vec![substrate_task]),
   );
@@ -480,32 +417,12 @@ async fn main() {
       .continually_run(substrate_task_def, vec![]),
   );

-  // Handle each of the networks
+  // Handle all of the Processors' messages
   for network in serai_client::primitives::NETWORKS {
     if network == NetworkId::Serai {
       continue;
     }
-    tokio::spawn(handle_network(db.clone(), message_queue.clone(), serai.clone(), network));
-  }
-
-  // Spawn the task to set keys
-  {
-    let (set_keys_task_def, set_keys_task) = Task::new();
-    tokio::spawn(
-      SetKeysTask::new(db.clone(), serai.clone()).continually_run(set_keys_task_def, vec![]),
-    );
-    // Forget its handle so it always runs in the background
-    core::mem::forget(set_keys_task);
-  }
-
-  // Spawn the task to publish slash reports
-  {
-    let (publish_slash_report_task_def, publish_slash_report_task) = Task::new();
-    tokio::spawn(
-      PublishSlashReportTask::new(db, serai).continually_run(publish_slash_report_task_def, vec![]),
-    );
-    // Always have this run in the background
-    core::mem::forget(publish_slash_report_task);
+    tokio::spawn(handle_processor_messages(db.clone(), message_queue.clone(), network));
   }

   // Run the spawned tasks ad-infinitum
@@ -19,7 +19,7 @@ use serai_task::ContinuallyRan;
 use serai_coordinator_tributary::Transaction;
 use serai_coordinator_p2p::P2p;

-use crate::{Db, KeySet};
+use crate::Db;

 pub(crate) struct SubstrateTask<P: P2p> {
   pub(crate) serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
@@ -47,9 +47,8 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
         };

         match msg {
-          messages::substrate::CoordinatorMessage::SetKeys { session, .. } => {
-            KeySet::set(&mut txn, ValidatorSet { network, session }, &());
-          }
+          // TODO: Stop trying to confirm the DKG
+          messages::substrate::CoordinatorMessage::SetKeys { .. } => todo!("TODO"),
           messages::substrate::CoordinatorMessage::SlashesReported { session } => {
             let prior_retired = crate::db::RetiredTributary::get(&txn, network);
             let next_to_be_retired =
@@ -21,21 +21,10 @@ use message_queue::{Service, Metadata, client::MessageQueue};

 use serai_cosign::{Faulted, CosignIntent, Cosigning};
 use serai_coordinator_substrate::{NewSetInformation, SignSlashReport};
-use serai_coordinator_tributary::{
-  Topic, Transaction, ProcessorMessages, CosignIntents, RecognizedTopics, ScanTributaryTask,
-};
+use serai_coordinator_tributary::{Transaction, ProcessorMessages, CosignIntents, ScanTributaryTask};
 use serai_coordinator_p2p::P2p;

-use crate::{
-  Db, TributaryTransactionsFromProcessorMessages, TributaryTransactionsFromDkgConfirmation,
-  RemoveParticipant, dkg_confirmation::ConfirmDkgTask,
-};
-
-create_db! {
-  Coordinator {
-    PublishOnRecognition: (set: ValidatorSet, topic: Topic) -> Transaction,
-  }
-}
+use crate::{Db, TributaryTransactions};

 db_channel! {
   Coordinator {
@@ -158,101 +147,12 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan
   }
 }
 
-#[must_use]
-async fn add_signed_unsigned_transaction<TD: DbTrait, P: P2p>(
-  tributary: &Tributary<TD, Transaction, P>,
-  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  mut tx: Transaction,
-) -> bool {
-  // If this is a signed transaction, sign it
-  if matches!(tx.kind(), TransactionKind::Signed(_, _)) {
-    tx.sign(&mut OsRng, tributary.genesis(), key);
-  }
-
-  let res = tributary.add_transaction(tx.clone()).await;
-  match &res {
-    // Fresh publication, already published
-    Ok(true | false) => {}
-    Err(
-      TransactionError::TooLargeTransaction |
-        TransactionError::InvalidSigner |
-        TransactionError::InvalidSignature |
-        TransactionError::InvalidContent,
-    ) => {
-      panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}");
-    }
-    // InvalidNonce may be out-of-order TXs, not invalid ones, but we only create nonce #n+1 after
-    // on-chain inclusion of the TX with nonce #n, so it is invalid within our context unless the
-    // issue is this transaction was already included on-chain
-    Err(TransactionError::InvalidNonce) => {
-      let TransactionKind::Signed(order, signed) = tx.kind() else {
-        panic!("non-Signed transaction had InvalidNonce");
-      };
-      let next_nonce = tributary
-        .next_nonce(&signed.signer, &order)
-        .await
-        .expect("signer who is a present validator didn't have a nonce");
-      assert!(next_nonce != signed.nonce);
-      // We're publishing an old transaction
-      if next_nonce > signed.nonce {
-        return true;
-      }
-      panic!("nonce in transaction wasn't contiguous with nonce on-chain");
-    }
-    // We've published too many transactions recently
-    Err(TransactionError::TooManyInMempool) => {
-      return false;
-    }
-    // This isn't a Provided transaction so this should never be hit
-    Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
-  }
-
-  true
-}
-
-async fn add_with_recognition_check<TD: DbTrait, P: P2p>(
-  set: ValidatorSet,
-  tributary_db: &mut TD,
-  tributary: &Tributary<TD, Transaction, P>,
-  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  tx: Transaction,
-) -> bool {
-  let kind = tx.kind();
-  match kind {
-    TransactionKind::Provided(_) => provide_transaction(set, tributary, tx).await,
-    TransactionKind::Unsigned | TransactionKind::Signed(_, _) => {
-      // If this is a transaction with signing data, check the topic is recognized before
-      // publishing
-      let topic = tx.topic();
-      let still_requires_recognition = if let Some(topic) = topic {
-        (topic.requires_recognition() && (!RecognizedTopics::recognized(tributary_db, set, topic)))
-          .then_some(topic)
-      } else {
-        None
-      };
-      if let Some(topic) = still_requires_recognition {
-        // Queue the transaction until the topic is recognized
-        // We use the Tributary DB for this so it's cleaned up when the Tributary DB is
-        let mut tributary_txn = tributary_db.txn();
-        PublishOnRecognition::set(&mut tributary_txn, set, topic, &tx);
-        tributary_txn.commit();
-      } else {
-        // Actually add the transaction
-        if !add_signed_unsigned_transaction(tributary, key, tx).await {
-          return false;
-        }
-      }
-    }
-  }
-  true
-}
-
-/// Adds all of the transactions sent via `TributaryTransactionsFromProcessorMessages`.
+/// Adds all of the transactions sent via `TributaryTransactions`.
 pub(crate) struct AddTributaryTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> {
   db: CD,
   tributary_db: TD,
   tributary: Tributary<TD, Transaction, P>,
-  set: NewSetInformation,
+  set: ValidatorSet,
   key: Zeroizing<<Ristretto as Ciphersuite>::F>,
 }
 impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactionsTask<CD, TD, P> {
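The removed `add_signed_unsigned_transaction` carries one piece of reasoning worth restating: an `InvalidNonce` rejection is only acceptable when the transaction is old, i.e. its nonce is below the next nonce on-chain, because under this scheme nonce #n+1 is only ever created after nonce #n is included. A self-contained sketch of that triage, with illustrative types rather than the tributary's:

/// Outcome of publishing a signed transaction whose nonce was rejected.
#[derive(Debug, PartialEq)]
enum NonceVerdict {
  /// The transaction's nonce is below the next on-chain nonce: it was already
  /// included, so republication may be treated as success
  AlreadyIncluded,
  /// The nonce skips ahead of the chain: under a scheme which only creates
  /// nonce #n+1 after #n is on-chain, this is an internal bug
  NonContiguous,
}

fn triage_invalid_nonce(next_onchain_nonce: u32, tx_nonce: u32) -> NonceVerdict {
  // The chain wouldn't reject the exact nonce it expects
  assert!(next_onchain_nonce != tx_nonce, "chain rejected a nonce it expects");
  if next_onchain_nonce > tx_nonce {
    NonceVerdict::AlreadyIncluded
  } else {
    NonceVerdict::NonContiguous
  }
}

fn main() {
  assert_eq!(triage_invalid_nonce(5, 3), NonceVerdict::AlreadyIncluded);
  assert_eq!(triage_invalid_nonce(5, 7), NonceVerdict::NonContiguous);
}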
@@ -261,87 +161,49 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactio
   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       let mut made_progress = false;
 
-      // Provide/add all transactions sent our way
       loop {
         let mut txn = self.db.txn();
-        let Some(tx) = TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, self.set.set)
-        else {
-          break;
-        };
-
-        if !add_with_recognition_check(
-          self.set.set,
-          &mut self.tributary_db,
-          &self.tributary,
-          &self.key,
-          tx,
-        )
-        .await
-        {
-          break;
-        }
-
-        made_progress = true;
-        txn.commit();
-      }
-
-      loop {
-        let mut txn = self.db.txn();
-        let Some(tx) = TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, self.set.set)
-        else {
-          break;
-        };
-
-        if !add_with_recognition_check(
-          self.set.set,
-          &mut self.tributary_db,
-          &self.tributary,
-          &self.key,
-          tx,
-        )
-        .await
-        {
-          break;
-        }
-
-        made_progress = true;
-        txn.commit();
-      }
-
-      // Provide/add all transactions due to newly recognized topics
-      loop {
-        let mut tributary_txn = self.tributary_db.txn();
-        let Some(topic) =
-          RecognizedTopics::try_recv_topic_requiring_recognition(&mut tributary_txn, self.set.set)
-        else {
-          break;
-        };
-        if let Some(tx) = PublishOnRecognition::take(&mut tributary_txn, self.set.set, topic) {
-          if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await {
-            break;
-          }
-        }
-
-        made_progress = true;
-        tributary_txn.commit();
-      }
-
-      // Publish any participant removals
-      loop {
-        let mut txn = self.db.txn();
-        let Some(participant) = RemoveParticipant::try_recv(&mut txn, self.set.set) else { break };
-        let tx = Transaction::RemoveParticipant {
-          participant: self.set.participant_indexes_reverse_lookup[&participant],
-          signed: Default::default(),
-        };
-        if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await {
-          break;
-        }
+        let Some(mut tx) = TributaryTransactions::try_recv(&mut txn, self.set) else { break };
+        let kind = tx.kind();
+        match kind {
+          TransactionKind::Provided(_) => provide_transaction(self.set, &self.tributary, tx).await,
+          TransactionKind::Unsigned | TransactionKind::Signed(_, _) => {
+            // If this is a signed transaction, sign it
+            if matches!(kind, TransactionKind::Signed(_, _)) {
+              tx.sign(&mut OsRng, self.tributary.genesis(), &self.key);
+            }
+
+            // Actually add the transaction
+            // TODO: If this is a preprocess, make sure the topic has been recognized
+            let res = self.tributary.add_transaction(tx.clone()).await;
+            match &res {
+              // Fresh publication, already published
+              Ok(true | false) => {}
+              Err(
+                TransactionError::TooLargeTransaction |
+                  TransactionError::InvalidSigner |
+                  TransactionError::InvalidNonce |
+                  TransactionError::InvalidSignature |
+                  TransactionError::InvalidContent,
+              ) => {
+                panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}");
+              }
+              // We've published too many transactions recently
+              // Drop this txn to try to publish it again later on a future iteration
+              Err(TransactionError::TooManyInMempool) => {
+                drop(txn);
+                break;
+              }
+              // This isn't a Provided transaction so this should never be hit
+              Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
+            }
+          }
+        }
 
         made_progress = true;
         txn.commit();
       }
 
       Ok(made_progress)
     }
   }
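The `TooManyInMempool` arm above makes the progress-tracking subtle: the database transaction is dropped without committing, so the channel re-delivers the same transaction on a future iteration. A toy model of that commit-to-consume behavior follows; it assumes nothing about serai-db's actual implementation and exists only to show the retry semantics:

/// A toy stand-in for a db channel: receiving consumes only on commit.
struct Channel { queue: Vec<u32>, cursor: usize }
struct Txn<'a> { chan: &'a mut Channel, read: usize }

impl Channel {
  fn txn(&mut self) -> Txn<'_> { let read = self.cursor; Txn { chan: self, read } }
}
impl<'a> Txn<'a> {
  fn try_recv(&mut self) -> Option<u32> {
    let item = self.chan.queue.get(self.read).copied();
    if item.is_some() { self.read += 1; }
    item
  }
  // Only committing advances the durable cursor; dropping the txn re-delivers
  fn commit(self) { self.chan.cursor = self.read; }
}

fn main() {
  let mut chan = Channel { queue: vec![1, 2], cursor: 0 };

  // Backpressure: drop the txn, leaving item 1 in the channel
  let mut txn = chan.txn();
  assert_eq!(txn.try_recv(), Some(1));
  drop(txn);

  // A later iteration sees the same item again
  let mut txn = chan.txn();
  assert_eq!(txn.try_recv(), Some(1));
  txn.commit();
  assert_eq!(chan.txn().try_recv(), Some(2));
}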
@@ -461,8 +323,6 @@ async fn scan_on_new_block<CD: DbTrait, TD: DbTrait, P: P2p>(
 /// - Spawn the ScanTributaryTask
 /// - Spawn the ProvideCosignCosignedTransactionsTask
 /// - Spawn the TributaryProcessorMessagesTask
-/// - Spawn the AddTributaryTransactionsTask
-/// - Spawn the ConfirmDkgTask
 /// - Spawn the SignSlashReportTask
 /// - Iterate the scan task whenever a new block occurs (not just on the standard interval)
 pub(crate) async fn spawn_tributary<P: P2p>(
@@ -543,45 +403,38 @@ pub(crate) async fn spawn_tributary<P: P2p>(
   // Spawn the scan task
   let (scan_tributary_task_def, scan_tributary_task) = Task::new();
   tokio::spawn(
-    ScanTributaryTask::<_, P>::new(tributary_db.clone(), set.clone(), reader)
+    ScanTributaryTask::<_, P>::new(tributary_db.clone(), &set, reader)
       // This is the only handle for this TributaryProcessorMessagesTask, so when this task is
       // dropped, it will be too
      .continually_run(scan_tributary_task_def, vec![scan_tributary_messages_task]),
   );
 
-  // Spawn the add transactions task
-  let (add_tributary_transactions_task_def, add_tributary_transactions_task) = Task::new();
-  tokio::spawn(
-    (AddTributaryTransactionsTask {
-      db: db.clone(),
-      tributary_db: tributary_db.clone(),
-      tributary: tributary.clone(),
-      set: set.clone(),
-      key: serai_key.clone(),
-    })
-    .continually_run(add_tributary_transactions_task_def, vec![]),
-  );
-
-  // Spawn the task to confirm the DKG result
-  let (confirm_dkg_task_def, confirm_dkg_task) = Task::new();
-  tokio::spawn(
-    ConfirmDkgTask::new(db.clone(), set.clone(), tributary_db.clone(), serai_key.clone())
-      .continually_run(confirm_dkg_task_def, vec![add_tributary_transactions_task]),
-  );
-
   // Spawn the sign slash report task
   let (sign_slash_report_task_def, sign_slash_report_task) = Task::new();
   tokio::spawn(
     (SignSlashReportTask {
       db: db.clone(),
-      tributary_db,
+      tributary_db: tributary_db.clone(),
       tributary: tributary.clone(),
       set: set.clone(),
-      key: serai_key,
+      key: serai_key.clone(),
     })
     .continually_run(sign_slash_report_task_def, vec![]),
   );
 
+  // Spawn the add transactions task
+  let (add_tributary_transactions_task_def, add_tributary_transactions_task) = Task::new();
+  tokio::spawn(
+    (AddTributaryTransactionsTask {
+      db: db.clone(),
+      tributary_db,
+      tributary: tributary.clone(),
+      set: set.set,
+      key: serai_key,
+    })
+    .continually_run(add_tributary_transactions_task_def, vec![]),
+  );
+
   // Whenever a new block occurs, immediately run the scan task
   // This function also preserves the ProvideCosignCosignedTransactionsTask handle until the
   // Tributary is retired, ensuring it isn't dropped prematurely and that the task don't run ad
@@ -591,6 +444,10 @@ pub(crate) async fn spawn_tributary<P: P2p>(
     set.set,
     tributary,
     scan_tributary_task,
-    vec![provide_cosign_cosigned_transactions_task, confirm_dkg_task, sign_slash_report_task],
+    vec![
+      provide_cosign_cosigned_transactions_task,
+      sign_slash_report_task,
+      add_tributary_transactions_task,
+    ],
   ));
 }
@@ -22,9 +22,6 @@ bitvec = { version = "1", default-features = false, features = ["std"] }
 
 scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
-
-dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
-
 serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }
 
 log = { version = "0.4", default-features = false, features = ["std"] }
@@ -4,7 +4,7 @@ use std::sync::Arc;
 use futures::stream::{StreamExt, FuturesOrdered};
 
 use serai_client::{
-  primitives::{NetworkId, SeraiAddress, EmbeddedEllipticCurve},
+  primitives::{PublicKey, NetworkId, EmbeddedEllipticCurve},
   validator_sets::primitives::MAX_KEY_SHARES_PER_SET,
   Serai,
 };
@@ -26,14 +26,14 @@ create_db!(
 pub struct EphemeralEventStream<D: Db> {
   db: D,
   serai: Arc<Serai>,
-  validator: SeraiAddress,
+  validator: PublicKey,
 }
 
 impl<D: Db> EphemeralEventStream<D> {
   /// Create a new ephemeral event stream.
   ///
   /// Only one of these may exist over the provided database.
-  pub fn new(db: D, serai: Arc<Serai>, validator: SeraiAddress) -> Self {
+  pub fn new(db: D, serai: Arc<Serai>, validator: PublicKey) -> Self {
     Self { db, serai, validator }
   }
 }
@@ -145,10 +145,6 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
             "block #{block_number} declared a new set but didn't have the participants"
           ))?
         };
-        let validators = validators
-          .into_iter()
-          .map(|(validator, weight)| (SeraiAddress::from(validator), weight))
-          .collect::<Vec<_>>();
         let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
         if in_set {
           if u16::try_from(validators.len()).is_err() {
@@ -181,16 +177,14 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
           embedded_elliptic_curve_keys.push_back(async move {
             tokio::try_join!(
               // One future to fetch the substrate embedded key
-              serai.embedded_elliptic_curve_key(
-                validator.into(),
-                EmbeddedEllipticCurve::Embedwards25519
-              ),
+              serai
+                .embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519),
               // One future to fetch the external embedded key, if there is a distinct curve
               async {
                 // `embedded_elliptic_curves` is documented to have the second entry be the
                 // network-specific curve (if it exists and is distinct from Embedwards25519)
                 if let Some(curve) = set.network.embedded_elliptic_curves().get(1) {
-                  serai.embedded_elliptic_curve_key(validator.into(), *curve).await.map(Some)
+                  serai.embedded_elliptic_curve_key(validator, *curve).await.map(Some)
                 } else {
                   Ok(None)
                 }
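Both sides of this hunk keep the same concurrency shape: the Embedwards25519 key and the optional network-specific key are fetched in parallel with `tokio::try_join!`, which yields both values on success and aborts on the first error. A small sketch of that shape, with hypothetical fetchers in place of the RPC calls:

use tokio::time::{sleep, Duration};

// Hypothetical fetchers standing in for the two key queries
async fn fetch_primary() -> Result<u32, String> {
  sleep(Duration::from_millis(10)).await;
  Ok(1)
}
async fn fetch_secondary(distinct_curve: bool) -> Result<Option<u32>, String> {
  // Mirrors the diff: only fetch when a distinct curve exists, else Ok(None)
  if distinct_curve { fetch_primary().await.map(Some) } else { Ok(None) }
}

#[tokio::main]
async fn main() -> Result<(), String> {
  // Both futures run concurrently; the first Err aborts the join
  let (primary, secondary) = tokio::try_join!(fetch_primary(), fetch_secondary(true))?;
  assert_eq!((primary, secondary), (1, Some(1)));
  Ok(())
}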
@@ -221,22 +215,19 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
               }
             }
 
-            let mut new_set = NewSetInformation {
-              set: *set,
-              serai_block: block.block_hash,
-              declaration_time: block.time,
-              // TODO: Why do we have this as an explicit field here?
-              // Shouldn't this be inlined into the Processor's key gen code, where it's used?
-              threshold: ((total_weight * 2) / 3) + 1,
-              validators,
-              evrf_public_keys,
-              participant_indexes: Default::default(),
-              participant_indexes_reverse_lookup: Default::default(),
-            };
-            // These aren't serialized, and we immediately serialize and drop this, so this isn't
-            // necessary. It's just good practice not have this be dirty
-            new_set.init_participant_indexes();
-            crate::NewSet::send(&mut txn, &new_set);
+            crate::NewSet::send(
+              &mut txn,
+              &NewSetInformation {
+                set: *set,
+                serai_block: block.block_hash,
+                declaration_time: block.time,
+                // TODO: Why do we have this as an explicit field here?
+                // Shouldn't thiis be inlined into the Processor's key gen code, where it's used?
+                threshold: ((total_weight * 2) / 3) + 1,
+                validators,
+                evrf_public_keys,
+              },
+            );
           }
         }
 
@@ -2,16 +2,12 @@
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
 
-use std::collections::HashMap;
-
 use scale::{Encode, Decode};
-use borsh::{BorshSerialize, BorshDeserialize};
+use borsh::{io, BorshSerialize, BorshDeserialize};
 
-use dkg::Participant;
-
 use serai_client::{
-  primitives::{NetworkId, SeraiAddress, Signature},
-  validator_sets::primitives::{Session, ValidatorSet, KeyPair, SlashReport},
+  primitives::{NetworkId, PublicKey, Signature, SeraiAddress},
+  validator_sets::primitives::{Session, ValidatorSet, KeyPair},
   in_instructions::primitives::SignedBatch,
   Transaction,
 };
@@ -30,9 +26,22 @@ pub use publish_batch::PublishBatchTask;
 mod publish_slash_report;
 pub use publish_slash_report::PublishSlashReportTask;
 
+fn borsh_serialize_validators<W: io::Write>(
+  validators: &Vec<(PublicKey, u16)>,
+  writer: &mut W,
+) -> Result<(), io::Error> {
+  // This doesn't use `encode_to` as `encode_to` panics if the writer returns an error
+  writer.write_all(&validators.encode())
+}
+
+fn borsh_deserialize_validators<R: io::Read>(
+  reader: &mut R,
+) -> Result<Vec<(PublicKey, u16)>, io::Error> {
+  Decode::decode(&mut scale::IoReader(reader)).map_err(io::Error::other)
+}
+
 /// The information for a new set.
 #[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
-#[borsh(init = init_participant_indexes)]
 pub struct NewSetInformation {
   /// The set.
   pub set: ValidatorSet,
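The two helpers added here bridge SCALE and borsh: the field is SCALE-encoded, then written through borsh's writer, with the inverse decoding through `scale::IoReader`. A standalone round-trip of the same idea, assuming the workspace's `scale` rename of parity-scale-codec and a `[u8; 32]` in place of `PublicKey` (both assumptions, to keep the sketch self-contained):

use std::io;
use scale::{Encode, Decode, IoReader};

fn serialize_validators<W: io::Write>(
  validators: &Vec<([u8; 32], u16)>,
  writer: &mut W,
) -> io::Result<()> {
  // As in the diff: write the SCALE encoding, avoiding `encode_to`'s panics
  writer.write_all(&validators.encode())
}

fn deserialize_validators<R: io::Read>(reader: &mut R) -> io::Result<Vec<([u8; 32], u16)>> {
  Decode::decode(&mut IoReader(reader)).map_err(io::Error::other)
}

fn main() -> io::Result<()> {
  let validators = vec![([0u8; 32], 2u16), ([1u8; 32], 1u16)];
  let mut buffer = Vec::new();
  serialize_validators(&validators, &mut buffer)?;
  // Decoding what was just encoded yields the original value
  assert_eq!(deserialize_validators(&mut buffer.as_slice())?, validators);
  Ok(())
}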
@@ -43,37 +52,13 @@ pub struct NewSetInformation {
   /// The threshold to use.
   pub threshold: u16,
   /// The validators, with the amount of key shares they have.
-  pub validators: Vec<(SeraiAddress, u16)>,
+  #[borsh(
+    serialize_with = "borsh_serialize_validators",
+    deserialize_with = "borsh_deserialize_validators"
+  )]
+  pub validators: Vec<(PublicKey, u16)>,
   /// The eVRF public keys.
-  ///
-  /// This will have the necessary copies of the keys proper for each validator's weight,
-  /// accordingly syncing up with `participant_indexes`.
   pub evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
-  /// The participant indexes, indexed by their validator.
-  #[borsh(skip)]
-  pub participant_indexes: HashMap<SeraiAddress, Vec<Participant>>,
-  /// The validators, indexed by their participant indexes.
-  #[borsh(skip)]
-  pub participant_indexes_reverse_lookup: HashMap<Participant, SeraiAddress>,
-}
-
-impl NewSetInformation {
-  fn init_participant_indexes(&mut self) {
-    let mut next_i = 1;
-    self.participant_indexes = HashMap::with_capacity(self.validators.len());
-    self.participant_indexes_reverse_lookup = HashMap::with_capacity(self.validators.len());
-    for (validator, weight) in &self.validators {
-      let mut these_is = Vec::with_capacity((*weight).into());
-      for _ in 0 .. *weight {
-        let this_i = Participant::new(next_i).unwrap();
-        next_i += 1;
-
-        these_is.push(this_i);
-        self.participant_indexes_reverse_lookup.insert(this_i, *validator);
-      }
-      self.participant_indexes.insert(*validator, these_is);
-    }
-  }
 }
 
 mod _public_db {
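For reference, the deleted `init_participant_indexes` gives each validator a contiguous run of 1-based indexes, one per key share, plus a reverse lookup from index to validator. A worked sketch of that assignment, with a plain `u16` and `&'static str` standing in for dkg's `Participant` and the validator key type:

use std::collections::HashMap;

// The removed index assignment, sketched with stand-in types
fn participant_indexes(
  validators: &[(&'static str, u16)],
) -> (HashMap<&'static str, Vec<u16>>, HashMap<u16, &'static str>) {
  let mut next_i = 1;
  let mut forward = HashMap::new();
  let mut reverse = HashMap::new();
  for (validator, weight) in validators {
    // One index per key share, taken contiguously
    let these_is = (next_i .. next_i + weight).collect::<Vec<_>>();
    for i in &these_is {
      reverse.insert(*i, *validator);
    }
    next_i += weight;
    forward.insert(*validator, these_is);
  }
  (forward, reverse)
}

fn main() {
  // A validator with two key shares occupies two contiguous indexes
  let (forward, reverse) = participant_indexes(&[("alice", 2), ("bob", 1)]);
  assert_eq!(forward["alice"], vec![1, 2]);
  assert_eq!(forward["bob"], vec![3]);
  assert_eq!(reverse[&3], "bob");
}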
@@ -190,6 +175,8 @@ impl Keys {
 pub struct SignedBatches;
 impl SignedBatches {
   /// Send a `SignedBatch` to publish onto Serai.
+  ///
+  /// These will be published sequentially. Out-of-order sending risks hanging the task.
   pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) {
     _public_db::SignedBatches::send(txn, batch.batch.network, batch);
   }
@@ -198,6 +185,10 @@ impl SignedBatches {
   }
 }
 
+/// The slash report was invalid.
+#[derive(Debug)]
+pub struct InvalidSlashReport;
+
 /// The slash reports to publish onto Serai.
 pub struct SlashReports;
 impl SlashReports {
@@ -205,25 +196,30 @@ impl SlashReports {
   ///
   /// This only saves the most recent slashes as only a single session is eligible to have its
   /// slashes reported at once.
+  ///
+  /// Returns Err if the slashes are invalid. Returns Ok if the slashes weren't detected as
+  /// invalid. Slashes may be considered invalid by the Serai blockchain later even if not detected
+  /// as invalid here.
   pub fn set(
     txn: &mut impl DbTxn,
     set: ValidatorSet,
-    slash_report: SlashReport,
+    slashes: Vec<(SeraiAddress, u32)>,
     signature: Signature,
-  ) {
+  ) -> Result<(), InvalidSlashReport> {
     // If we have a more recent slash report, don't write this historic one
     if let Some((existing_session, _)) = _public_db::SlashReports::get(txn, set.network) {
       if existing_session.0 >= set.session.0 {
-        return;
+        return Ok(());
       }
     }
 
     let tx = serai_client::validator_sets::SeraiValidatorSets::report_slashes(
       set.network,
-      slash_report,
+      slashes.try_into().map_err(|_| InvalidSlashReport)?,
       signature,
     );
     _public_db::SlashReports::set(txn, set.network, &(set.session, tx.encode()));
+    Ok(())
   }
   pub(crate) fn take(txn: &mut impl DbTxn, network: NetworkId) -> Option<(Session, Transaction)> {
     let (session, tx) = _public_db::SlashReports::take(txn, network)?;
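Both versions of the slash-report flow enforce the same publication window, which is easy to misread in the diff: the report for session #n is publishable only while session #n+1 is the current session; a later current session makes the report stale, and an earlier one means the local node is behind the chain. The decision table as a runnable sketch, with bare integers standing in for `Session`:

#[derive(Debug, PartialEq)]
enum SlashReportAction {
  /// Session #n+1 is active: the report for session #n may be published
  Publish,
  /// Session #n+1 already retired: drain the report without publishing
  Stale,
  /// The chain hasn't reached session #n+1: the local node is likely resyncing
  NodeBehind,
}

fn slash_report_action(report_session: u32, current_session: Option<u32>) -> SlashReportAction {
  let session_after = report_session + 1;
  // Option's ordering places None below every Some, matching the diff's checks
  if current_session > Some(session_after) {
    SlashReportAction::Stale
  } else if current_session == Some(session_after) {
    SlashReportAction::Publish
  } else {
    SlashReportAction::NodeBehind
  }
}

fn main() {
  assert_eq!(slash_report_action(4, Some(5)), SlashReportAction::Publish);
  assert_eq!(slash_report_action(4, Some(6)), SlashReportAction::Stale);
  assert_eq!(slash_report_action(4, Some(4)), SlashReportAction::NodeBehind);
}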
@@ -1,21 +1,14 @@
 use core::future::Future;
 use std::sync::Arc;
 
-#[rustfmt::skip]
-use serai_client::{primitives::NetworkId, in_instructions::primitives::SignedBatch, SeraiError, Serai};
-
-use serai_db::{Get, DbTxn, Db, create_db};
+use serai_db::{DbTxn, Db};
+use serai_client::{primitives::NetworkId, SeraiError, Serai};
 
 use serai_task::ContinuallyRan;
 
 use crate::SignedBatches;
 
-create_db!(
-  CoordinatorSubstrate {
-    LastPublishedBatch: (network: NetworkId) -> u32,
-    BatchesToPublish: (network: NetworkId, batch: u32) -> SignedBatch,
-  }
-);
-
 /// Publish `SignedBatch`s from `SignedBatches` onto Serai.
 pub struct PublishBatchTask<D: Db> {
   db: D,
@@ -41,52 +34,32 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
 
   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
-      // Read from SignedBatches, which is sequential, into our own mapping
+      let mut made_progress = false;
+
       loop {
         let mut txn = self.db.txn();
         let Some(batch) = SignedBatches::try_recv(&mut txn, self.network) else {
+          // No batch to publish at this time
           break;
         };
 
-        // If this is a Batch not yet published, save it into our unordered mapping
-        if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id) {
-          BatchesToPublish::set(&mut txn, self.network, batch.batch.id, &batch);
-        }
-
-        txn.commit();
-      }
-
-      // Synchronize our last published batch with the Serai network's
-      let next_to_publish = {
-        // This uses the latest finalized block, not the latest cosigned block, which should be
-        // fine as in the worst case, the only impact is no longer attempting TX publication
+        // Publish this Batch if it hasn't already been published
         let serai = self.serai.as_of_latest_finalized_block().await?;
         let last_batch = serai.in_instructions().last_batch_for_network(self.network).await?;
-        let mut txn = self.db.txn();
-        let mut our_last_batch = LastPublishedBatch::get(&txn, self.network);
-        while our_last_batch < last_batch {
-          let next_batch = our_last_batch.map(|batch| batch + 1).unwrap_or(0);
-          // Clean up the Batch to publish since it's already been published
-          BatchesToPublish::take(&mut txn, self.network, next_batch);
-          our_last_batch = Some(next_batch);
-        }
-        if let Some(last_batch) = our_last_batch {
-          LastPublishedBatch::set(&mut txn, self.network, &last_batch);
-        }
-        last_batch.map(|batch| batch + 1).unwrap_or(0)
-      };
-
-      let made_progress =
-        if let Some(batch) = BatchesToPublish::get(&self.db, self.network, next_to_publish) {
+        if last_batch < Some(batch.batch.id) {
+          // This stream of Batches *should* be sequential within the larger context of the Serai
+          // coordinator. In this library, we use a more relaxed definition and don't assert
+          // sequence. This does risk hanging the task, if Batch #n+1 is sent before Batch #n, but
+          // that is a documented fault of the `SignedBatches` API.
           self
             .serai
             .publish(&serai_client::in_instructions::SeraiInInstructions::execute_batch(batch))
             .await?;
-          true
-        } else {
-          false
-        };
+        }
+
+        txn.commit();
+        made_progress = true;
+      }
       Ok(made_progress)
     }
   }
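The rewritten loop's idempotency gate is the comparison `last_batch < Some(batch.batch.id)`, where `None` (no batch executed yet) orders below every `Some`, so batch #0 is publishable on a fresh chain. A sketch of just that gate:

/// Whether a batch with this id still needs publication, given the last batch
/// id the chain reports having executed (None if it has executed none)
fn needs_publication(last_onchain: Option<u32>, batch_id: u32) -> bool {
  // Option<u32> ordering: None < Some(0) < Some(1) < ...
  last_onchain < Some(batch_id)
}

fn main() {
  assert!(needs_publication(None, 0)); // nothing on-chain yet
  assert!(needs_publication(Some(2), 3)); // the next batch
  assert!(!needs_publication(Some(3), 3)); // already executed
  assert!(!needs_publication(Some(3), 1)); // historic batch
}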
@@ -22,82 +22,66 @@ impl<D: Db> PublishSlashReportTask<D> {
   }
 }
 
-impl<D: Db> PublishSlashReportTask<D> {
-  // Returns if a slash report was successfully published
-  async fn publish(&mut self, network: NetworkId) -> Result<bool, String> {
-    let mut txn = self.db.txn();
-    let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else {
-      // No slash report to publish
-      return Ok(false);
-    };
-
-    // This uses the latest finalized block, not the latest cosigned block, which should be
-    // fine as in the worst case, the only impact is no longer attempting TX publication
-    let serai = self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
-    let serai = serai.validator_sets();
-    let session_after_slash_report = Session(session.0 + 1);
-    let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
-    let current_session = current_session.map(|session| session.0);
-    // Only attempt to publish the slash report for session #n while session #n+1 is still
-    // active
-    let session_after_slash_report_retired = current_session > Some(session_after_slash_report.0);
-    if session_after_slash_report_retired {
-      // Commit the txn to drain this slash report from the database and not try it again later
-      txn.commit();
-      return Ok(false);
-    }
-
-    if Some(session_after_slash_report.0) != current_session {
-      // We already checked the current session wasn't greater, and they're not equal
-      assert!(current_session < Some(session_after_slash_report.0));
-      // This would mean the Serai node is resyncing and is behind where it prior was
-      Err("have a slash report for a session Serai has yet to retire".to_string())?;
-    }
-
-    // If this session which should publish a slash report already has, move on
-    let key_pending_slash_report =
-      serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
-    if key_pending_slash_report.is_none() {
-      txn.commit();
-      return Ok(false);
-    };
-
-    match self.serai.publish(&slash_report).await {
-      Ok(()) => {
-        txn.commit();
-        Ok(true)
-      }
-      // This could be specific to this TX (such as an already in mempool error) and it may be
-      // worthwhile to continue iteration with the other pending slash reports. We assume this
-      // error ephemeral and that the latency incurred for this ephemeral error to resolve is
-      // miniscule compared to the window available to publish the slash report. That makes
-      // this a non-issue.
-      Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}")),
-    }
-  }
-}
-
 impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
   type Error = String;
 
   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       let mut made_progress = false;
-      let mut error = None;
       for network in serai_client::primitives::NETWORKS {
         if network == NetworkId::Serai {
           continue;
         };
 
-        let network_res = self.publish(network).await;
-        // We made progress if any network successfully published their slash report
-        made_progress |= network_res == Ok(true);
-        // We want to yield the first error *after* attempting for every network
-        error = error.or(network_res.err());
-      }
-      // Yield the error
-      if let Some(error) = error {
-        Err(error)?
+        let mut txn = self.db.txn();
+        let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else {
+          // No slash report to publish
+          continue;
+        };
+
+        let serai =
+          self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
+        let serai = serai.validator_sets();
+        let session_after_slash_report = Session(session.0 + 1);
+        let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
+        let current_session = current_session.map(|session| session.0);
+        // Only attempt to publish the slash report for session #n while session #n+1 is still
+        // active
+        let session_after_slash_report_retired =
+          current_session > Some(session_after_slash_report.0);
+        if session_after_slash_report_retired {
+          // Commit the txn to drain this slash report from the database and not try it again later
+          txn.commit();
+          continue;
+        }
+
+        if Some(session_after_slash_report.0) != current_session {
+          // We already checked the current session wasn't greater, and they're not equal
+          assert!(current_session < Some(session_after_slash_report.0));
+          // This would mean the Serai node is resyncing and is behind where it prior was
+          Err("have a slash report for a session Serai has yet to retire".to_string())?;
+        }
+
+        // If this session which should publish a slash report already has, move on
+        let key_pending_slash_report =
+          serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
+        if key_pending_slash_report.is_none() {
+          txn.commit();
+          continue;
+        };
+
+        match self.serai.publish(&slash_report).await {
+          Ok(()) => {
+            txn.commit();
+            made_progress = true;
+          }
+          // This could be specific to this TX (such as an already in mempool error) and it may be
+          // worthwhile to continue iteration with the other pending slash reports. We assume this
+          // error ephemeral and that the latency incurred for this ephemeral error to resolve is
+          // miniscule compared to the window available to publish the slash report. That makes
+          // this a non-issue.
+          Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}"))?,
+        }
       }
       Ok(made_progress)
     }
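The left-hand refactor's `run_iteration` shows a deliberate error-handling shape: attempt every network, record whether any succeeded, and only then surface the first error, so one failing network can't starve the others. A compact sketch of that accumulation, with a hypothetical per-network publisher:

// Hypothetical publisher: network 2 fails, network 3 publishes, others no-op
fn publish_for(network: u8) -> Result<bool, String> {
  match network {
    2 => Err(format!("network {network} unreachable")),
    3 => Ok(true),
    _ => Ok(false),
  }
}

fn run_all(networks: &[u8]) -> Result<bool, String> {
  let mut made_progress = false;
  let mut error = None;
  for network in networks {
    let res = publish_for(*network);
    // Progress if any network published; keep only the first error
    made_progress |= res == Ok(true);
    error = error.or(res.err());
  }
  // Yield the deferred error only after every network was attempted
  if let Some(error) = error {
    return Err(error);
  }
  Ok(made_progress)
}

fn main() {
  assert_eq!(run_all(&[1, 3]), Ok(true));
  assert_eq!(run_all(&[2, 3]), Err("network 2 unreachable".to_string()));
}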
@@ -39,8 +39,6 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
           continue;
         };
 
-        // This uses the latest finalized block, not the latest cosigned block, which should be
-        // fine as in the worst case, the only impact is no longer attempting TX publication
         let serai =
           self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
         let serai = serai.validator_sets();
@@ -21,14 +21,13 @@ workspace = true
 zeroize = { version = "^1.5", default-features = false, features = ["std"] }
 rand_core = { version = "0.6", default-features = false, features = ["std"] }
 
-scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
-borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
-
 blake2 = { version = "0.10", default-features = false, features = ["std"] }
 ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] }
-dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
 schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] }
 
+scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
+borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
+
 serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
 
 serai-db = { path = "../../common/db" }
@@ -15,35 +15,20 @@ use crate::transaction::SigningProtocolRound;
 
 /// A topic within the database which the group participates in
 #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
-pub enum Topic {
+pub(crate) enum Topic {
   /// Vote to remove a participant
-  RemoveParticipant {
-    /// The participant to remove
-    participant: SeraiAddress,
-  },
+  RemoveParticipant { participant: SeraiAddress },
 
   // DkgParticipation isn't represented here as participations are immediately sent to the
   // processor, not accumulated within this databse
   /// Participation in the signing protocol to confirm the DKG results on Substrate
-  DkgConfirmation {
-    /// The attempt number this is for
-    attempt: u32,
-    /// The round of the signing protocol
-    round: SigningProtocolRound,
-  },
+  DkgConfirmation { attempt: u32, round: SigningProtocolRound },
 
   /// The local view of the SlashReport, to be aggregated into the final SlashReport
   SlashReport,
 
   /// Participation in a signing protocol
-  Sign {
-    /// The ID of the signing protocol
-    id: VariantSignId,
-    /// The attempt number this is for
-    attempt: u32,
-    /// The round of the signing protocol
-    round: SigningProtocolRound,
-  },
+  Sign { id: VariantSignId, attempt: u32, round: SigningProtocolRound },
 }
 
 enum Participating {
@@ -94,9 +79,9 @@ impl Topic {
     }
   }
 
-  /// The SignId for this topic
-  ///
-  /// Returns None if Topic isn't Topic::Sign
+  // The SignId for this topic
+  //
+  // Returns None if Topic isn't Topic::Sign
   pub(crate) fn sign_id(self, set: ValidatorSet) -> Option<messages::sign::SignId> {
     #[allow(clippy::match_same_arms)]
     match self {
@@ -107,33 +92,6 @@ impl Topic {
     }
   }
 
-  /// The SignId for this DKG Confirmation.
-  ///
-  /// This is undefined except for being consistent to the DKG Confirmation signing protocol and
-  /// unique across sets.
-  ///
-  /// Returns None if Topic isn't Topic::DkgConfirmation.
-  pub(crate) fn dkg_confirmation_sign_id(
-    self,
-    set: ValidatorSet,
-  ) -> Option<messages::sign::SignId> {
-    #[allow(clippy::match_same_arms)]
-    match self {
-      Topic::RemoveParticipant { .. } => None,
-      Topic::DkgConfirmation { attempt, round: _ } => Some({
-        let id = {
-          let mut id = [0; 32];
-          let encoded_set = set.encode();
-          id[.. encoded_set.len()].copy_from_slice(&encoded_set);
-          VariantSignId::Batch(id)
-        };
-        SignId { session: set.session, id, attempt }
-      }),
-      Topic::SlashReport { .. } => None,
-      Topic::Sign { .. } => None,
-    }
-  }
-
   /// The topic which precedes this topic as a prerequisite
   ///
   /// The preceding topic must define this topic as succeeding
@@ -180,22 +138,21 @@ impl Topic {
     }
   }
 
-  /// If this topic requires recognition before entries are permitted for it.
-  pub fn requires_recognition(&self) -> bool {
+  fn requires_whitelisting(&self) -> bool {
     #[allow(clippy::match_same_arms)]
     match self {
-      // We don't require recognition to remove a participant
+      // We don't require whitelisting to remove a participant
       Topic::RemoveParticipant { .. } => false,
-      // We don't require recognition for the first attempt, solely the re-attempts
+      // We don't require whitelisting for the first attempt, solely the re-attempts
       Topic::DkgConfirmation { attempt, .. } => *attempt != 0,
-      // We don't require recognition for the slash report
+      // We don't require whitelisting for the slash report
       Topic::SlashReport { .. } => false,
-      // We do require recognition for every sign protocol
+      // We do require whitelisting for every sign protocol
       Topic::Sign { .. } => true,
     }
   }
 
-  fn required_participation(&self, n: u16) -> u16 {
+  fn required_participation(&self, n: u64) -> u64 {
     let _ = self;
     // All of our topics require 2/3rds participation
     ((2 * n) / 3) + 1
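`required_participation` survives the u16-to-u64 widening unchanged: with integer division, `((2 * n) / 3) + 1` is the smallest weight strictly greater than two-thirds of `n`. A few worked values:

// The 2/3rds-plus-one participation threshold, now over u64 weights
fn required_participation(n: u64) -> u64 {
  ((2 * n) / 3) + 1
}

fn main() {
  // floor(2n/3) + 1: the smallest integer strictly greater than 2n/3
  assert_eq!(required_participation(3), 3); // 2/3 of 3 is 2, so 3 is required
  assert_eq!(required_participation(4), 3); // 2/3 of 4 is ~2.67, so 3 suffices
  assert_eq!(required_participation(6), 5); // 2/3 of 6 is 4, so 5 is required
}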
@@ -241,11 +198,11 @@ create_db!(
     // If this block has already been cosigned.
     Cosigned: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> (),
 
-    // The plans to recognize upon a `Transaction::SubstrateBlock` being included on-chain.
+    // The plans to whitelist upon a `Transaction::SubstrateBlock` being included on-chain.
     SubstrateBlockPlans: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> Vec<[u8; 32]>,
 
     // The weight accumulated for a topic.
-    AccumulatedWeight: (set: ValidatorSet, topic: Topic) -> u16,
+    AccumulatedWeight: (set: ValidatorSet, topic: Topic) -> u64,
     // The entries accumulated for a topic, by validator.
     Accumulated: <D: Borshy>(set: ValidatorSet, topic: Topic, validator: SeraiAddress) -> D,
 
@@ -256,12 +213,7 @@ create_db!(
 
 db_channel!(
   CoordinatorTributary {
-    // Messages to send to the processor
     ProcessorMessages: (set: ValidatorSet) -> messages::CoordinatorMessage,
-    // Messages for the DKG confirmation
-    DkgConfirmationMessages: (set: ValidatorSet) -> messages::sign::CoordinatorMessage,
-    // Topics which have been explicitly recognized
-    RecognizedTopics: (set: ValidatorSet) -> Topic,
   }
 );
 
@@ -310,7 +262,7 @@ impl TributaryDb {
     );
     ActivelyCosigning::set(txn, set, &substrate_block_hash);
 
-    Self::recognize_topic(
+    TributaryDb::recognize_topic(
       txn,
       set,
       Topic::Sign {
@@ -340,10 +292,6 @@ impl TributaryDb {
 
   pub(crate) fn recognize_topic(txn: &mut impl DbTxn, set: ValidatorSet, topic: Topic) {
     AccumulatedWeight::set(txn, set, topic, &0);
-    RecognizedTopics::send(txn, set, &topic);
-  }
-  pub(crate) fn recognized(getter: &impl Get, set: ValidatorSet, topic: Topic) -> bool {
-    AccumulatedWeight::get(getter, set, topic).is_some()
   }
 
   pub(crate) fn start_of_block(txn: &mut impl DbTxn, set: ValidatorSet, block_number: u64) {
@@ -364,12 +312,6 @@ impl TributaryDb {
       Self::recognize_topic(txn, set, topic);
       if let Some(id) = topic.sign_id(set) {
         Self::send_message(txn, set, messages::sign::CoordinatorMessage::Reattempt { id });
-      } else if let Some(id) = topic.dkg_confirmation_sign_id(set) {
-        DkgConfirmationMessages::send(
-          txn,
-          set,
-          &messages::sign::CoordinatorMessage::Reattempt { id },
-        );
       }
     }
   }
@@ -397,24 +339,19 @@ impl TributaryDb {
     txn: &mut impl DbTxn,
     set: ValidatorSet,
     validators: &[SeraiAddress],
-    total_weight: u16,
+    total_weight: u64,
     block_number: u64,
     topic: Topic,
     validator: SeraiAddress,
-    validator_weight: u16,
+    validator_weight: u64,
     data: &D,
   ) -> DataSet<D> {
     // This function will only be called once for a (validator, topic) tuple due to how we handle
     // nonces on transactions (deterministically to the topic)
 
     let accumulated_weight = AccumulatedWeight::get(txn, set, topic);
-    if topic.requires_recognition() && accumulated_weight.is_none() {
-      Self::fatal_slash(
-        txn,
-        set,
-        validator,
-        "participated in unrecognized topic which requires recognition",
-      );
+    if topic.requires_whitelisting() && accumulated_weight.is_none() {
+      Self::fatal_slash(txn, set, validator, "participated in unrecognized topic");
       return DataSet::None;
     }
     let mut accumulated_weight = accumulated_weight.unwrap_or(0);
@@ -6,7 +6,6 @@ use core::{marker::PhantomData, future::Future};
 use std::collections::HashMap;
 
 use ciphersuite::group::GroupEncoding;
-use dkg::Participant;
 
 use serai_client::{
   primitives::SeraiAddress,
@@ -28,14 +27,13 @@ use tributary_sdk::{
 use serai_cosign::CosignIntent;
 use serai_coordinator_substrate::NewSetInformation;
 
-use messages::sign::{VariantSignId, SignId};
+use messages::sign::VariantSignId;
 
 mod transaction;
 pub use transaction::{SigningProtocolRound, Signed, Transaction};
 
 mod db;
 use db::*;
-pub use db::Topic;
 
 /// Messages to send to the Processors.
 pub struct ProcessorMessages;
@@ -46,24 +44,6 @@ impl ProcessorMessages {
   }
 }
 
-/// Messages for the DKG confirmation.
-pub struct DkgConfirmationMessages;
-impl DkgConfirmationMessages {
-  /// Receive a message for the DKG confirmation.
-  ///
-  /// These messages use the ProcessorMessage API as that's what existing flows are designed
-  /// around, enabling their reuse. The ProcessorMessage includes a VariantSignId which isn't
-  /// applicable to the DKG confirmation (as there's no such variant of the VariantSignId). The
-  /// actual ID is undefined other than it will be consistent to the signing protocol and unique
-  /// across validator sets, with no guarantees of uniqueness across contexts.
-  pub fn try_recv(
-    txn: &mut impl DbTxn,
-    set: ValidatorSet,
-  ) -> Option<messages::sign::CoordinatorMessage> {
-    db::DkgConfirmationMessages::try_recv(txn, set)
-  }
-}
-
 /// The cosign intents.
 pub struct CosignIntents;
 impl CosignIntents {
@@ -82,28 +62,10 @@ impl CosignIntents {
   }
 }
 
-/// An interface to the topics recognized on this Tributary.
-pub struct RecognizedTopics;
-impl RecognizedTopics {
-  /// If this topic has been recognized by this Tributary.
-  ///
-  /// This will either be by explicit recognition or participation.
-  pub fn recognized(getter: &impl Get, set: ValidatorSet, topic: Topic) -> bool {
-    TributaryDb::recognized(getter, set, topic)
-  }
-  /// The next topic requiring recognition which has been recognized by this Tributary.
-  pub fn try_recv_topic_requiring_recognition(
-    txn: &mut impl DbTxn,
-    set: ValidatorSet,
-  ) -> Option<Topic> {
-    db::RecognizedTopics::try_recv(txn, set)
-  }
-}
-
-/// The plans to recognize upon a `Transaction::SubstrateBlock` being included on-chain.
+/// The plans to whitelist upon a `Transaction::SubstrateBlock` being included on-chain.
 pub struct SubstrateBlockPlans;
 impl SubstrateBlockPlans {
-  /// Set the plans to recognize upon the associated `Transaction::SubstrateBlock` being included
+  /// Set the plans to whitelist upon the associated `Transaction::SubstrateBlock` being included
   /// on-chain.
   ///
   /// This must be done before the associated `Transaction::Cosign` is provided.
@@ -113,7 +75,7 @@ impl SubstrateBlockPlans {
|
|||||||
substrate_block_hash: [u8; 32],
|
substrate_block_hash: [u8; 32],
|
||||||
plans: &Vec<[u8; 32]>,
|
plans: &Vec<[u8; 32]>,
|
||||||
) {
|
) {
|
||||||
db::SubstrateBlockPlans::set(txn, set, substrate_block_hash, plans);
|
db::SubstrateBlockPlans::set(txn, set, substrate_block_hash, &plans);
|
||||||
}
|
}
|
||||||
fn take(
|
fn take(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
@@ -128,32 +90,32 @@ struct ScanBlock<'a, TD: Db, TDT: DbTxn, P: P2p> {
|
|||||||
_td: PhantomData<TD>,
|
_td: PhantomData<TD>,
|
||||||
_p2p: PhantomData<P>,
|
_p2p: PhantomData<P>,
|
||||||
tributary_txn: &'a mut TDT,
|
tributary_txn: &'a mut TDT,
|
||||||
set: &'a NewSetInformation,
|
set: ValidatorSet,
|
||||||
validators: &'a [SeraiAddress],
|
validators: &'a [SeraiAddress],
|
||||||
total_weight: u16,
|
total_weight: u64,
|
||||||
validator_weights: &'a HashMap<SeraiAddress, u16>,
|
validator_weights: &'a HashMap<SeraiAddress, u64>,
|
||||||
}
|
}
|
||||||
impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
|
impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
|
||||||
fn potentially_start_cosign(&mut self) {
|
fn potentially_start_cosign(&mut self) {
|
||||||
// Don't start a new cosigning instance if we're actively running one
|
// Don't start a new cosigning instance if we're actively running one
|
||||||
if TributaryDb::actively_cosigning(self.tributary_txn, self.set.set).is_some() {
|
if TributaryDb::actively_cosigning(self.tributary_txn, self.set).is_some() {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fetch the latest intended-to-be-cosigned block
|
// Fetch the latest intended-to-be-cosigned block
|
||||||
let Some(latest_substrate_block_to_cosign) =
|
let Some(latest_substrate_block_to_cosign) =
|
||||||
TributaryDb::latest_substrate_block_to_cosign(self.tributary_txn, self.set.set)
|
TributaryDb::latest_substrate_block_to_cosign(self.tributary_txn, self.set)
|
||||||
else {
|
else {
|
||||||
return;
|
return;
|
||||||
};
|
};
|
||||||
|
|
||||||
// If it was already cosigned, return
|
// If it was already cosigned, return
|
||||||
if TributaryDb::cosigned(self.tributary_txn, self.set.set, latest_substrate_block_to_cosign) {
|
if TributaryDb::cosigned(self.tributary_txn, self.set, latest_substrate_block_to_cosign) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
let intent =
|
let intent =
|
||||||
CosignIntents::take(self.tributary_txn, self.set.set, latest_substrate_block_to_cosign)
|
CosignIntents::take(self.tributary_txn, self.set, latest_substrate_block_to_cosign)
|
||||||
.expect("Transaction::Cosign locally provided but CosignIntents wasn't populated");
|
.expect("Transaction::Cosign locally provided but CosignIntents wasn't populated");
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
intent.block_hash, latest_substrate_block_to_cosign,
|
intent.block_hash, latest_substrate_block_to_cosign,
|
||||||
@@ -163,71 +125,20 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
     // Mark us as actively cosigning
     TributaryDb::start_cosigning(
       self.tributary_txn,
-      self.set.set,
+      self.set,
       latest_substrate_block_to_cosign,
       intent.block_number,
     );
     // Send the message for the processor to start signing
     TributaryDb::send_message(
       self.tributary_txn,
-      self.set.set,
+      self.set,
       messages::coordinator::CoordinatorMessage::CosignSubstrateBlock {
-        session: self.set.set.session,
+        session: self.set.session,
         intent,
       },
     );
   }
 
-  fn accumulate_dkg_confirmation<D: AsRef<[u8]> + Borshy>(
-    &mut self,
-    block_number: u64,
-    topic: Topic,
-    data: &D,
-    signer: SeraiAddress,
-  ) -> Option<(SignId, HashMap<Participant, Vec<u8>>)> {
-    match TributaryDb::accumulate::<D>(
-      self.tributary_txn,
-      self.set.set,
-      self.validators,
-      self.total_weight,
-      block_number,
-      topic,
-      signer,
-      self.validator_weights[&signer],
-      data,
-    ) {
-      DataSet::None => None,
-      DataSet::Participating(data_set) => {
-        let id = topic.dkg_confirmation_sign_id(self.set.set).unwrap();
-
-        // This will be used in a MuSig protocol, so the Participant indexes are the validator's
-        // position in the list regardless of their weight
-        let flatten_data_set = |data_set: HashMap<_, D>| {
-          let mut entries = HashMap::with_capacity(usize::from(self.total_weight));
-          for (validator, participation) in data_set {
-            let (index, (_validator, _weight)) = &self
-              .set
-              .validators
-              .iter()
-              .enumerate()
-              .find(|(_i, (validator_i, _weight))| validator == *validator_i)
-              .unwrap();
-            // The index is zero-indexed yet participants are one-indexed
-            let index = index + 1;
-
-            entries.insert(
-              Participant::new(u16::try_from(index).unwrap()).unwrap(),
-              participation.as_ref().to_vec(),
-            );
-          }
-          entries
-        };
-        let data_set = flatten_data_set(data_set);
-        Some((id, data_set))
-      }
-    }
-  }
-
   fn handle_application_tx(&mut self, block_number: u64, tx: Transaction) {
     let signer = |signed: Signed| SeraiAddress(signed.signer().to_bytes());
 
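The `flatten_data_set` closure deleted above encodes a rule worth spelling out: the DKG confirmation runs as a MuSig protocol, so each validator gets exactly one `Participant` index, derived from its one-indexed position in the validator list and entirely independent of how many key shares it holds. A standalone sketch of that mapping, using a hypothetical `Address` alias in place of `SeraiAddress` to keep it self-contained:

```rust
use std::collections::HashMap;

use dkg::Participant;

// Hypothetical stand-in for SeraiAddress, purely for this sketch.
type Address = [u8; 32];

// One entry per validator: index = position in the list + 1, regardless of weight.
fn musig_flatten(
  validators: &[Address],
  data_set: HashMap<Address, Vec<u8>>,
) -> HashMap<Participant, Vec<u8>> {
  let mut entries = HashMap::with_capacity(data_set.len());
  for (validator, participation) in data_set {
    // Find the validator's zero-indexed position in the list
    let index = validators.iter().position(|candidate| *candidate == validator).unwrap();
    // Participant indexes are one-indexed; Participant::new rejects zero
    let participant = Participant::new(u16::try_from(index + 1).unwrap()).unwrap();
    entries.insert(participant, participation);
  }
  entries
}
```

Contrast this with `Transaction::Sign` further down, where a validator with weight `w` submits `w` shares and occupies `w` distinct indexes.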
@@ -236,14 +147,13 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
       // TODO: The fact they can publish these TXs makes this a notable spam vector
       if TributaryDb::is_fatally_slashed(
         self.tributary_txn,
-        self.set.set,
+        self.set,
         SeraiAddress(signer.to_bytes()),
       ) {
         return;
       }
     }
 
-    let topic = tx.topic();
     match tx {
       // Accumulate this vote and fatally slash the participant if past the threshold
       Transaction::RemoveParticipant { participant, signed } => {
@@ -253,7 +163,7 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
         if !self.validators.iter().any(|validator| *validator == participant) {
           TributaryDb::fatal_slash(
             self.tributary_txn,
-            self.set.set,
+            self.set,
             signer,
             "voted to remove non-existent participant",
           );
@@ -262,23 +172,18 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
 
         match TributaryDb::accumulate(
           self.tributary_txn,
-          self.set.set,
+          self.set,
           self.validators,
           self.total_weight,
           block_number,
-          topic.unwrap(),
+          Topic::RemoveParticipant { participant },
           signer,
           self.validator_weights[&signer],
           &(),
         ) {
           DataSet::None => {}
           DataSet::Participating(_) => {
-            TributaryDb::fatal_slash(
-              self.tributary_txn,
-              self.set.set,
-              participant,
-              "voted to remove",
-            );
+            TributaryDb::fatal_slash(self.tributary_txn, self.set, participant, "voted to remove");
           }
         };
       }
@@ -287,52 +192,28 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
       Transaction::DkgParticipation { participation, signed } => {
         TributaryDb::send_message(
           self.tributary_txn,
-          self.set.set,
+          self.set,
           messages::key_gen::CoordinatorMessage::Participation {
-            session: self.set.set.session,
-            participant: self.set.participant_indexes[&signer(signed)][0],
+            session: self.set.session,
+            participant: todo!("TODO"),
             participation,
           },
         );
       }
-      Transaction::DkgConfirmationPreprocess { attempt: _, preprocess, signed } => {
-        let topic = topic.unwrap();
-        let signer = signer(signed);
-
-        let Some((id, data_set)) =
-          self.accumulate_dkg_confirmation(block_number, topic, &preprocess, signer)
-        else {
-          return;
-        };
-
-        db::DkgConfirmationMessages::send(
-          self.tributary_txn,
-          self.set.set,
-          &messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set },
-        );
+      Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed } => {
+        // Accumulate the preprocesses into our own FROST attempt manager
+        todo!("TODO")
       }
-      Transaction::DkgConfirmationShare { attempt: _, share, signed } => {
-        let topic = topic.unwrap();
-        let signer = signer(signed);
-
-        let Some((id, data_set)) =
-          self.accumulate_dkg_confirmation(block_number, topic, &share, signer)
-        else {
-          return;
-        };
-
-        db::DkgConfirmationMessages::send(
-          self.tributary_txn,
-          self.set.set,
-          &messages::sign::CoordinatorMessage::Shares { id, shares: data_set },
-        );
+      Transaction::DkgConfirmationShare { attempt, share, signed } => {
+        // Accumulate the shares into our own FROST attempt manager
+        todo!("TODO: SetKeysTask")
       }
 
       Transaction::Cosign { substrate_block_hash } => {
         // Update the latest intended-to-be-cosigned Substrate block
         TributaryDb::set_latest_substrate_block_to_cosign(
           self.tributary_txn,
-          self.set.set,
+          self.set,
           substrate_block_hash,
         );
         // Start a new cosign if we aren't already working on one
@@ -345,32 +226,32 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
       not-yet-Cosigned cosigns, we flag all cosigned blocks as cosigned. Then, when we choose
       the next block to work on, we won't if it's already been cosigned.
       */
-      TributaryDb::mark_cosigned(self.tributary_txn, self.set.set, substrate_block_hash);
+      TributaryDb::mark_cosigned(self.tributary_txn, self.set, substrate_block_hash);
 
       // If we aren't actively cosigning this block, return
       // This occurs when we have Cosign TXs A, B, C, we received Cosigned for A and start on C,
       // and then receive Cosigned for B
-      if TributaryDb::actively_cosigning(self.tributary_txn, self.set.set) !=
+      if TributaryDb::actively_cosigning(self.tributary_txn, self.set) !=
         Some(substrate_block_hash)
       {
         return;
       }
 
       // Since this is the block we were cosigning, mark us as having finished cosigning
-      TributaryDb::finish_cosigning(self.tributary_txn, self.set.set);
+      TributaryDb::finish_cosigning(self.tributary_txn, self.set);
 
       // Start working on the next cosign
       self.potentially_start_cosign();
     }
     Transaction::SubstrateBlock { hash } => {
-      // Recognize all of the IDs this Substrate block causes to be signed
-      let plans = SubstrateBlockPlans::take(self.tributary_txn, self.set.set, hash).expect(
+      // Whitelist all of the IDs this Substrate block causes to be signed
+      let plans = SubstrateBlockPlans::take(self.tributary_txn, self.set, hash).expect(
        "Transaction::SubstrateBlock locally provided but SubstrateBlockPlans wasn't populated",
      );
      for plan in plans {
        TributaryDb::recognize_topic(
          self.tributary_txn,
-          self.set.set,
+          self.set,
          Topic::Sign {
            id: VariantSignId::Transaction(plan),
            attempt: 0,
@@ -380,10 +261,10 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
         }
       }
       Transaction::Batch { hash } => {
-        // Recognize the signing of this batch
+        // Whitelist the signing of this batch
         TributaryDb::recognize_topic(
           self.tributary_txn,
-          self.set.set,
+          self.set,
           Topic::Sign {
             id: VariantSignId::Batch(hash),
             attempt: 0,
@@ -398,7 +279,7 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
         if slash_points.len() != self.validators.len() {
           TributaryDb::fatal_slash(
             self.tributary_txn,
-            self.set.set,
+            self.set,
             signer,
             "slash report was for a distinct amount of signers",
           );
@@ -408,11 +289,11 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
         // Accumulate, and if past the threshold, calculate *the* slash report and start signing it
         match TributaryDb::accumulate(
           self.tributary_txn,
-          self.set.set,
+          self.set,
           self.validators,
           self.total_weight,
           block_number,
-          topic.unwrap(),
+          Topic::SlashReport,
           signer,
           self.validator_weights[&signer],
           &slash_points,
@@ -426,6 +307,10 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
             have a supermajority agree the slash should be fatal. If there isn't a supermajority,
             but the median believe the slash should be fatal, we need to fallback to a large
             constant.
 
+            Also, TODO, each slash point should probably be considered as
+            `MAX_KEY_SHARES_PER_SET * BLOCK_TIME` seconds of downtime. As this time crosses
+            various thresholds (1 day, 3 days, etc), a multiplier should be attached.
             */
             let mut median_slash_report = Vec::with_capacity(self.validators.len());
             for i in 0 .. self.validators.len() {
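To make the comment's fallback rule concrete, here is a minimal, standalone sketch of the per-validator median with a fatal-slash fallback. The `reports` layout (one `Vec<u32>` per reporting validator, indexed by the reported-on validator) and the `FATAL_FALLBACK` constant are illustrative assumptions, not names from this crate:

```rust
// Sketch: fold many validators' slash reports into one by taking the median per
// validator. A fatal slash (encoded as u32::MAX) only stands if a supermajority
// (> 2/3) reported it; a bare median of "fatal" degrades to a large, non-fatal
// constant instead.
const FATAL_FALLBACK: u32 = 1_000_000; // assumed placeholder, not the real value

fn fold_reports(reports: &[Vec<u32>], validators: usize) -> Vec<u32> {
  let mut folded = Vec::with_capacity(validators);
  for i in 0 .. validators {
    // Gather everyone's opinion of validator i and take the median
    let mut column: Vec<u32> = reports.iter().map(|report| report[i]).collect();
    column.sort_unstable();
    let median = column[column.len() / 2];
    // Count explicit fatal votes to check for a supermajority
    let fatal_votes = column.iter().filter(|points| **points == u32::MAX).count();
    let supermajority = 3 * fatal_votes > 2 * column.len();
    folded.push(if (median == u32::MAX) && !supermajority { FATAL_FALLBACK } else { median });
  }
  folded
}
```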
@@ -466,7 +351,7 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
 
             // Create the resulting slash report
             let mut slash_report = vec![];
-            for points in amortized_slash_report {
+            for (validator, points) in self.validators.iter().copied().zip(amortized_slash_report) {
               // TODO: Natively store this as a `Slash`
               if points == u32::MAX {
                 slash_report.push(Slash::Fatal);
@@ -479,7 +364,7 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
             // Recognize the topic for signing the slash report
             TributaryDb::recognize_topic(
               self.tributary_txn,
-              self.set.set,
+              self.set,
               Topic::Sign {
                 id: VariantSignId::SlashReport,
                 attempt: 0,
@@ -489,24 +374,24 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
             // Send the message for the processor to start signing
             TributaryDb::send_message(
               self.tributary_txn,
-              self.set.set,
+              self.set,
               messages::coordinator::CoordinatorMessage::SignSlashReport {
-                session: self.set.set.session,
-                slash_report: slash_report.try_into().unwrap(),
+                session: self.set.session,
+                report: slash_report,
               },
             );
           }
         };
       }
 
-      Transaction::Sign { id: _, attempt: _, round, data, signed } => {
-        let topic = topic.unwrap();
+      Transaction::Sign { id, attempt, round, data, signed } => {
+        let topic = Topic::Sign { id, attempt, round };
         let signer = signer(signed);
 
-        if data.len() != usize::from(self.validator_weights[&signer]) {
+        if u64::try_from(data.len()).unwrap() != self.validator_weights[&signer] {
           TributaryDb::fatal_slash(
             self.tributary_txn,
-            self.set.set,
+            self.set,
             signer,
             "signer signed with a distinct amount of key shares than they had key shares",
           );
@@ -515,7 +400,7 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
 
         match TributaryDb::accumulate(
           self.tributary_txn,
-          self.set.set,
+          self.set,
           self.validators,
           self.total_weight,
           block_number,
@@ -526,22 +411,12 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
         ) {
           DataSet::None => {}
           DataSet::Participating(data_set) => {
-            let id = topic.sign_id(self.set.set).expect("Topic::Sign didn't have SignId");
-            let flatten_data_set = |data_set: HashMap<_, Vec<_>>| {
-              let mut entries = HashMap::with_capacity(usize::from(self.total_weight));
-              for (validator, shares) in data_set {
-                let indexes = &self.set.participant_indexes[&validator];
-                assert_eq!(indexes.len(), shares.len());
-                for (index, share) in indexes.iter().zip(shares) {
-                  entries.insert(*index, share);
-                }
-              }
-              entries
-            };
+            let id = topic.sign_id(self.set).expect("Topic::Sign didn't have SignId");
+            let flatten_data_set = |data_set| todo!("TODO");
             let data_set = flatten_data_set(data_set);
             TributaryDb::send_message(
               self.tributary_txn,
-              self.set.set,
+              self.set,
               match round {
                 SigningProtocolRound::Preprocess => {
                   messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set }
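The deleted weighted `flatten_data_set` is the counterpart of the MuSig mapping sketched earlier: here a validator with weight `w` holds `w` consecutive `Participant` indexes and must submit exactly `w` shares, one per index. One plausible construction of the `participant_indexes` map that code consumed, offered as an illustration rather than the crate's actual builder:

```rust
use std::collections::HashMap;

use dkg::Participant;

// Hypothetical stand-in for SeraiAddress, purely for this sketch.
type Address = [u8; 32];

// Assign each validator `weight` consecutive one-indexed Participant indexes,
// in validator-list order, so shares can be zipped against them index-for-index.
fn participant_indexes(validators: &[(Address, u16)]) -> HashMap<Address, Vec<Participant>> {
  let mut map = HashMap::with_capacity(validators.len());
  let mut next: u16 = 1; // Participant indexes are one-indexed
  for (validator, weight) in validators.iter().copied() {
    let indexes = (next .. next + weight).map(|i| Participant::new(i).unwrap()).collect();
    map.insert(validator, indexes);
    next += weight;
  }
  map
}
```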
@@ -552,13 +427,13 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
                 },
               )
             }
-          }
+          };
         }
       }
     }
   }
 
   fn handle_block(mut self, block_number: u64, block: Block<Transaction>) {
-    TributaryDb::start_of_block(self.tributary_txn, self.set.set, block_number);
+    TributaryDb::start_of_block(self.tributary_txn, self.set, block_number);
 
     for tx in block.transactions {
       match tx {
@@ -585,7 +460,7 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
           // errors, mark the node as fatally slashed
           TributaryDb::fatal_slash(
             self.tributary_txn,
-            self.set.set,
+            self.set,
             SeraiAddress(msgs.0.msg.sender),
             &format!("invalid tendermint messages: {msgs:?}"),
           );
@@ -601,10 +476,10 @@ impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
 /// The task to scan the Tributary, populating `ProcessorMessages`.
 pub struct ScanTributaryTask<TD: Db, P: P2p> {
   tributary_db: TD,
-  set: NewSetInformation,
+  set: ValidatorSet,
   validators: Vec<SeraiAddress>,
-  total_weight: u16,
-  validator_weights: HashMap<SeraiAddress, u16>,
+  total_weight: u64,
+  validator_weights: HashMap<SeraiAddress, u64>,
   tributary: TributaryReader<TD, Transaction>,
   _p2p: PhantomData<P>,
 }
@@ -613,13 +488,15 @@ impl<TD: Db, P: P2p> ScanTributaryTask<TD, P> {
   /// Create a new instance of this task.
   pub fn new(
     tributary_db: TD,
-    set: NewSetInformation,
+    new_set: &NewSetInformation,
     tributary: TributaryReader<TD, Transaction>,
   ) -> Self {
-    let mut validators = Vec::with_capacity(set.validators.len());
+    let mut validators = Vec::with_capacity(new_set.validators.len());
     let mut total_weight = 0;
-    let mut validator_weights = HashMap::with_capacity(set.validators.len());
-    for (validator, weight) in set.validators.iter().copied() {
+    let mut validator_weights = HashMap::with_capacity(new_set.validators.len());
+    for (validator, weight) in new_set.validators.iter().copied() {
+      let validator = SeraiAddress::from(validator);
+      let weight = u64::from(weight);
       validators.push(validator);
       total_weight += weight;
       validator_weights.insert(validator, weight);
@@ -627,7 +504,7 @@ impl<TD: Db, P: P2p> ScanTributaryTask<TD, P> {
 
     ScanTributaryTask {
       tributary_db,
-      set,
+      set: new_set.set,
       validators,
       total_weight,
       validator_weights,
@@ -643,7 +520,7 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       let (mut last_block_number, mut last_block_hash) =
-        TributaryDb::last_handled_tributary_block(&self.tributary_db, self.set.set)
+        TributaryDb::last_handled_tributary_block(&self.tributary_db, self.set)
           .unwrap_or((0, self.tributary.genesis()));
 
       let mut made_progress = false;
@@ -662,7 +539,7 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
         if !self.tributary.locally_provided_txs_in_block(&block_hash, order) {
           return Err(format!(
             "didn't have the provided Transactions on-chain for set (ephemeral error): {:?}",
-            self.set.set
+            self.set
           ));
         }
       }
@@ -672,7 +549,7 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
         _td: PhantomData::<TD>,
         _p2p: PhantomData::<P>,
         tributary_txn: &mut tributary_txn,
-        set: &self.set,
+        set: self.set,
         validators: &self.validators,
         total_weight: self.total_weight,
         validator_weights: &self.validator_weights,
@@ -680,7 +557,7 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
       .handle_block(block_number, block);
       TributaryDb::set_last_handled_tributary_block(
         &mut tributary_txn,
-        self.set.set,
+        self.set,
         block_number,
         block_hash,
       );
@@ -700,6 +577,7 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
 pub fn slash_report_transaction(getter: &impl Get, set: &NewSetInformation) -> Transaction {
   let mut slash_points = Vec::with_capacity(set.validators.len());
   for (validator, _weight) in set.validators.iter().copied() {
+    let validator = SeraiAddress::from(validator);
     slash_points.push(SlashPoints::get(getter, set.set, validator).unwrap_or(0));
   }
   Transaction::SlashReport { slash_points, signed: Signed::default() }
@@ -25,8 +25,6 @@ use tributary_sdk::{
   },
 };
 
-use crate::db::Topic;
-
 /// The round this data is for, within a signing protocol.
 #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
 pub enum SigningProtocolRound {
@@ -182,7 +180,7 @@ pub enum Transaction {
   ///
   /// This is provided after the block has been cosigned.
   ///
-  /// With the acknowledgement of a Substrate block, we can recognize all the `VariantSignId`s
+  /// With the acknowledgement of a Substrate block, we can whitelist all the `VariantSignId`s
   /// resulting from its handling.
   SubstrateBlock {
     /// The hash of the Substrate block
@@ -259,7 +257,9 @@ impl TransactionTrait for Transaction {
 
       Transaction::Cosign { .. } => TransactionKind::Provided("Cosign"),
       Transaction::Cosigned { .. } => TransactionKind::Provided("Cosigned"),
+      // TODO: Provide this
       Transaction::SubstrateBlock { .. } => TransactionKind::Provided("SubstrateBlock"),
+      // TODO: Provide this
       Transaction::Batch { .. } => TransactionKind::Provided("Batch"),
 
       Transaction::Sign { id, attempt, round, signed, .. } => TransactionKind::Signed(
@@ -318,36 +318,6 @@ impl TransactionTrait for Transaction {
 }
 
 impl Transaction {
-  /// The topic in the database for this transaction.
-  pub fn topic(&self) -> Option<Topic> {
-    #[allow(clippy::match_same_arms)] // This doesn't make semantic sense here
-    match self {
-      Transaction::RemoveParticipant { participant, .. } => {
-        Some(Topic::RemoveParticipant { participant: *participant })
-      }
-
-      Transaction::DkgParticipation { .. } => None,
-      Transaction::DkgConfirmationPreprocess { attempt, .. } => {
-        Some(Topic::DkgConfirmation { attempt: *attempt, round: SigningProtocolRound::Preprocess })
-      }
-      Transaction::DkgConfirmationShare { attempt, .. } => {
-        Some(Topic::DkgConfirmation { attempt: *attempt, round: SigningProtocolRound::Share })
-      }
-
-      // Provided TXs
-      Transaction::Cosign { .. } |
-      Transaction::Cosigned { .. } |
-      Transaction::SubstrateBlock { .. } |
-      Transaction::Batch { .. } => None,
-
-      Transaction::Sign { id, attempt, round, .. } => {
-        Some(Topic::Sign { id: *id, attempt: *attempt, round: *round })
-      }
-
-      Transaction::SlashReport { .. } => Some(Topic::SlashReport),
-    }
-  }
-
   /// Sign a transaction.
   ///
   /// Panics if signing a transaction whose type isn't `TransactionKind::Signed`.
@@ -29,8 +29,8 @@ pub(crate) fn generators<C: EvrfCurve>() -> &'static EvrfGenerators<C> {
     .or_insert_with(|| {
       // If we haven't prior needed generators for this Ciphersuite, generate new ones
      Box::leak(Box::new(EvrfGenerators::<C>::new(
-        (MAX_KEY_SHARES_PER_SET * 2 / 3) + 1,
-        MAX_KEY_SHARES_PER_SET,
+        ((MAX_KEY_SHARES_PER_SET * 2 / 3) + 1).try_into().unwrap(),
+        MAX_KEY_SHARES_PER_SET.try_into().unwrap(),
      )))
    })
    .downcast_ref()
@@ -7,7 +7,7 @@ use borsh::{BorshSerialize, BorshDeserialize};
 use dkg::Participant;
 
 use serai_primitives::BlockHash;
-use validator_sets_primitives::{Session, KeyPair, SlashReport};
+use validator_sets_primitives::{Session, KeyPair, Slash};
 use coins_primitives::OutInstructionWithBalance;
 use in_instructions_primitives::SignedBatch;
 
@@ -100,9 +100,7 @@ pub mod sign {
       Self::Cosign(cosign) => {
         f.debug_struct("VariantSignId::Cosign").field("0", &cosign).finish()
       }
-      Self::Batch(batch) => {
-        f.debug_struct("VariantSignId::Batch").field("0", &hex::encode(batch)).finish()
-      }
+      Self::Batch(batch) => f.debug_struct("VariantSignId::Batch").field("0", &batch).finish(),
       Self::SlashReport => f.debug_struct("VariantSignId::SlashReport").finish(),
       Self::Transaction(tx) => {
         f.debug_struct("VariantSignId::Transaction").field("0", &hex::encode(tx)).finish()
@@ -170,7 +168,7 @@ pub mod coordinator {
     /// Sign the slash report for this session.
     ///
     /// This is sent by the Coordinator's Tributary scanner.
-    SignSlashReport { session: Session, slash_report: SlashReport },
+    SignSlashReport { session: Session, report: Vec<Slash> },
   }
 
   // This set of messages is sent entirely and solely by serai-processor-bin's implementation of
@@ -180,7 +178,7 @@ pub mod coordinator {
   pub enum ProcessorMessage {
     CosignedBlock { cosign: SignedCosign },
     SignedBatch { batch: SignedBatch },
-    SignedSlashReport { session: Session, slash_report: SlashReport, signature: [u8; 64] },
+    SignedSlashReport { session: Session, signature: Vec<u8> },
   }
 }
 
@@ -21,7 +21,7 @@ pub enum Call {
   },
   report_slashes {
     network: NetworkId,
-    slashes: SlashReport,
+    slashes: BoundedVec<(SeraiAddress, u32), ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 / 3 }>>,
     signature: Signature,
   },
   allocate {
@@ -5,10 +5,10 @@ use sp_runtime::BoundedVec;
 
 use serai_abi::primitives::Amount;
 pub use serai_abi::validator_sets::primitives;
-use primitives::{MAX_KEY_LEN, Session, ValidatorSet, KeyPair, SlashReport};
+use primitives::{MAX_KEY_LEN, Session, ValidatorSet, KeyPair};
 
 use crate::{
-  primitives::{EmbeddedEllipticCurve, NetworkId},
+  primitives::{EmbeddedEllipticCurve, NetworkId, SeraiAddress},
   Transaction, Serai, TemporalSerai, SeraiError,
 };
 
@@ -238,7 +238,12 @@ impl<'a> SeraiValidatorSets<'a> {
 
   pub fn report_slashes(
     network: NetworkId,
-    slashes: SlashReport,
+    // TODO: This bounds a maximum length but takes more space than just publishing all the u32s
+    // (50 * (32 + 4)) > (150 * 4)
+    slashes: sp_runtime::BoundedVec<
+      (SeraiAddress, u32),
+      sp_core::ConstU32<{ primitives::MAX_KEY_SHARES_PER_SET_U32 / 3 }>,
+    >,
     signature: Signature,
   ) -> Transaction {
     Serai::unsigned(serai_abi::Call::ValidatorSets(
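Spelling out the TODO's arithmetic: with `MAX_KEY_SHARES_PER_SET_U32` at 150 (as the comment's numbers imply), the bounded keyed encoding allows 50 `(SeraiAddress, u32)` pairs at 32 + 4 bytes each, i.e. 1800 bytes, while a dense list of one `u32` per key share would be 150 * 4 = 600 bytes. A compile-time restatement of that claim, as a sketch under the assumption the constant is 150:

```rust
// Assumed value, implied by the "(50 * (32 + 4)) > (150 * 4)" comment above.
const MAX_KEY_SHARES_PER_SET_U32: u32 = 150;

// Keyed: up to MAX / 3 pairs of (32-byte SeraiAddress, 4-byte u32).
const KEYED_BYTES: u32 = (MAX_KEY_SHARES_PER_SET_U32 / 3) * (32 + 4); // 1800
// Dense: one u32 per possible key share.
const DENSE_BYTES: u32 = MAX_KEY_SHARES_PER_SET_U32 * 4; // 600

// The keyed form is three times larger, despite bounding the list's length.
const _: () = assert!(KEYED_BYTES == 3 * DENSE_BYTES && KEYED_BYTES > DENSE_BYTES);
```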
@@ -111,7 +111,13 @@ impl From<Call> for RuntimeCall {
       serai_abi::validator_sets::Call::report_slashes { network, slashes, signature } => {
         RuntimeCall::ValidatorSets(validator_sets::Call::report_slashes {
           network,
-          slashes,
+          slashes: <_>::try_from(
+            slashes
+              .into_iter()
+              .map(|(addr, slash)| (PublicKey::from(addr), slash))
+              .collect::<Vec<_>>(),
+          )
+          .unwrap(),
           signature,
         })
       }
@@ -295,7 +301,17 @@ impl TryInto<Call> for RuntimeCall {
         }
       }
       validator_sets::Call::report_slashes { network, slashes, signature } => {
-        serai_abi::validator_sets::Call::report_slashes { network, slashes, signature }
+        serai_abi::validator_sets::Call::report_slashes {
+          network,
+          slashes: <_>::try_from(
+            slashes
+              .into_iter()
+              .map(|(addr, slash)| (SeraiAddress::from(addr), slash))
+              .collect::<Vec<_>>(),
+          )
+          .unwrap(),
+          signature,
+        }
       }
       validator_sets::Call::allocate { network, amount } => {
         serai_abi::validator_sets::Call::allocate { network, amount }
@@ -1010,7 +1010,7 @@ pub mod pallet {
     pub fn report_slashes(
       origin: OriginFor<T>,
       network: NetworkId,
-      slashes: SlashReport,
+      slashes: BoundedVec<(Public, u32), ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 / 3 }>>,
       signature: Signature,
     ) -> DispatchResult {
       ensure_none(origin)?;
@@ -210,30 +210,6 @@ impl Slash {
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub struct SlashReport(pub BoundedVec<Slash, ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 }>>);
 
-#[cfg(feature = "borsh")]
-impl BorshSerialize for SlashReport {
-  fn serialize<W: borsh::io::Write>(&self, writer: &mut W) -> borsh::io::Result<()> {
-    BorshSerialize::serialize(self.0.as_slice(), writer)
-  }
-}
-#[cfg(feature = "borsh")]
-impl BorshDeserialize for SlashReport {
-  fn deserialize_reader<R: borsh::io::Read>(reader: &mut R) -> borsh::io::Result<Self> {
-    let slashes = Vec::<Slash>::deserialize_reader(reader)?;
-    slashes
-      .try_into()
-      .map(Self)
-      .map_err(|_| borsh::io::Error::other("length of slash report exceeds max validators"))
-  }
-}
-
-impl TryFrom<Vec<Slash>> for SlashReport {
-  type Error = &'static str;
-  fn try_from(slashes: Vec<Slash>) -> Result<SlashReport, &'static str> {
-    slashes.try_into().map(Self).map_err(|_| "length of slash report exceeds max validators")
-  }
-}
-
 // This is assumed binding to the ValidatorSet via the key signed with
 pub fn report_slashes_message(slashes: &SlashReport) -> Vec<u8> {
   (b"ValidatorSets-report_slashes", slashes).encode()
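With the `TryFrom<Vec<Slash>>` convenience impl removed by this diff, building the payload `report_slashes_message` signs goes through `BoundedVec` directly. A minimal usage sketch, assuming the surrounding primitives (`Slash`, `SlashReport`, `MAX_KEY_SHARES_PER_SET_U32`, `report_slashes_message`) are in scope:

```rust
use sp_core::ConstU32;
use sp_runtime::BoundedVec;

// Construct a SlashReport from raw slashes and derive the message a validator
// set signs. The domain prefix separates this signature from other contexts;
// binding to the specific set comes from the key it's signed with, per the
// comment above.
fn slash_report_signing_payload(slashes: Vec<Slash>) -> Vec<u8> {
  let bounded = BoundedVec::<Slash, ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 }>>::try_from(slashes)
    .ok()
    .expect("length of slash report exceeds max validators");
  report_slashes_message(&SlashReport(bounded))
}
```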