Reattempts (#483)
* Schedule re-attempts and add a (not filled out) match statement to actually execute them

  A comment explains the methodology. To copy it here:

  """
  This is because we *always* re-attempt any protocol which had participation. That
  doesn't mean we *should* re-attempt this protocol.

  The alternatives were:
  1) Note on-chain that we completed a protocol, halting re-attempts upon 34%.
  2) Vote on-chain to re-attempt a protocol.

  This schema doesn't have any additional messages upon the success case (whereas
  alternative #1 does) and doesn't have overhead (as alternative #2 does, sending votes
  and then preprocesses; this only sends preprocesses).
  """

  Any signing protocol which reaches sufficient participation will be re-attempted until
  it no longer does.

* Have the Substrate scanner track DKG removals/completions for the Tributary code

* Don't keep trying to publish a participant removal if we've already set keys

* Pad out the re-attempt match a bit more

* Have CosignEvaluator reload from the DB

* Correctly schedule cosign re-attempts

* Actually spawn new DKG removal attempts

* Use u32 for the Batch ID in SubstrateSignableId, finish Batch re-attempt routing

  The Batch ID was an opaque [u8; 5] which also included the network, yet that's
  redundant and unhelpful.

* Clarify a pair of TODOs in the coordinator

* Remove old TODO

* Final comment cleanup

* Correct usage of TARGET_BLOCK_TIME in the re-attempt scheduler

  It's in ms, yet the code assumed it was in s.

* Have coordinator tests drop BatchReattempts which aren't relevant yet may exist

* Bug fix and pointless oddity removal

  We scheduled a re-attempt upon receiving 2/3rds of preprocesses and upon receiving
  2/3rds of shares, so any signing protocol could cause two re-attempts (not one more).

  The coordinator tests randomly generated the Batch ID since it was previously an
  opaque byte array. While that didn't break the test, it was pointless and made the
  already-succeeded check before re-attempting impossible to hit.

* Add log statements, correct a deadlock in the coordinator tests

* Increase the pessimistic timeout on recv_message to compensate for tighter best-case
  timeouts

* Further bump the timeout by a minute

  AFAICT, GH failed by just a few seconds. This is also the worst case in a single
  instance, making it fine to be decently long.

* Further bump the timeout again due to the lack of a distinct error
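To make the scheduling concrete, here is a minimal sketch in Rust of the behavior the message describes. None of these names are the coordinator's actual types; the sketch only illustrates the commit's fixes: Batch IDs as a bare u32, TARGET_BLOCK_TIME denominated in milliseconds, and exactly one re-attempt scheduled per attempt (the `seen` guard here is one way to keep a later 2/3rds-of-shares event from scheduling a second).

use std::collections::{HashMap, HashSet};

// Hypothetical signing-protocol topics; Batch IDs are a bare u32 now that the
// network is no longer redundantly packed into an opaque [u8; 5].
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Topic {
  Dkg,
  Batch(u32),
}

// Assumed constants: the block time is in milliseconds (the unit the commit
// corrects for), and the delay before a re-attempt is five minutes.
const TARGET_BLOCK_TIME: u64 = 6000;
const REATTEMPT_DELAY_MINUTES: u64 = 5;

#[derive(Default)]
struct Reattempts {
  scheduled: HashMap<u64, Vec<Topic>>,
  seen: HashSet<Topic>,
}

impl Reattempts {
  // Called upon 2/3rds participation in preprocesses. The `seen` guard keeps a
  // later 2/3rds-of-shares event from scheduling a second re-attempt, which was
  // the bug this commit fixes.
  fn schedule(&mut self, current_block: u64, topic: Topic) {
    if !self.seen.insert(topic) {
      return;
    }
    // Convert minutes to blocks using the ms-denominated block time.
    let delay = (REATTEMPT_DELAY_MINUTES * 60 * 1000) / TARGET_BLOCK_TIME;
    self.scheduled.entry(current_block + delay).or_default().push(topic);
  }

  // Topics due for re-attempt at this block. Clearing `seen` lets a protocol
  // which again reaches sufficient participation be re-attempted once more,
  // matching "re-attempted until it no longer does".
  fn due(&mut self, block: u64) -> Vec<Topic> {
    let due = self.scheduled.remove(&block).unwrap_or_default();
    for topic in &due {
      self.seen.remove(topic);
    }
    due
  }
}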
@@ -12,7 +12,7 @@ use serai_client::{
   SeraiError, Block, Serai, TemporalSerai,
   primitives::{BlockHash, NetworkId},
   validator_sets::{
-    primitives::{ValidatorSet, KeyPair, amortize_excess_key_shares},
+    primitives::{ValidatorSet, amortize_excess_key_shares},
     ValidatorSetsEvent,
   },
   in_instructions::InInstructionsEvent,
@@ -25,7 +25,11 @@ use processor_messages::SubstrateContext;
 
 use tokio::{sync::mpsc, time::sleep};
 
-use crate::{Db, processors::Processors, tributary::TributarySpec};
+use crate::{
+  Db,
+  processors::Processors,
+  tributary::{TributarySpec, SeraiDkgRemoval, SeraiDkgCompleted},
+};
 
 mod db;
 pub use db::*;
@@ -114,37 +118,6 @@ async fn handle_new_set<D: Db>(
   Ok(())
 }
 
-async fn handle_key_gen<Pro: Processors>(
-  processors: &Pro,
-  serai: &Serai,
-  block: &Block,
-  set: ValidatorSet,
-  key_pair: KeyPair,
-) -> Result<(), SeraiError> {
-  processors
-    .send(
-      set.network,
-      processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair {
-        context: SubstrateContext {
-          serai_time: block.time().unwrap() / 1000,
-          network_latest_finalized_block: serai
-            .as_of(block.hash())
-            .in_instructions()
-            .latest_block_for_network(set.network)
-            .await?
-            // The processor treats this as a magic value which will cause it to find a network
-            // block which has a time greater than or equal to the Serai time
-            .unwrap_or(BlockHash([0; 32])),
-        },
-        session: set.session,
-        key_pair,
-      },
-    )
-    .await;
-
-  Ok(())
-}
-
 async fn handle_batch_and_burns<Pro: Processors>(
   txn: &mut impl DbTxn,
   processors: &Pro,
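The helper removed above (re-inlined at its sole call site in the final hunk) falls back to an all-zero BlockHash when no finalized network block is known; per the inline comment, the processor treats that as a magic value meaning "find a network block whose time is greater than or equal to the Serai time". A tiny sketch of that convention, with a hypothetical local stand-in for serai_client's BlockHash:

// Hypothetical local stand-in for serai_client's BlockHash.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct BlockHash([u8; 32]);

// The all-zero hash names no concrete block; the processor interprets it as
// "find a network block with a time >= the Serai time".
fn network_latest_finalized_block(latest: Option<BlockHash>) -> BlockHash {
  latest.unwrap_or(BlockHash([0; 32]))
}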
@@ -249,6 +222,19 @@ async fn handle_block<D: Db, Pro: Processors>(
   // Define an indexed event ID.
   let mut event_id = 0;
 
+  if HandledEvent::is_unhandled(db, hash, event_id) {
+    let mut txn = db.txn();
+    for removal in serai.as_of(hash).validator_sets().participant_removed_events().await? {
+      let ValidatorSetsEvent::ParticipantRemoved { set, removed } = removal else {
+        panic!("ParticipantRemoved event wasn't ParticipantRemoved: {removal:?}");
+      };
+      SeraiDkgRemoval::set(&mut txn, set, removed.0, &());
+    }
+    HandledEvent::handle_event(&mut txn, hash, event_id);
+    txn.commit();
+  }
+  event_id += 1;
+
   // If a new validator set was activated, create tributary/inform processor to do a DKG
   for new_set in serai.as_of(hash).validator_sets().new_set_events().await? {
     // Individually mark each event as handled so on reboot, we minimize duplicates
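The block added above follows the scanner's indexed-event pattern: each event within a block gets a monotonically increasing event_id, and each is individually marked handled so a reboot minimizes duplicate handling. A minimal sketch of that pattern, with an in-memory store standing in for the coordinator's HandledEvent database keys:

use std::collections::HashSet;

// In-memory stand-in for the HandledEvent database keys; hypothetical.
#[derive(Default)]
struct HandledEvents {
  // (block hash, event index within the block)
  handled: HashSet<([u8; 32], u32)>,
}

impl HandledEvents {
  fn is_unhandled(&self, block: [u8; 32], event_id: u32) -> bool {
    !self.handled.contains(&(block, event_id))
  }
  fn handle_event(&mut self, block: [u8; 32], event_id: u32) {
    self.handled.insert((block, event_id));
  }
}

fn scan_block(db: &mut HandledEvents, block: [u8; 32], events: &[&str]) {
  let mut event_id = 0;
  for event in events {
    // Skip events already handled before a reboot; handle and mark the rest.
    if db.is_unhandled(block, event_id) {
      println!("handling event {event_id}: {event}");
      db.handle_event(block, event_id);
    }
    event_id += 1;
  }
}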
@@ -279,12 +265,31 @@ async fn handle_block<D: Db, Pro: Processors>(
   for key_gen in serai.as_of(hash).validator_sets().key_gen_events().await? {
     if HandledEvent::is_unhandled(db, hash, event_id) {
       log::info!("found fresh key gen event {:?}", key_gen);
-      if let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen {
-        handle_key_gen(processors, serai, &block, set, key_pair).await?;
-      } else {
+      let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen else {
         panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
-      }
+      };
+      processors
+        .send(
+          set.network,
+          processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair {
+            context: SubstrateContext {
+              serai_time: block.time().unwrap() / 1000,
+              network_latest_finalized_block: serai
+                .as_of(block.hash())
+                .in_instructions()
+                .latest_block_for_network(set.network)
+                .await?
+                // The processor treats this as a magic value which will cause it to find a network
+                // block which has a time greater than or equal to the Serai time
+                .unwrap_or(BlockHash([0; 32])),
+            },
+            session: set.session,
+            key_pair,
+          },
+        )
+        .await;
       let mut txn = db.txn();
+      SeraiDkgCompleted::set(&mut txn, set, &());
       HandledEvent::handle_event(&mut txn, hash, event_id);
       txn.commit();
     }
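With handle_key_gen gone, the new code destructures the event via let-else rather than the prior if let/else: the happy path stays unindented and the impossible variant panics. A standalone illustration of the pattern (the enum is invented, not Serai's ValidatorSetsEvent):

// Illustrative enum; not Serai's actual event type.
#[derive(Debug)]
enum Event {
  KeyGen { session: u32 },
  Other,
}

fn confirm(event: Event) {
  // Bind on the expected variant; diverge (panic) otherwise.
  let Event::KeyGen { session } = event else {
    panic!("KeyGen event wasn't KeyGen: {event:?}");
  };
  println!("confirming key pair for session {session}");
}

fn main() {
  confirm(Event::KeyGen { session: 0 });
}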