mirror of https://github.com/serai-dex/serai.git
Reattempts (#483)
* Schedule re-attempts and add a (not filled out) match statement to actually execute them

  A comment explains the methodology. To copy it here:

  """
  This is because we *always* re-attempt any protocol which had participation. That doesn't
  mean we *should* re-attempt this protocol.

  The alternatives were:
  1) Note on-chain we completed a protocol, halting re-attempts upon 34%.
  2) Vote on-chain to re-attempt a protocol.

  This schema doesn't have any additional messages upon the success case (whereas alternative
  #1 does) and doesn't have overhead (as alternative #2 does, sending votes and then
  preprocesses; this only sends preprocesses).
  """

  Any signing protocol which reaches sufficient participation will be re-attempted until it
  no longer does.

* Have the Substrate scanner track DKG removals/completions for the Tributary code

* Don't keep trying to publish a participant removal if we've already set keys

* Pad out the re-attempt match a bit more

* Have CosignEvaluator reload from the DB

* Correctly schedule cosign re-attempts

* Actually spawn new DKG removal attempts

* Use u32 for Batch ID in SubstrateSignableId, finish Batch re-attempt routing

  The Batch ID was an opaque [u8; 5] which also included the network, yet that's redundant
  and unhelpful.

* Clarify a pair of TODOs in the coordinator

* Remove old TODO

* Final comment cleanup

* Correct usage of TARGET_BLOCK_TIME in the re-attempt scheduler

  It's in ms, yet I had assumed it was in s.

* Have coordinator tests drop BatchReattempts which aren't relevant yet may exist

* Bug fix and pointless oddity removal

  We scheduled a re-attempt upon receiving 2/3rds of preprocesses and upon receiving 2/3rds
  of shares, so any signing protocol could cause two re-attempts (not just one more).

  The coordinator tests randomly generated the Batch ID since it was previously an opaque
  byte array. While that didn't break the test, it was pointless and made the
  already-succeeded check before re-attempting impossible to hit.

* Add log statements, correct a dead-lock in the coordinator tests

* Increase the pessimistic timeout on recv_message to compensate for tighter best-case
  timeouts

* Further bump the timeout by a minute

  AFAICT, GH failed by just a few seconds. This is also a worst-case bound hit in at most a
  single instance, making it fine for it to be decently long.

* Bump the timeout further still, due to the lack of a distinct error
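The single-re-attempt rule from the bug-fix bullet can be made concrete with a small sketch.
Everything below is illustrative, not the coordinator's actual code: `ReattemptScheduler`,
its `schedule` method, the 32-byte protocol ID, and the five-minute budget are assumed names
and values. It shows (a) a (protocol ID, attempt) pair scheduling at most one re-attempt, no
matter whether the 2/3rds threshold is hit on preprocesses or on shares, and (b) treating
TARGET_BLOCK_TIME as milliseconds when converting a wall-clock budget into a block delay:

  use std::collections::HashSet;

  // Hypothetical value; the real constant lives elsewhere in the codebase.
  // The unit is milliseconds, not seconds (the confusion this commit fixes).
  const TARGET_BLOCK_TIME: u64 = 6000;

  #[derive(Default)]
  struct ReattemptScheduler {
    // (protocol ID, attempt) pairs which already have a re-attempt scheduled
    scheduled: HashSet<([u8; 32], u32)>,
  }

  impl ReattemptScheduler {
    // Called whenever a protocol hits 2/3rds participation (on preprocesses or
    // on shares). Returns the delay, in blocks, before the re-attempt, or None
    // if this attempt already scheduled one.
    fn schedule(&mut self, id: [u8; 32], attempt: u32) -> Option<u64> {
      // HashSet::insert returns false if the pair was already present, which is
      // exactly the "don't schedule a second re-attempt" guard
      if !self.scheduled.insert((id, attempt)) {
        return None;
      }
      // Convert an assumed five-minute wall-clock budget into a block count,
      // dividing milliseconds by milliseconds-per-block
      let budget_ms = 5 * 60 * 1000;
      Some(budget_ms / TARGET_BLOCK_TIME)
    }
  }

  fn main() {
    let mut scheduler = ReattemptScheduler::default();
    let id = [0u8; 32];
    // 2/3rds of preprocesses arrive: one re-attempt is scheduled
    assert_eq!(scheduler.schedule(id, 0), Some(50));
    // 2/3rds of shares arrive for the same attempt: no second re-attempt
    assert_eq!(scheduler.schedule(id, 0), None);
    // The next attempt may schedule its own re-attempt
    assert!(scheduler.schedule(id, 1).is_some());
  }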
@@ -269,9 +269,22 @@ impl Processor {
       assert_eq!(msg.id, *next_recv_id);

       let msg_msg = borsh::from_slice(&msg.msg).unwrap();
+      // Remove any BatchReattempts clogging the pipe
+      // TODO: Set up a wrapper around serai-client so we aren't throwing this away yet
+      // leave it for the tests
+      if matches!(
+        msg_msg,
+        messages::CoordinatorMessage::Coordinator(
+          messages::coordinator::CoordinatorMessage::BatchReattempt { .. }
+        )
+      ) {
+        queue.ack(Service::Coordinator, msg.id).await;
+        *next_recv_id += 1;
+        continue;
+      }
       if !is_cosign_message(&msg_msg) {
         continue;
       };
       queue.ack(Service::Coordinator, msg.id).await;
       *next_recv_id += 1;
       msg_msg
@@ -393,17 +406,13 @@ impl Processor {
     *next_send_id += 1;
   }

-  /// Receive a message from the coordinator as a processor.
-  pub async fn recv_message(&mut self) -> CoordinatorMessage {
+  async fn recv_message_inner(&mut self) -> CoordinatorMessage {
     loop {
       tokio::task::yield_now().await;

       let mut queue_lock = self.queue.lock().await;
       let (_, next_recv_id, queue) = &mut *queue_lock;
-      // Set a timeout of an entire 6 minutes as cosigning may be delayed by up to 5 minutes
-      let msg = tokio::time::timeout(Duration::from_secs(6 * 60), queue.next(Service::Coordinator))
-        .await
-        .unwrap();
+      let msg = queue.next(Service::Coordinator).await;
       assert_eq!(msg.from, Service::Coordinator);
       assert_eq!(msg.id, *next_recv_id);

@@ -419,6 +428,13 @@ impl Processor {
       }
     }

+  /// Receive a message from the coordinator as a processor.
+  pub async fn recv_message(&mut self) -> CoordinatorMessage {
+    // Set a timeout of 15 minutes to allow effectively any protocol to occur without a fear of
+    // an arbitrary timeout cutting it short
+    tokio::time::timeout(Duration::from_secs(15 * 60), self.recv_message_inner()).await.unwrap()
+  }
+
   pub async fn set_substrate_key(
     &mut self,
     substrate_key: Zeroizing<<Ristretto as Ciphersuite>::F>,

@@ -38,9 +38,7 @@ pub async fn batch(
   substrate_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   batch: Batch,
 ) -> u64 {
-  let mut id = [0; 5];
-  OsRng.fill_bytes(&mut id);
-  let id = SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt: 0 };
+  let id = SubstrateSignId { session, id: SubstrateSignableId::Batch(batch.id), attempt: 0 };

   for processor in processors.iter_mut() {
     processor
@@ -222,8 +220,19 @@ pub async fn batch(
   // Verify the coordinator sends SubstrateBlock to all processors
   let last_block = serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap();
   for processor in processors {
+    // Handle a potential re-attempt message in the pipeline
+    let mut received = processor.recv_message().await;
+    if matches!(
+      received,
+      messages::CoordinatorMessage::Coordinator(
+        messages::coordinator::CoordinatorMessage::BatchReattempt { .. }
+      )
+    ) {
+      received = processor.recv_message().await
+    }
+
     assert_eq!(
-      processor.recv_message().await,
+      received,
       messages::CoordinatorMessage::Substrate(
         messages::substrate::CoordinatorMessage::SubstrateBlock {
           context: SubstrateContext {

@@ -46,8 +46,8 @@ pub(crate) fn new_test() -> (Vec<(Handles, <Ristretto as Ciphersuite>::F)>, Dock
 // Use an RPC to evaluate if a condition was met, with the following time being a timeout
 // https://github.com/serai-dex/serai/issues/340
 pub(crate) async fn wait_for_tributary() {
-  tokio::time::sleep(Duration::from_secs(20)).await;
+  tokio::time::sleep(Duration::from_secs(15)).await;
   if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
-    tokio::time::sleep(Duration::from_secs(40)).await;
+    tokio::time::sleep(Duration::from_secs(6)).await;
   }
 }

@@ -7,7 +7,6 @@ use dkg::{Participant, tests::clone_without};

 use messages::{coordinator::*, SubstrateContext};

-use scale::Encode;
 use serai_client::{
   primitives::{
     BlockHash, Amount, Balance, crypto::RuntimePublic, PublicKey, SeraiAddress, NetworkId,
@@ -28,11 +27,7 @@ pub(crate) async fn recv_batch_preprocesses(
   batch: &Batch,
   attempt: u32,
 ) -> (SubstrateSignId, HashMap<Participant, [u8; 64]>) {
-  let id = SubstrateSignId {
-    session,
-    id: SubstrateSignableId::Batch((batch.network, batch.id).encode().try_into().unwrap()),
-    attempt,
-  };
+  let id = SubstrateSignId { session, id: SubstrateSignableId::Batch(batch.id), attempt };

   let mut block = None;
   let mut preprocesses = HashMap::new();