Simplify Coordinator/Processors::send by accepting impl Into *Message

Luke Parker
2023-09-29 04:19:59 -04:00
parent 0eff3d9453
commit bd5491dfd5
7 changed files with 69 additions and 89 deletions
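The change lets call sites pass a network-specific message (e.g. key_gen::CoordinatorMessage::GenerateKey { .. }) straight to Processors::send, which now performs the conversion into the top-level CoordinatorMessage itself. A minimal, self-contained sketch of the pattern, using illustrative types rather than the actual serai definitions, and assuming the wrapper enum has From impls for its variants:

// Minimal sketch (illustrative names, not the actual serai types).
// A wrapper enum with `From` impls for its variants lets the sender accept
// `impl Into<CoordinatorMessage>`, so callers pass the inner message directly.
enum KeyGenMessage {
  GenerateKey { attempt: u32 },
}

enum CoordinatorMessage {
  KeyGen(KeyGenMessage),
}

impl From<KeyGenMessage> for CoordinatorMessage {
  fn from(msg: KeyGenMessage) -> CoordinatorMessage {
    CoordinatorMessage::KeyGen(msg)
  }
}

fn send(msg: impl Send + Into<CoordinatorMessage>) {
  // Convert once at the boundary; the rest of the sender only sees the wrapper type.
  let msg: CoordinatorMessage = msg.into();
  match msg {
    CoordinatorMessage::KeyGen(KeyGenMessage::GenerateKey { attempt }) => {
      println!("queueing GenerateKey, attempt {attempt}");
    }
  }
}

fn main() {
  // Before this commit the caller had to write CoordinatorMessage::KeyGen(...) itself.
  send(KeyGenMessage::GenerateKey { attempt: 0 });
}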


@@ -88,19 +88,17 @@ async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
 processors
   .send(
     set.network,
-    processor_messages::CoordinatorMessage::KeyGen(
-      processor_messages::key_gen::CoordinatorMessage::GenerateKey {
-        id: processor_messages::key_gen::KeyGenId { set, attempt: 0 },
-        params: frost::ThresholdParams::new(
-          spec.t(),
-          spec.n(),
-          spec
-            .i(Ristretto::generator() * key.deref())
-            .expect("adding a tributary for a set we aren't in set for"),
-        )
-        .unwrap(),
-      },
-    ),
+    processor_messages::key_gen::CoordinatorMessage::GenerateKey {
+      id: processor_messages::key_gen::KeyGenId { set, attempt: 0 },
+      params: frost::ThresholdParams::new(
+        spec.t(),
+        spec.n(),
+        spec
+          .i(Ristretto::generator() * key.deref())
+          .expect("adding a tributary for a set we aren't in set for"),
+      )
+      .unwrap(),
+    },
   )
   .await;
@@ -625,10 +623,8 @@ async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
 // Re-define batch
 // We can't drop it, yet it shouldn't be accidentally used in the following block
-#[allow(clippy::let_unit_value)]
+#[allow(clippy::let_unit_value, unused_variables)]
 let batch = ();
-#[allow(clippy::let_unit_value)]
-let _ = batch;

 // Verify all `Batch`s which we've already indexed from Substrate
 // This won't be complete, as it only runs when a `Batch` message is received, which


@@ -14,14 +14,15 @@ pub struct Message {
 #[async_trait::async_trait]
 pub trait Processors: 'static + Send + Sync + Clone {
-  async fn send(&self, network: NetworkId, msg: CoordinatorMessage);
+  async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>);
   async fn recv(&mut self, network: NetworkId) -> Message;
   async fn ack(&mut self, msg: Message);
 }

 #[async_trait::async_trait]
 impl Processors for Arc<MessageQueue> {
-  async fn send(&self, network: NetworkId, msg: CoordinatorMessage) {
+  async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>) {
+    let msg: CoordinatorMessage = msg.into();
     let metadata =
       Metadata { from: self.service, to: Service::Processor(network), intent: msg.intent() };
     let msg = serde_json::to_string(&msg).unwrap();
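The Send bound on msg is needed because #[async_trait] rewrites each method body into a boxed future that must itself be Send, and that future captures every argument, even ones converted away before the first .await. A rough sketch of the same trait shape under those assumptions (StringQueue and Sender are illustrative, not part of the codebase):

// Sketch of the trait shape used above, with illustrative types.
use std::sync::{Arc, Mutex};

use async_trait::async_trait;

#[derive(Default)]
struct StringQueue(Arc<Mutex<Vec<String>>>);

#[async_trait]
trait Sender: 'static + Send + Sync {
  // `impl Trait` in argument position is plain generic sugar, so it works
  // under #[async_trait]; `Send` keeps the generated boxed future `Send`.
  async fn send(&self, msg: impl Send + Into<String>);
}

#[async_trait]
impl Sender for StringQueue {
  async fn send(&self, msg: impl Send + Into<String>) {
    // Convert at the top, as the Arc<MessageQueue> impl does with CoordinatorMessage.
    let msg: String = msg.into();
    self.0.lock().unwrap().push(msg);
  }
}

#[tokio::main]
async fn main() {
  let queue = StringQueue::default();
  // &str converts into String via Into, so no manual wrapping at the call site.
  queue.send("hello").await;
  assert_eq!(queue.0.lock().unwrap().len(), 1);
}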


@@ -18,7 +18,7 @@ use serai_client::{
 use serai_db::DbTxn;

-use processor_messages::{SubstrateContext, CoordinatorMessage};
+use processor_messages::SubstrateContext;

 use tokio::time::sleep;
@@ -102,21 +102,19 @@ async fn handle_key_gen<D: Db, Pro: Processors>(
 processors
   .send(
     set.network,
-    CoordinatorMessage::Substrate(
-      processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair {
-        context: SubstrateContext {
-          serai_time: block.time().unwrap() / 1000,
-          network_latest_finalized_block: serai
-            .get_latest_block_for_network(block.hash(), set.network)
-            .await?
-            // The processor treats this as a magic value which will cause it to find a network
-            // block which has a time greater than or equal to the Serai time
-            .unwrap_or(BlockHash([0; 32])),
-        },
-        set,
-        key_pair,
-      },
-    ),
+    processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair {
+      context: SubstrateContext {
+        serai_time: block.time().unwrap() / 1000,
+        network_latest_finalized_block: serai
+          .get_latest_block_for_network(block.hash(), set.network)
+          .await?
+          // The processor treats this as a magic value which will cause it to find a network
+          // block which has a time greater than or equal to the Serai time
+          .unwrap_or(BlockHash([0; 32])),
+      },
+      set,
+      key_pair,
+    },
   )
   .await;
@@ -197,18 +195,16 @@ async fn handle_batch_and_burns<D: Db, Pro: Processors>(
   processors
     .send(
       network,
-      CoordinatorMessage::Substrate(
-        processor_messages::substrate::CoordinatorMessage::SubstrateBlock {
-          context: SubstrateContext {
-            serai_time: block.time().unwrap() / 1000,
-            network_latest_finalized_block,
-          },
-          network,
-          block: block.number(),
-          burns: burns.remove(&network).unwrap(),
-          batches: batches.remove(&network).unwrap(),
-        },
-      ),
+      processor_messages::substrate::CoordinatorMessage::SubstrateBlock {
+        context: SubstrateContext {
+          serai_time: block.time().unwrap() / 1000,
+          network_latest_finalized_block,
+        },
+        network,
+        block: block.number(),
+        burns: burns.remove(&network).unwrap(),
+        batches: batches.remove(&network).unwrap(),
+      },
     )
     .await;
 }


@@ -30,10 +30,10 @@ impl MemProcessors {
 #[async_trait::async_trait]
 impl Processors for MemProcessors {
-  async fn send(&self, network: NetworkId, msg: CoordinatorMessage) {
+  async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>) {
     let mut processors = self.0.write().await;
     let processor = processors.entry(network).or_insert_with(VecDeque::new);
-    processor.push_back(msg);
+    processor.push_back(msg.into());
   }
   async fn recv(&mut self, _: NetworkId) -> Message {
     todo!()


@@ -25,8 +25,8 @@ use serai_client::{
 use tributary::Signed;

 use processor_messages::{
-  CoordinatorMessage, coordinator,
   key_gen::{self, KeyGenId},
+  coordinator,
   sign::{self, SignId},
 };
@@ -309,10 +309,10 @@ pub(crate) async fn handle_application_tx<
   processors
     .send(
       spec.set().network,
-      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
+      key_gen::CoordinatorMessage::Commitments {
         id: KeyGenId { set: spec.set(), attempt },
         commitments,
-      }),
+      },
     )
     .await;
 }
@@ -365,10 +365,10 @@ pub(crate) async fn handle_application_tx<
   processors
     .send(
       spec.set().network,
-      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
+      key_gen::CoordinatorMessage::Shares {
         id: KeyGenId { set: spec.set(), attempt },
         shares,
-      }),
+      },
     )
     .await;
 }
@@ -458,10 +458,10 @@ pub(crate) async fn handle_application_tx<
   processors
     .send(
       spec.set().network,
-      CoordinatorMessage::Coordinator(coordinator::CoordinatorMessage::BatchPreprocesses {
+      coordinator::CoordinatorMessage::BatchPreprocesses {
         id: SignId { key, id: data.plan, attempt: data.attempt },
         preprocesses,
-      }),
+      },
     )
     .await;
 }
@@ -485,13 +485,13 @@ pub(crate) async fn handle_application_tx<
   processors
     .send(
       spec.set().network,
-      CoordinatorMessage::Coordinator(coordinator::CoordinatorMessage::BatchShares {
+      coordinator::CoordinatorMessage::BatchShares {
         id: SignId { key, id: data.plan, attempt: data.attempt },
         shares: shares
           .into_iter()
           .map(|(validator, share)| (validator, share.try_into().unwrap()))
           .collect(),
-      }),
+      },
     )
     .await;
 }
@@ -517,7 +517,7 @@ pub(crate) async fn handle_application_tx<
   processors
     .send(
       spec.set().network,
-      CoordinatorMessage::Sign(sign::CoordinatorMessage::Preprocesses {
+      sign::CoordinatorMessage::Preprocesses {
         id: SignId {
           key: key_pair
             .expect("completed SignPreprocess despite not setting the key pair")
@@ -527,7 +527,7 @@ pub(crate) async fn handle_application_tx<
           attempt: data.attempt,
         },
         preprocesses,
-      }),
+      },
     )
     .await;
 }
@@ -551,7 +551,7 @@ pub(crate) async fn handle_application_tx<
   processors
     .send(
       spec.set().network,
-      CoordinatorMessage::Sign(sign::CoordinatorMessage::Shares {
+      sign::CoordinatorMessage::Shares {
         id: SignId {
           key: key_pair
             .expect("completed SignShares despite not setting the key pair")
@@ -561,7 +561,7 @@ pub(crate) async fn handle_application_tx<
           attempt: data.attempt,
         },
         shares,
-      }),
+      },
     )
     .await;
 }
@@ -581,11 +581,7 @@ pub(crate) async fn handle_application_tx<
   processors
     .send(
       spec.set().network,
-      CoordinatorMessage::Sign(sign::CoordinatorMessage::Completed {
-        key: key_pair.1.to_vec(),
-        id: plan,
-        tx: tx_hash,
-      }),
+      sign::CoordinatorMessage::Completed { key: key_pair.1.to_vec(), id: plan, tx: tx_hash },
     )
     .await;
 }