Add a processor API to the coordinator

This commit is contained in:
Luke Parker
2023-04-17 02:10:33 -04:00
parent 595cd6d404
commit 92a868e574
3 changed files with 139 additions and 69 deletions

View File

@@ -16,21 +16,24 @@ use tokio::time::sleep;
mod db; mod db;
pub use db::*; pub use db::*;
mod transaction; pub mod tributary;
pub use transaction::Transaction as TributaryTransaction;
mod p2p; mod p2p;
pub use p2p::*; pub use p2p::*;
pub mod processor;
use processor::Processor;
mod substrate; mod substrate;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;
async fn run<D: Db, P: P2p>( async fn run<D: Db, Pro: Processor, P: P2p>(
db: D, db: D,
key: Zeroizing<<Ristretto as Ciphersuite>::F>, key: Zeroizing<<Ristretto as Ciphersuite>::F>,
p2p: P, p2p: P,
mut processor: Pro,
serai: Serai, serai: Serai,
) { ) {
let mut db = MainDb::new(db); let mut db = MainDb::new(db);
@@ -39,8 +42,15 @@ async fn run<D: Db, P: P2p>(
tokio::spawn(async move { tokio::spawn(async move {
loop { loop {
match substrate::handle_new_blocks(&mut db, &key, &p2p, &serai, &mut last_substrate_block) match substrate::handle_new_blocks(
.await &mut db,
&key,
&p2p,
&mut processor,
&serai,
&mut last_substrate_block,
)
.await
{ {
Ok(()) => {} Ok(()) => {}
Err(e) => { Err(e) => {
@@ -63,16 +73,21 @@ async fn run<D: Db, P: P2p>(
#[tokio::main] #[tokio::main]
async fn main() { async fn main() {
let db = MemDb::new(); // TODO let db = MemDb::new(); // TODO
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::ZERO); // TODO let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::ZERO); // TODO
let p2p = LocalP2p {}; // TODO let p2p = LocalP2p {}; // TODO
let processor = processor::MemProcessor::new(); // TODO
let serai = || async { let serai = || async {
loop { loop {
let Ok(serai) = Serai::new("ws://127.0.0.1:9944").await else { let Ok(serai) = Serai::new("ws://127.0.0.1:9944").await else {
log::error!("couldn't connect to the Serai node"); log::error!("couldn't connect to the Serai node");
sleep(Duration::from_secs(5)).await;
continue continue
}; };
return serai; return serai;
} }
}; };
run(db, key, p2p, serai().await).await run(db, key, p2p, processor, serai().await).await
} }

View File

@@ -0,0 +1,41 @@
use std::{
sync::{Arc, RwLock},
collections::VecDeque,
};
use processor_messages::{ProcessorMessage, CoordinatorMessage};
/// A message emitted by a processor, tagged with an ID so it can be acknowledged.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Message {
// Identifier used by `Processor::ack` to mark this message handled.
// NOTE(review): ordering/uniqueness guarantees aren't visible here — confirm at the producer.
pub id: u64,
// The processor's actual payload.
pub msg: ProcessorMessage,
}
/// Interface by which the coordinator communicates with a processor.
///
/// Implementors must be `'static + Send + Sync` as they're shared across spawned tasks.
#[async_trait::async_trait]
pub trait Processor: 'static + Send + Sync {
/// Send a message from the coordinator to the processor.
async fn send(&mut self, msg: CoordinatorMessage);
/// Receive the next message from the processor.
async fn recv(&mut self) -> Message;
/// Acknowledge a previously received message (presumably so it isn't re-delivered —
/// TODO confirm once a non-stub implementation exists).
async fn ack(&mut self, msg: Message);
}
// TODO: Move this to tests
/// An in-memory [`Processor`] backed by a shared queue of [`Message`]s.
///
/// Cloning-friendly via the inner `Arc`; currently only the constructor is functional.
pub struct MemProcessor(Arc<RwLock<VecDeque<Message>>>);

impl MemProcessor {
  /// Create a new, empty in-memory processor.
  pub fn new() -> MemProcessor {
    MemProcessor(Arc::new(RwLock::new(VecDeque::new())))
  }
}

// Implement Default instead of silencing clippy::new_without_default with an #[allow].
impl Default for MemProcessor {
  fn default() -> MemProcessor {
    MemProcessor::new()
  }
}
#[async_trait::async_trait]
impl Processor for MemProcessor {
// Every method is currently a stub: calling any of them panics via `todo!()`.
// The backing VecDeque is unused until these are implemented.
async fn send(&mut self, _: CoordinatorMessage) {
todo!()
}
async fn recv(&mut self) -> Message {
todo!()
}
async fn ack(&mut self, _: Message) {
todo!()
}
}

View File

@@ -3,7 +3,6 @@ use std::collections::{HashSet, HashMap};
use zeroize::Zeroizing; use zeroize::Zeroizing;
use transcript::{Transcript, RecommendedTranscript};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::{Participant, ThresholdParams}; use frost::{Participant, ThresholdParams};
@@ -20,9 +19,9 @@ use serai_client::{
use tributary::Tributary; use tributary::Tributary;
use processor_messages::{SubstrateContext, key_gen::KeyGenId}; use processor_messages::{SubstrateContext, key_gen::KeyGenId, CoordinatorMessage};
use crate::{Db, MainDb, TributaryTransaction, P2p}; use crate::{Db, MainDb, P2p, processor::Processor};
async fn get_coin_key(serai: &Serai, set: ValidatorSet) -> Result<Option<Vec<u8>>, SeraiError> { async fn get_coin_key(serai: &Serai, set: ValidatorSet) -> Result<Option<Vec<u8>>, SeraiError> {
Ok(serai.get_keys(set).await?.map(|keys| keys.1.into_inner())) Ok(serai.get_keys(set).await?.map(|keys| keys.1.into_inner()))
@@ -46,10 +45,11 @@ async fn in_set(
)) ))
} }
async fn handle_new_set<D: Db, P: P2p>( async fn handle_new_set<D: Db, Pro: Processor, P: P2p>(
db: &mut MainDb<D>, db: &mut MainDb<D>,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>, key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
p2p: &P, p2p: &P,
processor: &mut Pro,
serai: &Serai, serai: &Serai,
block: &Block, block: &Block,
set: ValidatorSet, set: ValidatorSet,
@@ -69,21 +69,11 @@ async fn handle_new_set<D: Db, P: P2p>(
validators.insert(participant, amount.0 / set_data.bond.0); validators.insert(participant, amount.0 / set_data.bond.0);
} }
// Calculate the genesis for this Tributary
let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis");
// This locks it to a specific Serai chain
genesis.append_message(b"serai_block", block.hash());
genesis.append_message(b"session", set.session.0.to_le_bytes());
genesis.append_message(b"network", set.network.0.to_le_bytes());
let genesis = genesis.challenge(b"genesis");
let genesis_ref: &[u8] = genesis.as_ref();
let genesis = genesis_ref[.. 32].try_into().unwrap();
// TODO: Do something with this // TODO: Do something with this
let tributary = Tributary::<_, TributaryTransaction, _>::new( let tributary = Tributary::<_, crate::tributary::Transaction, _>::new(
// TODO2: Use a DB on a dedicated volume // TODO2: Use a DB on a dedicated volume
db.0.clone(), db.0.clone(),
genesis, crate::tributary::genesis(block.hash(), set),
block.time().expect("Serai block didn't have a timestamp set"), block.time().expect("Serai block didn't have a timestamp set"),
key.clone(), key.clone(),
validators, validators,
@@ -93,22 +83,27 @@ async fn handle_new_set<D: Db, P: P2p>(
.unwrap(); .unwrap();
// Trigger a DKG // Trigger a DKG
// TODO: Send this to processor. Check how it handles it being fired multiple times // TODO: Check how the processor handles thi being fired multiple times
// We already have a unique event ID based on block, event index (where event index is // We already have a unique event ID based on block, event index (where event index is
// the one generated in this handle_block function) // the one generated in this handle_block function)
// We could use that on this end and the processor end? // We could use that on this end and the processor end?
let msg = processor_messages::key_gen::CoordinatorMessage::GenerateKey { processor
id: KeyGenId { set, attempt: 0 }, .send(CoordinatorMessage::KeyGen(
params: ThresholdParams::new(t, n, i).unwrap(), processor_messages::key_gen::CoordinatorMessage::GenerateKey {
}; id: KeyGenId { set, attempt: 0 },
params: ThresholdParams::new(t, n, i).unwrap(),
},
))
.await;
} }
Ok(()) Ok(())
} }
async fn handle_key_gen<D: Db>( async fn handle_key_gen<D: Db, Pro: Processor>(
db: &mut MainDb<D>, db: &mut MainDb<D>,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>, key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
processor: &mut Pro,
serai: &Serai, serai: &Serai,
block: &Block, block: &Block,
set: ValidatorSet, set: ValidatorSet,
@@ -119,25 +114,30 @@ async fn handle_key_gen<D: Db>(
.expect("KeyGen occurred for a set which doesn't exist") .expect("KeyGen occurred for a set which doesn't exist")
.is_some() .is_some()
{ {
// TODO: Send this to processor. Check how it handles it being fired multiple times // TODO: Check how the processor handles this being fired multiple times
let msg = processor_messages::key_gen::CoordinatorMessage::ConfirmKeyPair { processor
context: SubstrateContext { .send(CoordinatorMessage::KeyGen(
coin_latest_finalized_block: serai processor_messages::key_gen::CoordinatorMessage::ConfirmKeyPair {
.get_latest_block_for_network(block.hash(), set.network) context: SubstrateContext {
.await? coin_latest_finalized_block: serai
.unwrap_or(BlockHash([0; 32])), // TODO: Have the processor override this .get_latest_block_for_network(block.hash(), set.network)
}, .await?
// TODO: Check the DB for which attempt used this key pair .unwrap_or(BlockHash([0; 32])), // TODO: Have the processor override this
id: KeyGenId { set, attempt: todo!() }, },
}; // TODO: Check the DB for which attempt used this key pair
id: KeyGenId { set, attempt: todo!() },
},
))
.await;
} }
Ok(()) Ok(())
} }
async fn handle_batch_and_burns<D: Db>( async fn handle_batch_and_burns<D: Db, Pro: Processor>(
db: &mut MainDb<D>, db: &mut MainDb<D>,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>, key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
processor: &mut Pro,
serai: &Serai, serai: &Serai,
block: &Block, block: &Block,
) -> Result<(), SeraiError> { ) -> Result<(), SeraiError> {
@@ -170,17 +170,21 @@ async fn handle_batch_and_burns<D: Db>(
// the last batch will be the latest batch, so its block will be the latest block // the last batch will be the latest batch, so its block will be the latest block
batch_block.insert(network, network_block); batch_block.insert(network, network_block);
// TODO: Send this to processor. Check how it handles it being fired multiple times // TODO: Check how the processor handles this being fired multiple times
let msg = processor_messages::coordinator::CoordinatorMessage::BatchSigned { processor
key: get_coin_key( .send(CoordinatorMessage::Coordinator(
serai, processor_messages::coordinator::CoordinatorMessage::BatchSigned {
// TODO2 key: get_coin_key(
ValidatorSet { network, session: Session(0) }, serai,
) // TODO2
.await? ValidatorSet { network, session: Session(0) },
.expect("ValidatorSet without keys signed a batch"), )
block: network_block, .await?
}; .expect("ValidatorSet without keys signed a batch"),
block: network_block,
},
))
.await;
} else { } else {
panic!("Batch event wasn't Batch: {batch:?}"); panic!("Batch event wasn't Batch: {batch:?}");
} }
@@ -224,18 +228,22 @@ async fn handle_batch_and_burns<D: Db>(
.expect("network had a batch/burn yet never set a latest block") .expect("network had a batch/burn yet never set a latest block")
}; };
// TODO: Send this to processor. Check how it handles it being fired multiple times // TODO: Check how the processor handles this being fired multiple times
let msg = processor_messages::substrate::CoordinatorMessage::SubstrateBlock { processor
context: SubstrateContext { coin_latest_finalized_block }, .send(CoordinatorMessage::Substrate(
key: get_coin_key( processor_messages::substrate::CoordinatorMessage::SubstrateBlock {
serai, context: SubstrateContext { coin_latest_finalized_block },
// TODO2 key: get_coin_key(
ValidatorSet { network, session: Session(0) }, serai,
) // TODO2
.await? ValidatorSet { network, session: Session(0) },
.expect("batch/burn for network which never set keys"), )
burns: burns.remove(&network).unwrap(), .await?
}; .expect("batch/burn for network which never set keys"),
burns: burns.remove(&network).unwrap(),
},
))
.await;
} }
Ok(()) Ok(())
@@ -243,10 +251,11 @@ async fn handle_batch_and_burns<D: Db>(
// Handle a specific Substrate block, returning an error when it fails to get data // Handle a specific Substrate block, returning an error when it fails to get data
// (not blocking / holding) // (not blocking / holding)
async fn handle_block<D: Db, P: P2p>( async fn handle_block<D: Db, Pro: Processor, P: P2p>(
db: &mut MainDb<D>, db: &mut MainDb<D>,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>, key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
p2p: &P, p2p: &P,
processor: &mut Pro,
serai: &Serai, serai: &Serai,
block: Block, block: Block,
) -> Result<(), SeraiError> { ) -> Result<(), SeraiError> {
@@ -263,7 +272,7 @@ async fn handle_block<D: Db, P: P2p>(
// stable) // stable)
if !db.handled_event(hash, event_id) { if !db.handled_event(hash, event_id) {
if let ValidatorSetsEvent::NewSet { set } = new_set { if let ValidatorSetsEvent::NewSet { set } = new_set {
handle_new_set(db, key, p2p, serai, &block, set).await?; handle_new_set(db, key, p2p, processor, serai, &block, set).await?;
} else { } else {
panic!("NewSet event wasn't NewSet: {new_set:?}"); panic!("NewSet event wasn't NewSet: {new_set:?}");
} }
@@ -276,7 +285,7 @@ async fn handle_block<D: Db, P: P2p>(
for key_gen in serai.get_key_gen_events(hash).await? { for key_gen in serai.get_key_gen_events(hash).await? {
if !db.handled_event(hash, event_id) { if !db.handled_event(hash, event_id) {
if let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen { if let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen {
handle_key_gen(db, key, serai, &block, set, key_pair).await?; handle_key_gen(db, key, processor, serai, &block, set, key_pair).await?;
} else { } else {
panic!("KeyGen event wasn't KeyGen: {key_gen:?}"); panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
} }
@@ -291,17 +300,18 @@ async fn handle_block<D: Db, P: P2p>(
// This does break the uniqueness of (hash, event_id) -> one event, yet // This does break the uniqueness of (hash, event_id) -> one event, yet
// (network, (hash, event_id)) remains valid as a unique ID for an event // (network, (hash, event_id)) remains valid as a unique ID for an event
if !db.handled_event(hash, event_id) { if !db.handled_event(hash, event_id) {
handle_batch_and_burns(db, key, serai, &block).await?; handle_batch_and_burns(db, key, processor, serai, &block).await?;
} }
db.handle_event(hash, event_id); db.handle_event(hash, event_id);
Ok(()) Ok(())
} }
pub async fn handle_new_blocks<D: Db, P: P2p>( pub async fn handle_new_blocks<D: Db, Pro: Processor, P: P2p>(
db: &mut MainDb<D>, db: &mut MainDb<D>,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>, key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
p2p: &P, p2p: &P,
processor: &mut Pro,
serai: &Serai, serai: &Serai,
last_substrate_block: &mut u64, last_substrate_block: &mut u64,
) -> Result<(), SeraiError> { ) -> Result<(), SeraiError> {
@@ -318,11 +328,15 @@ pub async fn handle_new_blocks<D: Db, P: P2p>(
db, db,
key, key,
p2p, p2p,
processor,
serai, serai,
if b == latest_number { if b == latest_number {
latest.take().unwrap() latest.take().unwrap()
} else { } else {
serai.get_block_by_number(b).await?.unwrap() serai
.get_block_by_number(b)
.await?
.expect("couldn't get block before the latest finalized block")
}, },
) )
.await?; .await?;