use core::{ops::Deref, future::Future};
use std::collections::{HashSet, HashMap};

use zeroize::Zeroizing;

use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

use frost::ThresholdParams;

use serai_client::{
  SeraiError, Block, Serai,
  primitives::BlockHash,
  validator_sets::{
    primitives::{Session, ValidatorSet, KeyPair},
    ValidatorSetsEvent,
  },
  in_instructions::InInstructionsEvent,
  tokens::{primitives::OutInstructionWithBalance, TokensEvent},
};

use serai_db::DbTxn;

use processor_messages::{SubstrateContext, key_gen::KeyGenId, CoordinatorMessage};

use crate::{Db, processor::Processor, tributary::TributarySpec};

mod db;
pub use db::*;
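
/// Check if the given key is a participant in the specified validator set.
///
/// Returns `Ok(None)` if the set doesn't exist, and `Ok(Some(bool))` for whether the key's
/// Ristretto point appears among the set's participants.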
async fn in_set(
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  serai: &Serai,
  set: ValidatorSet,
) -> Result<Option<bool>, SeraiError> {
  let Some(data) = serai.get_validator_set(set).await? else {
    return Ok(None);
  };
  let key = (Ristretto::generator() * key.deref()).to_bytes();
  Ok(Some(data.participants.iter().any(|(participant, _)| participant.0 == key)))
}
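
/// Handle a NewSet event. If we're in the set, create a Tributary for it and instruct the
/// processor to begin a DKG for the set's key pair.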
async fn handle_new_set<
  D: Db,
  Fut: Future<Output = ()>,
  CNT: Clone + Fn(&mut D, TributarySpec) -> Fut,
  Pro: Processor,
>(
  db: &mut D,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  create_new_tributary: CNT,
  processor: &Pro,
  serai: &Serai,
  block: &Block,
  set: ValidatorSet,
) -> Result<(), SeraiError> {
  if in_set(key, serai, set).await?.expect("NewSet for set which doesn't exist") {
    let set_data = serai.get_validator_set(set).await?.expect("NewSet for set which doesn't exist");

    let spec = TributarySpec::new(block.hash(), block.time().unwrap(), set, set_data);
    create_new_tributary(db, spec.clone()).await;

    // Trigger a DKG
    // TODO: Check how the processor handles this being fired multiple times
    // We already have a unique event ID based on (block, event index), where the event index is
    // the one generated in this handle_block function
    // We could use that on this end and the processor's end?
    processor
      .send(CoordinatorMessage::KeyGen(
        processor_messages::key_gen::CoordinatorMessage::GenerateKey {
          id: KeyGenId { set, attempt: 0 },
          params: ThresholdParams::new(
            spec.t(),
            spec.n(),
            spec
              .i(Ristretto::generator() * key.deref())
              .expect("In set for a set we aren't in set for"),
          )
          .unwrap(),
        },
      ))
      .await;
  }

  Ok(())
}
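
/// Handle a KeyGen event. If we're in the set, inform the processor of the confirmed key pair,
/// alongside the Substrate context it should operate under.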
async fn handle_key_gen<Pro: Processor>(
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  processor: &Pro,
  serai: &Serai,
  block: &Block,
  set: ValidatorSet,
  key_pair: KeyPair,
) -> Result<(), SeraiError> {
  if in_set(key, serai, set).await?.expect("KeyGen occurred for a set which doesn't exist") {
    // TODO: Check how the processor handles this being fired multiple times
    processor
      .send(CoordinatorMessage::Substrate(
        processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair {
          context: SubstrateContext {
            serai_time: block.time().unwrap(),
            coin_latest_finalized_block: serai
              .get_latest_block_for_network(block.hash(), set.network)
              .await?
              // The processor treats this as a magic value which will cause it to find a network
              // block which has a time greater than or equal to the Serai time
              .unwrap_or(BlockHash([0; 32])),
          },
          set,
          key_pair,
        },
      ))
      .await;
  }

  Ok(())
}
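
/// Handle the Batch and Burn events within a Substrate block, sending each network which had
/// activity a SubstrateBlock message containing its acknowledged block and pending burns.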
async fn handle_batch_and_burns<Pro: Processor>(
  processor: &Pro,
  serai: &Serai,
  block: &Block,
) -> Result<(), SeraiError> {
  let hash = block.hash();

  // Track which networks had events with a Vec in order to preserve the insertion order
  // While that shouldn't be needed, ensuring order never hurts, and may enable design choices
  // with regards to Processor <-> Coordinator message passing
  let mut networks_with_event = vec![];
  let mut network_had_event = |burns: &mut HashMap<_, _>, network| {
    // Don't insert this network multiple times
    // A Vec is still used in order to maintain the insertion order
    if !networks_with_event.contains(&network) {
      networks_with_event.push(network);
      burns.insert(network, vec![]);
    }
  };

  let mut batch_block = HashMap::new();
  let mut burns = HashMap::new();

  for batch in serai.get_batch_events(hash).await? {
    if let InInstructionsEvent::Batch { network, id: _, block: network_block } = batch {
      network_had_event(&mut burns, network);

      // Track what Serai acknowledges as the latest block for this network
      // If this Substrate block has multiple batches, the last batch's block will overwrite the
      // prior batches
      // Since batches within a block are guaranteed to be ordered, thanks to their incremental ID,
      // the last batch will be the latest batch, so its block will be the latest block
      // This is just a mild optimization to prevent needing an additional RPC call to grab this
      batch_block.insert(network, network_block);
    } else {
      panic!("Batch event wasn't Batch: {batch:?}");
    }
  }

  for burn in serai.get_burn_events(hash).await? {
    if let TokensEvent::Burn { address: _, balance, instruction } = burn {
      let network = balance.coin.network();
      network_had_event(&mut burns, network);

      // network_had_event should register an entry in burns
      let mut burns_so_far = burns.remove(&network).unwrap();
      burns_so_far.push(OutInstructionWithBalance { balance, instruction });
      burns.insert(network, burns_so_far);
    } else {
      panic!("Burn event wasn't Burn: {burn:?}");
    }
  }

  assert_eq!(HashSet::<&_>::from_iter(networks_with_event.iter()).len(), networks_with_event.len());

  for network in networks_with_event {
    let coin_latest_finalized_block = if let Some(block) = batch_block.remove(&network) {
      block
    } else {
      // If it's had a batch or a burn, it must have had a block acknowledged
      serai
        .get_latest_block_for_network(hash, network)
        .await?
        .expect("network had a batch/burn yet never set a latest block")
    };

    // TODO: Check how the processor handles this being fired multiple times
    processor
      .send(CoordinatorMessage::Substrate(
        processor_messages::substrate::CoordinatorMessage::SubstrateBlock {
          context: SubstrateContext {
            serai_time: block.time().unwrap(),
            coin_latest_finalized_block,
          },
          network,
          block: block.number(),
          key: serai
            .get_keys(ValidatorSet { network, session: Session(0) }) // TODO2
            .await?
            .map(|keys| keys.1.into_inner())
            .expect("batch/burn for network which never set keys"),
          burns: burns.remove(&network).unwrap(),
        },
      ))
      .await;
  }

  Ok(())
}

// Handle a specific Substrate block, returning an error when it fails to get data
// (not blocking / holding)
async fn handle_block<
  D: Db,
  Fut: Future<Output = ()>,
  CNT: Clone + Fn(&mut D, TributarySpec) -> Fut,
  Pro: Processor,
>(
  db: &mut SubstrateDb<D>,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  create_new_tributary: CNT,
  processor: &Pro,
  serai: &Serai,
  block: Block,
) -> Result<(), SeraiError> {
  let hash = block.hash();

  // Define an indexed event ID.
  let mut event_id = 0;

  // If a new validator set was activated, create tributary/inform processor to do a DKG
  for new_set in serai.get_new_set_events(hash).await? {
    // Individually mark each event as handled so on reboot, we minimize duplicates
    // Additionally, if the Serai connection also fails 1/100 times, this means a block with 1000
    // events will successfully be incrementally handled (though the Serai connection should be
    // stable)
    if !SubstrateDb::<D>::handled_event(&db.0, hash, event_id) {
      if let ValidatorSetsEvent::NewSet { set } = new_set {
        handle_new_set(&mut db.0, key, create_new_tributary.clone(), processor, serai, &block, set)
          .await?;
      } else {
        panic!("NewSet event wasn't NewSet: {new_set:?}");
      }
      let mut txn = db.0.txn();
      SubstrateDb::<D>::handle_event(&mut txn, hash, event_id);
      txn.commit();
    }
    event_id += 1;
  }

  // If a key pair was confirmed, inform the processor
  for key_gen in serai.get_key_gen_events(hash).await? {
    if !SubstrateDb::<D>::handled_event(&db.0, hash, event_id) {
      if let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen {
        handle_key_gen(key, processor, serai, &block, set, key_pair).await?;
      } else {
        panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
      }
      let mut txn = db.0.txn();
      SubstrateDb::<D>::handle_event(&mut txn, hash, event_id);
      txn.commit();
    }
    event_id += 1;
  }

  // Finally, tell the processor of acknowledged blocks/burns
  // This uses a single event as, unlike prior events which individually executed code, all
  // following events share data collection
  // This does break the uniqueness of (hash, event_id) -> one event, yet
  // (network, (hash, event_id)) remains valid as a unique ID for an event
  if !SubstrateDb::<D>::handled_event(&db.0, hash, event_id) {
    handle_batch_and_burns(processor, serai, &block).await?;
  }
  let mut txn = db.0.txn();
  SubstrateDb::<D>::handle_event(&mut txn, hash, event_id);
  txn.commit();

  Ok(())
}
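
/// Scan all finalized Substrate blocks past `last_block`, handling their events in order.
/// `last_block` is advanced, and persisted via `set_last_block`, after each block is handled, so
/// a restart resumes from the first unhandled block.
///
/// A minimal polling loop as a sketch; the construction of `db`, `key`, `create_new_tributary`,
/// `processor`, and `serai`, and the restoration of `last_block`, are assumed (hence `ignore`):
///
/// ```ignore
/// let mut last_block = 0; // or restore the previously persisted value from the DB
/// loop {
///   handle_new_blocks(&mut db, &key, create_new_tributary.clone(), &processor, &serai, &mut last_block)
///     .await?;
///   // Poll interval; an assumption, chosen to roughly match Substrate's default block time
///   tokio::time::sleep(core::time::Duration::from_secs(6)).await;
/// }
/// ```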
pub async fn handle_new_blocks<
  D: Db,
  Fut: Future<Output = ()>,
  CNT: Clone + Fn(&mut D, TributarySpec) -> Fut,
  Pro: Processor,
>(
  db: &mut SubstrateDb<D>,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  create_new_tributary: CNT,
  processor: &Pro,
  serai: &Serai,
  last_block: &mut u64,
) -> Result<(), SeraiError> {
  // Check if there's been a new Substrate block
  let latest = serai.get_latest_block().await?;
  let latest_number = latest.number();
  if latest_number == *last_block {
    return Ok(());
  }
  let mut latest = Some(latest);

  for b in (*last_block + 1) ..= latest_number {
    handle_block(
      db,
      key,
      create_new_tributary.clone(),
      processor,
      serai,
      if b == latest_number {
        latest.take().unwrap()
      } else {
        serai
          .get_block_by_number(b)
          .await?
          .expect("couldn't get block before the latest finalized block")
      },
    )
    .await?;
    *last_block += 1;
    db.set_last_block(*last_block);
  }

  Ok(())
}