2025-01-10 02:24:24 -05:00
|
|
|
use core::future::Future;
|
|
|
|
|
use std::sync::Arc;
|
2024-12-31 10:37:19 -05:00
|
|
|
|
|
|
|
|
use futures::stream::{StreamExt, FuturesOrdered};
|
|
|
|
|
|
2025-11-16 11:50:24 -05:00
|
|
|
use serai_client_serai::{
|
|
|
|
|
abi::primitives::{
|
|
|
|
|
BlockHash,
|
2025-11-18 20:50:32 -05:00
|
|
|
crypto::EmbeddedEllipticCurveKeys as EmbeddedEllipticCurveKeysStruct,
|
2025-11-16 11:50:24 -05:00
|
|
|
network_id::ExternalNetworkId,
|
|
|
|
|
validator_sets::{KeyShares, ExternalValidatorSet},
|
|
|
|
|
address::SeraiAddress,
|
|
|
|
|
},
|
2024-12-31 10:37:19 -05:00
|
|
|
Serai,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
use serai_db::*;
|
|
|
|
|
use serai_task::ContinuallyRan;
|
|
|
|
|
|
|
|
|
|
use serai_cosign::Cosigning;
|
|
|
|
|
|
|
|
|
|
use crate::NewSetInformation;
|
|
|
|
|
|
|
|
|
|
create_db!(
  // Ephemeral database entries maintained by the coordinator's Substrate scanner.
  CoordinatorSubstrateEphemeral {
    // The number of the next Serai block for `EphemeralEventStream` to process (its scan
    // cursor).
    NextBlock: () -> u64,
    // The embedded elliptic curve keys a validator set for an external network, as observed
    // via `SetEmbeddedEllipticCurveKeys` events. Read back when building the key-gen
    // information for a newly decided set.
    EmbeddedEllipticCurveKeys: (
      network: ExternalNetworkId,
      validator: SeraiAddress
    ) -> EmbeddedEllipticCurveKeysStruct,
  }
);
|
|
|
|
|
|
|
|
|
|
/// The event stream for ephemeral events.
pub struct EphemeralEventStream<D: Db> {
  // Database used for the scan cursor (`NextBlock`) and to store/read the observed embedded
  // elliptic curve keys.
  db: D,
  // Client for the Serai node; used to fetch blocks and their events.
  serai: Arc<Serai>,
  // Address of the validator this coordinator runs on behalf of; used to check membership in
  // newly decided sets.
  validator: SeraiAddress,
}
|
|
|
|
|
|
|
|
|
|
impl<D: Db> EphemeralEventStream<D> {
|
|
|
|
|
/// Create a new ephemeral event stream.
|
|
|
|
|
///
|
|
|
|
|
/// Only one of these may exist over the provided database.
|
2025-01-15 12:51:35 -05:00
|
|
|
pub fn new(db: D, serai: Arc<Serai>, validator: SeraiAddress) -> Self {
|
2024-12-31 10:37:19 -05:00
|
|
|
Self { db, serai, validator }
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
2025-01-12 18:29:08 -05:00
|
|
|
type Error = String;
|
|
|
|
|
|
|
|
|
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
2024-12-31 10:37:19 -05:00
|
|
|
async move {
|
|
|
|
|
let next_block = NextBlock::get(&self.db).unwrap_or(0);
|
|
|
|
|
let latest_finalized_block =
|
|
|
|
|
Cosigning::<D>::latest_cosigned_block_number(&self.db).map_err(|e| format!("{e:?}"))?;
|
|
|
|
|
|
|
|
|
|
// These are all the events which generate canonical messages
|
|
|
|
|
struct EphemeralEvents {
|
2025-11-16 11:50:24 -05:00
|
|
|
block_hash: BlockHash,
|
2024-12-31 10:37:19 -05:00
|
|
|
time: u64,
|
2025-11-18 20:50:32 -05:00
|
|
|
embedded_elliptic_curve_keys_events: Vec<serai_client_serai::abi::validator_sets::Event>,
|
2025-11-16 11:50:24 -05:00
|
|
|
set_decided_events: Vec<serai_client_serai::abi::validator_sets::Event>,
|
|
|
|
|
accepted_handover_events: Vec<serai_client_serai::abi::validator_sets::Event>,
|
2024-12-31 10:37:19 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// For a cosigned block, fetch all relevant events
|
|
|
|
|
let scan = {
|
|
|
|
|
let db = self.db.clone();
|
|
|
|
|
let serai = &self.serai;
|
|
|
|
|
move |block_number| {
|
|
|
|
|
let block_hash = Cosigning::<D>::cosigned_block(&db, block_number);
|
|
|
|
|
|
|
|
|
|
async move {
|
|
|
|
|
let block_hash = match block_hash {
|
|
|
|
|
Ok(Some(block_hash)) => block_hash,
|
|
|
|
|
Ok(None) => {
|
|
|
|
|
panic!("iterating to latest cosigned block but couldn't get cosigned block")
|
|
|
|
|
}
|
|
|
|
|
Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
|
|
|
|
|
};
|
|
|
|
|
|
2025-11-18 20:50:32 -05:00
|
|
|
let events = serai.events(block_hash).await.map_err(|e| format!("{e}"))?;
|
|
|
|
|
let embedded_elliptic_curve_keys_events = events
|
|
|
|
|
.validator_sets()
|
|
|
|
|
.set_embedded_elliptic_curve_keys_events()
|
|
|
|
|
.cloned()
|
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
|
let set_decided_events =
|
|
|
|
|
events.validator_sets().set_decided_events().cloned().collect::<Vec<_>>();
|
|
|
|
|
let accepted_handover_events =
|
|
|
|
|
events.validator_sets().accepted_handover_events().cloned().collect::<Vec<_>>();
|
|
|
|
|
let Some(block) = serai.block(block_hash).await.map_err(|e| format!("{e:?}"))? else {
|
2024-12-31 10:37:19 -05:00
|
|
|
Err(format!("Serai node didn't have cosigned block #{block_number}"))?
|
|
|
|
|
};
|
|
|
|
|
|
2025-11-16 11:50:24 -05:00
|
|
|
// We use time in seconds, not milliseconds, here
|
|
|
|
|
let time = block.header.unix_time_in_millis() / 1000;
|
2024-12-31 10:37:19 -05:00
|
|
|
Ok((
|
|
|
|
|
block_number,
|
2025-11-18 20:50:32 -05:00
|
|
|
EphemeralEvents {
|
|
|
|
|
block_hash,
|
|
|
|
|
time,
|
|
|
|
|
embedded_elliptic_curve_keys_events,
|
|
|
|
|
set_decided_events,
|
|
|
|
|
accepted_handover_events,
|
|
|
|
|
},
|
2024-12-31 10:37:19 -05:00
|
|
|
))
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
// Sync the next set of upcoming blocks all at once to minimize latency
|
|
|
|
|
const BLOCKS_TO_SYNC_AT_ONCE: u64 = 50;
|
2025-01-04 23:28:54 -05:00
|
|
|
// FuturesOrdered can be bad practice due to potentially causing tiemouts if it isn't
|
|
|
|
|
// sufficiently polled. Our processing loop isn't minimal, itself making multiple requests,
|
|
|
|
|
// but the loop body should only be executed a few times a week. It's better to get through
|
|
|
|
|
// most blocks with this optimization, and have timeouts a few times a week, than not have
|
|
|
|
|
// this at all.
|
2024-12-31 10:37:19 -05:00
|
|
|
let mut set = FuturesOrdered::new();
|
|
|
|
|
for block_number in
|
|
|
|
|
next_block ..= latest_finalized_block.min(next_block + BLOCKS_TO_SYNC_AT_ONCE)
|
|
|
|
|
{
|
|
|
|
|
set.push_back(scan(block_number));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for block_number in next_block ..= latest_finalized_block {
|
|
|
|
|
// Get the next block in our queue
|
|
|
|
|
let (popped_block_number, block) = set.next().await.unwrap()?;
|
|
|
|
|
assert_eq!(block_number, popped_block_number);
|
|
|
|
|
// Re-populate the queue
|
|
|
|
|
if (block_number + BLOCKS_TO_SYNC_AT_ONCE) <= latest_finalized_block {
|
|
|
|
|
set.push_back(scan(block_number + BLOCKS_TO_SYNC_AT_ONCE));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let mut txn = self.db.txn();
|
|
|
|
|
|
2025-11-18 20:50:32 -05:00
|
|
|
for event in block.embedded_elliptic_curve_keys_events {
|
|
|
|
|
let serai_client_serai::abi::validator_sets::Event::SetEmbeddedEllipticCurveKeys {
|
|
|
|
|
validator,
|
|
|
|
|
keys,
|
|
|
|
|
} = &event
|
|
|
|
|
else {
|
|
|
|
|
panic!(
|
|
|
|
|
"{}: {event:?}",
|
|
|
|
|
"`SetEmbeddedEllipticCurveKeys` event wasn't a `SetEmbeddedEllipticCurveKeys` event"
|
|
|
|
|
);
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
EmbeddedEllipticCurveKeys::set(&mut txn, keys.network(), *validator, keys);
|
|
|
|
|
}
|
|
|
|
|
|
2025-11-16 11:50:24 -05:00
|
|
|
for set_decided in block.set_decided_events {
|
|
|
|
|
let serai_client_serai::abi::validator_sets::Event::SetDecided { set, validators } =
|
|
|
|
|
&set_decided
|
|
|
|
|
else {
|
|
|
|
|
panic!("`SetDecided` event wasn't a `SetDecided` event: {set_decided:?}");
|
2024-12-31 10:37:19 -05:00
|
|
|
};
|
2025-11-18 20:50:32 -05:00
|
|
|
|
2024-12-31 10:37:19 -05:00
|
|
|
// We only coordinate over external networks
|
2025-01-30 03:14:24 -05:00
|
|
|
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
|
2025-11-16 11:50:24 -05:00
|
|
|
let validators =
|
2025-11-18 20:50:32 -05:00
|
|
|
validators.iter().map(|(validator, weight)| (*validator, weight.0)).collect::<Vec<_>>();
|
|
|
|
|
|
2024-12-31 10:37:19 -05:00
|
|
|
let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
|
|
|
|
|
if in_set {
|
|
|
|
|
if u16::try_from(validators.len()).is_err() {
|
|
|
|
|
Err("more than u16::MAX validators sent")?;
|
|
|
|
|
}
|
|
|
|
|
|
2025-01-14 07:51:39 -05:00
|
|
|
// Do the summation in u32 so we don't risk a u16 overflow
|
2024-12-31 10:37:19 -05:00
|
|
|
let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::<u32>();
|
2025-11-16 11:50:24 -05:00
|
|
|
if total_weight > u32::from(KeyShares::MAX_PER_SET) {
|
2024-12-31 10:37:19 -05:00
|
|
|
Err(format!(
|
2025-11-16 11:50:24 -05:00
|
|
|
"{set:?} has {total_weight} key shares when the max is {}",
|
|
|
|
|
KeyShares::MAX_PER_SET
|
2024-12-31 10:37:19 -05:00
|
|
|
))?;
|
|
|
|
|
}
|
2025-11-16 11:50:24 -05:00
|
|
|
let total_weight = u16::try_from(total_weight)
|
|
|
|
|
.expect("value smaller than `u16` constant but doesn't fit in `u16`");
|
2024-12-31 10:37:19 -05:00
|
|
|
|
|
|
|
|
// Fetch all of the validators' embedded elliptic curve keys
|
|
|
|
|
let mut evrf_public_keys = Vec::with_capacity(usize::from(total_weight));
|
|
|
|
|
for (validator, weight) in &validators {
|
2025-11-18 20:50:32 -05:00
|
|
|
let keys = match EmbeddedEllipticCurveKeys::get(&txn, set.network, *validator)
|
|
|
|
|
.expect("selected validator lacked embedded elliptic curve keys")
|
|
|
|
|
{
|
|
|
|
|
EmbeddedEllipticCurveKeysStruct::Bitcoin(substrate, external) => {
|
|
|
|
|
assert_eq!(set.network, ExternalNetworkId::Bitcoin);
|
|
|
|
|
(substrate, external.to_vec())
|
|
|
|
|
}
|
|
|
|
|
EmbeddedEllipticCurveKeysStruct::Ethereum(substrate, external) => {
|
|
|
|
|
assert_eq!(set.network, ExternalNetworkId::Ethereum);
|
|
|
|
|
(substrate, external.to_vec())
|
|
|
|
|
}
|
|
|
|
|
EmbeddedEllipticCurveKeysStruct::Monero(substrate) => {
|
|
|
|
|
assert_eq!(set.network, ExternalNetworkId::Monero);
|
|
|
|
|
(substrate, substrate.to_vec())
|
|
|
|
|
}
|
2025-11-16 11:50:24 -05:00
|
|
|
};
|
|
|
|
|
for _ in 0 .. *weight {
|
2025-11-18 20:50:32 -05:00
|
|
|
evrf_public_keys.push(keys.clone());
|
2024-12-31 10:37:19 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-01-15 12:51:35 -05:00
|
|
|
let mut new_set = NewSetInformation {
|
2025-01-30 03:14:24 -05:00
|
|
|
set,
|
2025-11-16 11:50:24 -05:00
|
|
|
serai_block: block.block_hash.0,
|
2025-01-15 12:51:35 -05:00
|
|
|
declaration_time: block.time,
|
2025-01-30 03:14:24 -05:00
|
|
|
// TODO: This should be inlined into the Processor's key gen code
|
|
|
|
|
// It's legacy from when we removed participants from the key gen
|
2025-01-15 12:51:35 -05:00
|
|
|
threshold: ((total_weight * 2) / 3) + 1,
|
2025-11-18 20:50:32 -05:00
|
|
|
// TODO: Why are `validators` and `evrf_public_keys` two separate fields?
|
2025-01-15 12:51:35 -05:00
|
|
|
validators,
|
|
|
|
|
evrf_public_keys,
|
|
|
|
|
participant_indexes: Default::default(),
|
|
|
|
|
participant_indexes_reverse_lookup: Default::default(),
|
|
|
|
|
};
|
|
|
|
|
// These aren't serialized, and we immediately serialize and drop this, so this isn't
|
|
|
|
|
// necessary. It's just good practice not have this be dirty
|
|
|
|
|
new_set.init_participant_indexes();
|
|
|
|
|
crate::NewSet::send(&mut txn, &new_set);
|
2024-12-31 10:37:19 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for accepted_handover in block.accepted_handover_events {
|
2025-11-16 11:50:24 -05:00
|
|
|
let serai_client_serai::abi::validator_sets::Event::AcceptedHandover { set } =
|
2024-12-31 10:37:19 -05:00
|
|
|
&accepted_handover
|
|
|
|
|
else {
|
|
|
|
|
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
|
|
|
|
|
};
|
2025-01-30 03:14:24 -05:00
|
|
|
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
|
|
|
|
|
crate::SignSlashReport::send(&mut txn, set);
|
2024-12-31 10:37:19 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
txn.commit();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Ok(next_block <= latest_finalized_block)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|