serai/coordinator/substrate/src/publish_slash_report.rs

use core::future::Future;
use std::sync::Arc;

use serai_db::{DbTxn, Db};

use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, Serai};

use serai_task::ContinuallyRan;

use crate::SlashReports;

/// Publish slash reports from `SlashReports` onto Serai.
pub struct PublishSlashReportTask<D: Db> {
db: D,
serai: Arc<Serai>,
}

impl<D: Db> PublishSlashReportTask<D> {
/// Create a task to publish slash reports onto Serai.
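  ///
  /// A minimal usage sketch (the `db` and `serai` handles are assumed to already exist, and how
  /// the task is driven is left to the caller):
  ///
  /// ```ignore
  /// let task = PublishSlashReportTask::new(db.clone(), serai.clone());
  /// // The task is then driven via its `ContinuallyRan` implementation.
  /// ```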
pub fn new(db: D, serai: Arc<Serai>) -> Self {
Self { db, serai }
}
}

impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
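  // Errors are surfaced as human-readable strings describing what failed.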
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
let mut made_progress = false;
for network in serai_client::primitives::NETWORKS {
if network == NetworkId::Serai {
continue;
};
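        // Take the pending slash report within a DB transaction so it's only removed from the
        // database once it's actually been handled (the txn is dropped, not committed, on error)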
let mut txn = self.db.txn();
let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else {
// No slash report to publish
continue;
};
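        // Fetch the validator-sets state as of the latest finalized block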
let serai =
self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
let serai = serai.validator_sets();
let session_after_slash_report = Session(session.0 + 1);
let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
let current_session = current_session.map(|session| session.0);
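        // A current session of `None` (no session yet for this network) compares as less than
        // any `Some` session in the checks below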
// Only attempt to publish the slash report for session #n while session #n+1 is still
// active
let session_after_slash_report_retired =
current_session > Some(session_after_slash_report.0);
if session_after_slash_report_retired {
          // Commit the txn to drain this slash report from the database so we don't attempt to
          // publish it again later
txn.commit();
continue;
}
if Some(session_after_slash_report.0) != current_session {
// We already checked the current session wasn't greater, and they're not equal
assert!(current_session < Some(session_after_slash_report.0));
          // This would mean the Serai node is resyncing and is behind where it previously was
          Err("have a slash report for a session Serai has yet to retire".to_string())?;
}
        // If the session which should publish this slash report has already published it, move on
let key_pending_slash_report =
serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
if key_pending_slash_report.is_none() {
txn.commit();
continue;
};
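        // Publish the slash report transaction, only committing the txn (and with it, dropping
        // the pending report from the database) if publication succeeds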
match self.serai.publish(&slash_report).await {
Ok(()) => {
txn.commit();
made_progress = true;
}
          // This could be specific to this TX (such as an already-in-mempool error), in which
          // case it may be worthwhile to continue iterating over the other pending slash reports.
          // We assume this error is ephemeral, and that the latency incurred for an ephemeral
          // error to resolve is minuscule compared to the window available to publish the slash
          // report, making this a non-issue.
Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}"))?,
}
}
Ok(made_progress)
}
}
}