Allow the scheduler's creation of transactions to be async and fallible

I don't love this, but it's the only way to select decoys without using a local
database. While the prior commit added such a database, its performance
presumably wasn't viable, and while TODOs marked the needed improvements, it
was still messy with an immense scope re: any auditing.

The relevant scheduler functions now take `&self` (intentional, as all
mutations should be via the `&mut impl DbTxn` passed). The calls to `&self` are
expected to be completely deterministic (as usual).
This commit is contained in:
Luke Parker
2024-09-14 01:09:35 -04:00
parent 2edc2f3612
commit e1ad897f7e
11 changed files with 723 additions and 854 deletions

View File

@@ -1,4 +1,4 @@
use core::{marker::PhantomData, future::Future};
use core::future::Future;
use std::collections::{HashSet, HashMap};
use group::GroupEncoding;
@@ -102,11 +102,11 @@ fn intake_eventualities<S: ScannerFeed>(
pub(crate) struct EventualityTask<D: Db, S: ScannerFeed, Sch: Scheduler<S>> {
db: D,
feed: S,
scheduler: PhantomData<Sch>,
scheduler: Sch,
}
impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> EventualityTask<D, S, Sch> {
pub(crate) fn new(mut db: D, feed: S, start_block: u64) -> Self {
pub(crate) fn new(mut db: D, feed: S, scheduler: Sch, start_block: u64) -> Self {
if EventualityDb::<S>::next_to_check_for_eventualities_block(&db).is_none() {
// Initialize the DB
let mut txn = db.txn();
@@ -114,7 +114,7 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> EventualityTask<D, S, Sch> {
txn.commit();
}
Self { db, feed, scheduler: PhantomData }
Self { db, feed, scheduler }
}
#[allow(clippy::type_complexity)]
@@ -167,15 +167,19 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> EventualityTask<D, S, Sch> {
{
intaked_any = true;
let new_eventualities = Sch::fulfill(
&mut txn,
&block,
&keys_with_stages,
burns
.into_iter()
.filter_map(|burn| Payment::<AddressFor<S>>::try_from(burn).ok())
.collect(),
);
let new_eventualities = self
.scheduler
.fulfill(
&mut txn,
&block,
&keys_with_stages,
burns
.into_iter()
.filter_map(|burn| Payment::<AddressFor<S>>::try_from(burn).ok())
.collect(),
)
.await
.map_err(|e| format!("failed to queue fulfilling payments: {e:?}"))?;
intake_eventualities::<S>(&mut txn, new_eventualities);
}
txn.commit();
@@ -443,8 +447,11 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTas
determined off an earlier block than this (enabling an earlier LifetimeStage to be
used after a later one was already used).
*/
let new_eventualities =
Sch::update(&mut txn, &block, &keys_with_stages, scheduler_update);
let new_eventualities = self
.scheduler
.update(&mut txn, &block, &keys_with_stages, scheduler_update)
.await
.map_err(|e| format!("failed to update scheduler: {e:?}"))?;
// Intake the new Eventualities
for key in new_eventualities.keys() {
keys
@@ -464,8 +471,11 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTas
key.key != keys.last().unwrap().key,
"key which was forwarding was the last key (which has no key after it to forward to)"
);
let new_eventualities =
Sch::flush_key(&mut txn, &block, key.key, keys.last().unwrap().key);
let new_eventualities = self
.scheduler
.flush_key(&mut txn, &block, key.key, keys.last().unwrap().key)
.await
.map_err(|e| format!("failed to flush key from scheduler: {e:?}"))?;
intake_eventualities::<S>(&mut txn, new_eventualities);
}

View File

@@ -256,8 +256,17 @@ impl<S: ScannerFeed> SchedulerUpdate<S> {
}
}
/// Eventualities, keyed by the encoding of the key the Eventualities are for.
pub type KeyScopedEventualities<S> = HashMap<Vec<u8>, Vec<EventualityFor<S>>>;
/// The object responsible for accumulating outputs and planning new transactions.
pub trait Scheduler<S: ScannerFeed>: 'static + Send {
/// An error encountered when handling updates/payments.
///
/// This MUST be an ephemeral error. Retrying handling updates/payments MUST eventually
/// resolve without manual intervention/changing the arguments.
type EphemeralError: Debug;
/// The type for a signable transaction.
type SignableTransaction: scheduler_primitives::SignableTransaction;
@@ -278,11 +287,12 @@ pub trait Scheduler<S: ScannerFeed>: 'static + Send {
/// If the retiring key has any unfulfilled payments associated with it, those MUST be made
/// the responsibility of the new key.
fn flush_key(
&self,
txn: &mut impl DbTxn,
block: &BlockFor<S>,
retiring_key: KeyFor<S>,
new_key: KeyFor<S>,
) -> HashMap<Vec<u8>, Vec<EventualityFor<S>>>;
) -> impl Send + Future<Output = Result<KeyScopedEventualities<S>, Self::EphemeralError>>;
/// Retire a key as it'll no longer be used.
///
@@ -300,11 +310,12 @@ pub trait Scheduler<S: ScannerFeed>: 'static + Send {
/// The `Vec<u8>` used as the key in the returned HashMap should be the encoded key the
/// Eventualities are for.
fn update(
&self,
txn: &mut impl DbTxn,
block: &BlockFor<S>,
active_keys: &[(KeyFor<S>, LifetimeStage)],
update: SchedulerUpdate<S>,
) -> HashMap<Vec<u8>, Vec<EventualityFor<S>>>;
) -> impl Send + Future<Output = Result<KeyScopedEventualities<S>, Self::EphemeralError>>;
/// Fulfill a series of payments, yielding the Eventualities now to be scanned for.
///
@@ -339,11 +350,12 @@ pub trait Scheduler<S: ScannerFeed>: 'static + Send {
has an output-to-Serai, the new primary output).
*/
fn fulfill(
&self,
txn: &mut impl DbTxn,
block: &BlockFor<S>,
active_keys: &[(KeyFor<S>, LifetimeStage)],
payments: Vec<Payment<AddressFor<S>>>,
) -> HashMap<Vec<u8>, Vec<EventualityFor<S>>>;
) -> impl Send + Future<Output = Result<KeyScopedEventualities<S>, Self::EphemeralError>>;
}
/// A representation of a scanner.
@@ -358,14 +370,15 @@ impl<S: ScannerFeed> Scanner<S> {
/// This will begin its execution, spawning several asynchronous tasks.
///
/// This will return None if the Scanner was never initialized.
pub async fn new<Sch: Scheduler<S>>(db: impl Db, feed: S) -> Option<Self> {
pub async fn new(db: impl Db, feed: S, scheduler: impl Scheduler<S>) -> Option<Self> {
let start_block = ScannerGlobalDb::<S>::start_block(&db)?;
let index_task = index::IndexTask::new(db.clone(), feed.clone(), start_block).await;
let scan_task = scan::ScanTask::new(db.clone(), feed.clone(), start_block);
let report_task = report::ReportTask::<_, S>::new(db.clone(), start_block);
let substrate_task = substrate::SubstrateTask::<_, S>::new(db.clone());
let eventuality_task = eventuality::EventualityTask::<_, _, Sch>::new(db, feed, start_block);
let eventuality_task =
eventuality::EventualityTask::<_, _, _>::new(db, feed, scheduler, start_block);
let (index_task_def, _index_handle) = Task::new();
let (scan_task_def, scan_handle) = Task::new();
@@ -394,9 +407,10 @@ impl<S: ScannerFeed> Scanner<S> {
/// This will begin its execution, spawning several asynchronous tasks.
///
/// This passes through to `Scanner::new` if prior called.
pub async fn initialize<Sch: Scheduler<S>>(
pub async fn initialize(
mut db: impl Db,
feed: S,
scheduler: impl Scheduler<S>,
start_block: u64,
start_key: KeyFor<S>,
) -> Self {
@@ -407,7 +421,7 @@ impl<S: ScannerFeed> Scanner<S> {
txn.commit();
}
Self::new::<Sch>(db, feed).await.unwrap()
Self::new(db, feed, scheduler).await.unwrap()
}
/// Acknowledge a Batch having been published on Serai.