Finish the tree logic in the transaction-chaining scheduler
Also completes the DB functions, reworks Scheduler so it's never instantiated, and ensures tree roots have change outputs.
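The "never instantiated" part works by turning Scheduler's methods into associated functions and having tasks name the scheduler purely as a type parameter. A minimal sketch of the pattern, under hypothetical simplified types (the real trait is generic over a ScannerFeed and operates on database transactions):

use core::marker::PhantomData;

// A trait whose functions take no `self`: an implementor is a namespace of
// behavior and never needs to be constructed.
trait Scheduler: 'static + Send {
  fn activate_key(key: [u8; 32]);
}

// The task keeps the scheduler as a type parameter, storing only a
// zero-sized PhantomData marker instead of a scheduler value.
struct EventualityTask<Sch: Scheduler> {
  scheduler: PhantomData<Sch>,
}

impl<Sch: Scheduler> EventualityTask<Sch> {
  fn new() -> Self {
    Self { scheduler: PhantomData }
  }

  fn on_activation(&self, key: [u8; 32]) {
    // Dispatch resolves statically through the type parameter.
    Sch::activate_key(key);
  }
}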
@@ -1,3 +1,4 @@
+use core::marker::PhantomData;
 use std::collections::{HashSet, HashMap};
 
 use group::GroupEncoding;
@@ -101,11 +102,11 @@ fn intake_eventualities<S: ScannerFeed>(
 pub(crate) struct EventualityTask<D: Db, S: ScannerFeed, Sch: Scheduler<S>> {
   db: D,
   feed: S,
-  scheduler: Sch,
+  scheduler: PhantomData<Sch>,
 }
 
 impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> EventualityTask<D, S, Sch> {
-  pub(crate) fn new(mut db: D, feed: S, scheduler: Sch, start_block: u64) -> Self {
+  pub(crate) fn new(mut db: D, feed: S, start_block: u64) -> Self {
     if EventualityDb::<S>::next_to_check_for_eventualities_block(&db).is_none() {
       // Initialize the DB
       let mut txn = db.txn();
@@ -113,7 +114,7 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> EventualityTask<D, S, Sch> {
       txn.commit();
     }
 
-    Self { db, feed, scheduler }
+    Self { db, feed, scheduler: PhantomData }
   }
 
   #[allow(clippy::type_complexity)]
@@ -146,7 +147,7 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> EventualityTask<D, S, Sch> {
   }
 
   // Returns a boolean of if we intaked any Burns.
-  fn intake_burns(&mut self) -> bool {
+  async fn intake_burns(&mut self) -> Result<bool, String> {
     let mut intaked_any = false;
 
     // If we've handled a notable block, we may have Burns being queued with it as the reference
@@ -158,6 +159,8 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> EventualityTask<D, S, Sch> {
       // others the new key
       let (_keys, keys_with_stages) = self.keys_and_keys_with_stages(latest_handled_notable_block);
 
+      let block = self.feed.block_by_number(&self.db, latest_handled_notable_block).await?;
+
       let mut txn = self.db.txn();
       // Drain the entire channel
       while let Some(burns) =
@@ -165,8 +168,9 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> EventualityTask<D, S, Sch> {
       {
         intaked_any = true;
 
-        let new_eventualities = self.scheduler.fulfill(
+        let new_eventualities = Sch::fulfill(
           &mut txn,
+          &block,
           &keys_with_stages,
           burns
             .into_iter()
@@ -178,7 +182,7 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> EventualityTask<D, S, Sch> {
       txn.commit();
     }
 
-    intaked_any
+    Ok(intaked_any)
   }
 }
 
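intake_burns now fetches the block it references from the feed, which is asynchronous and fallible, so the function becomes async and reports progress through a Result instead of a bare bool. A rough sketch of the new control flow, with hypothetical stand-ins for the feed and the Burns channel:

// Hypothetical stand-ins; the real task drains a DB-backed channel of Burns
// and fetches blocks through its ScannerFeed.
struct Burns;
struct Task {
  queued: Vec<Burns>,
}

impl Task {
  async fn block_by_number(&self, _number: u64) -> Result<u64, String> {
    Ok(0)
  }

  async fn intake_burns(&mut self) -> Result<bool, String> {
    let mut intaked_any = false;
    // `?` surfaces a feed error to the task's run loop.
    let _block = self.block_by_number(0).await?;
    // Drain everything queued, fulfilling each batch against the block.
    while let Some(_burns) = self.queued.pop() {
      intaked_any = true;
    }
    Ok(intaked_any)
  }
}

Callers then accumulate progress with made_progress |= self.intake_burns().await?;, as the hunks below show.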
@@ -197,7 +201,7 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTas
 
     // Start by intaking any Burns we have sitting around
     // It's important we run this regardless of if we have a new block to handle
-    made_progress |= self.intake_burns();
+    made_progress |= self.intake_burns().await?;
 
     /*
       Eventualities increase upon one of two cases:
@@ -253,7 +257,7 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTas
         // state will be for the newer block)
         #[allow(unused_assignments)]
         {
-          made_progress |= self.intake_burns();
+          made_progress |= self.intake_burns().await?;
         }
       }
 
@@ -278,7 +282,7 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTas
       for key in &keys {
         // If this is the key's activation block, activate it
         if key.activation_block_number == b {
-          self.scheduler.activate_key(&mut txn, key.key);
+          Sch::activate_key(&mut txn, key.key);
         }
 
         let completed_eventualities = {
@@ -431,7 +435,7 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTas
           after a later one was already used).
         */
         let new_eventualities =
-          self.scheduler.update(&mut txn, &keys_with_stages, scheduler_update);
+          Sch::update(&mut txn, &block, &keys_with_stages, scheduler_update);
         // Intake the new Eventualities
         for key in new_eventualities.keys() {
           keys
@@ -451,7 +455,7 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTas
             key.key != keys.last().unwrap().key,
             "key which was forwarding was the last key (which has no key after it to forward to)"
           );
-          self.scheduler.flush_key(&mut txn, key.key, keys.last().unwrap().key);
+          Sch::flush_key(&mut txn, &block, key.key, keys.last().unwrap().key);
         }
 
         // Now that we've intaked any Eventualities caused, check if we're retiring any keys
@@ -469,7 +473,7 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTas
 
           // We tell the scheduler to retire it now as we're done with it, and this fn doesn't
           // require it be called with a canonical order
-          self.scheduler.retire_key(&mut txn, key.key);
+          Sch::retire_key(&mut txn, key.key);
         }
       }
     }

@@ -163,6 +163,8 @@ pub type AddressFor<S> = <<S as ScannerFeed>::Block as Block>::Address;
 pub type OutputFor<S> = <<S as ScannerFeed>::Block as Block>::Output;
 /// The eventuality type for this ScannerFeed.
 pub type EventualityFor<S> = <<S as ScannerFeed>::Block as Block>::Eventuality;
+/// The block type for this ScannerFeed.
+pub type BlockFor<S> = <S as ScannerFeed>::Block;
 
 #[async_trait::async_trait]
 pub trait BatchPublisher: 'static + Send + Sync {
@@ -245,7 +247,7 @@ pub trait Scheduler<S: ScannerFeed>: 'static + Send {
   ///
   /// This SHOULD setup any necessary database structures. This SHOULD NOT cause the new key to
   /// be used as the primary key. The multisig rotation time clearly establishes its steps.
-  fn activate_key(&mut self, txn: &mut impl DbTxn, key: KeyFor<S>);
+  fn activate_key(txn: &mut impl DbTxn, key: KeyFor<S>);
 
   /// Flush all outputs within a retiring key to the new key.
   ///
@@ -257,14 +259,20 @@ pub trait Scheduler<S: ScannerFeed>: 'static + Send {
   ///
   /// If the retiring key has any unfulfilled payments associated with it, those MUST be made
   /// the responsibility of the new key.
-  fn flush_key(&mut self, txn: &mut impl DbTxn, retiring_key: KeyFor<S>, new_key: KeyFor<S>);
+  // TODO: This needs to return a HashMap for the eventualities
+  fn flush_key(
+    txn: &mut impl DbTxn,
+    block: &BlockFor<S>,
+    retiring_key: KeyFor<S>,
+    new_key: KeyFor<S>,
+  );
 
   /// Retire a key as it'll no longer be used.
   ///
   /// Any key retired MUST NOT still have outputs associated with it. This SHOULD be a NOP other
   /// than any assertions and database cleanup. This MUST NOT be expected to be called in a fashion
   /// ordered to any other calls.
-  fn retire_key(&mut self, txn: &mut impl DbTxn, key: KeyFor<S>);
+  fn retire_key(txn: &mut impl DbTxn, key: KeyFor<S>);
 
   /// Accumulate outputs into the scheduler, yielding the Eventualities now to be scanned for.
   ///
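A sketch of how an implementor might satisfy the reshaped flush_key and retire_key, with hypothetical simplified types standing in for DbTxn, BlockFor, and KeyFor:

struct Txn;
struct Block;
type Key = [u8; 32];

trait Scheduler: 'static + Send {
  // Associated function, now handed the block being handled; per the TODO
  // above it's expected to eventually return the Eventualities it creates.
  fn flush_key(txn: &mut Txn, block: &Block, retiring_key: Key, new_key: Key);
  fn retire_key(txn: &mut Txn, key: Key);
}

struct NoopScheduler;
impl Scheduler for NoopScheduler {
  fn flush_key(_txn: &mut Txn, _block: &Block, _retiring: Key, _new: Key) {}
  fn retire_key(_txn: &mut Txn, _key: Key) {}
}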
@@ -275,7 +283,6 @@ pub trait Scheduler<S: ScannerFeed>: 'static + Send {
   /// The `Vec<u8>` used as the key in the returned HashMap should be the encoded key the
   /// Eventualities are for.
   fn update(
-    &mut self,
     txn: &mut impl DbTxn,
     block: &BlockFor<S>,
     active_keys: &[(KeyFor<S>, LifetimeStage)],
@@ -315,7 +322,6 @@ pub trait Scheduler<S: ScannerFeed>: 'static + Send {
     has an output-to-Serai, the new primary output).
   */
   fn fulfill(
-    &mut self,
     txn: &mut impl DbTxn,
     block: &BlockFor<S>,
     active_keys: &[(KeyFor<S>, LifetimeStage)],
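With &mut self removed from update and fulfill, callers invoke them through the type parameter alone; no scheduler instance exists anywhere. A hedged sketch of the calling convention, with hypothetical simplified types and the HashMap keyed by the encoded key as the docs above describe:

use std::collections::HashMap;

struct Txn;
struct Block;
struct Eventuality;

trait Scheduler: 'static + Send {
  // The Vec<u8> key is the encoded key the Eventualities are for.
  fn update(txn: &mut Txn, block: &Block) -> HashMap<Vec<u8>, Vec<Eventuality>>;
}

fn handle_block<Sch: Scheduler>(txn: &mut Txn, block: &Block) {
  // The call resolves through `Sch` itself, never through a value.
  let new_eventualities = Sch::update(txn, block);
  for _encoded_key in new_eventualities.keys() {
    // ... intake the new Eventualities under each key ...
  }
}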
@@ -333,18 +339,17 @@ impl<S: ScannerFeed> Scanner<S> {
   /// Create a new scanner.
   ///
   /// This will begin its execution, spawning several asynchronous tasks.
-  pub async fn new(
+  pub async fn new<Sch: Scheduler<S>>(
     db: impl Db,
     feed: S,
     batch_publisher: impl BatchPublisher,
-    scheduler: impl Scheduler<S>,
     start_block: u64,
   ) -> Self {
     let index_task = index::IndexTask::new(db.clone(), feed.clone(), start_block).await;
     let scan_task = scan::ScanTask::new(db.clone(), feed.clone(), start_block);
     let report_task = report::ReportTask::<_, S, _>::new(db.clone(), batch_publisher, start_block);
     let substrate_task = substrate::SubstrateTask::<_, S>::new(db.clone());
-    let eventuality_task = eventuality::EventualityTask::new(db, feed, scheduler, start_block);
+    let eventuality_task = eventuality::EventualityTask::<_, _, Sch>::new(db, feed, start_block);
 
     let (_index_handle, index_run) = RunNowHandle::new();
     let (scan_handle, scan_run) = RunNowHandle::new();
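Call sites select the scheduler with a type argument rather than passing one in. Roughly, under a hypothetical MyScheduler and placeholder arguments:

let scanner =
  Scanner::new::<MyScheduler>(my_db, my_feed, my_publisher, start_block).await;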