Add sanity checks that we haven't previously reported an InInstruction for / accumulated an output

This commit is contained in:
Luke Parker
2024-08-29 21:35:22 -04:00
parent 2ca7fccb08
commit a8b9b7bad3
5 changed files with 80 additions and 88 deletions

View File

@@ -3,9 +3,9 @@ use core::marker::PhantomData;
use scale::Encode;
use serai_db::{Get, DbTxn, create_db};
-use primitives::{EncodableG, Eventuality, EventualityTracker};
+use primitives::{EncodableG, ReceivedOutput, Eventuality, EventualityTracker};
-use crate::{ScannerFeed, KeyFor, EventualityFor};
+use crate::{ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor};
create_db!(
ScannerEventuality {
@@ -15,6 +15,8 @@ create_db!(
LatestHandledNotableBlock: () -> u64,
SerializedEventualities: <K: Encode>(key: K) -> Vec<u8>,
AccumulatedOutput: (id: &[u8]) -> (),
}
);
@@ -65,4 +67,17 @@ impl<S: ScannerFeed> EventualityDb<S> {
}
res
}
pub(crate) fn prior_accumulated_output(
getter: &impl Get,
id: &<OutputFor<S> as ReceivedOutput<KeyFor<S>, AddressFor<S>>>::Id,
) -> bool {
AccumulatedOutput::get(getter, id.as_ref()).is_some()
}
pub(crate) fn accumulated_output(
txn: &mut impl DbTxn,
id: &<OutputFor<S> as ReceivedOutput<KeyFor<S>, AddressFor<S>>>::Id,
) {
AccumulatedOutput::set(txn, id.as_ref(), &());
}
}

View File

@@ -12,7 +12,8 @@ use crate::{
SeraiKey, OutputWithInInstruction, ReceiverScanData, ScannerGlobalDb, SubstrateToEventualityDb,
ScanToEventualityDb,
},
-  BlockExt, ScannerFeed, KeyFor, EventualityFor, SchedulerUpdate, Scheduler, sort_outputs,
+  BlockExt, ScannerFeed, KeyFor, OutputFor, EventualityFor, SchedulerUpdate, Scheduler,
+  sort_outputs,
scan::{next_to_scan_for_outputs_block, queue_output_until_block},
};
@@ -349,6 +350,22 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTas
scheduler_update.outputs.sort_by(sort_outputs);
scheduler_update.forwards.sort_by(sort_outputs);
scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output));
// Sanity check we've never accumulated these outputs before
{
let a: core::slice::Iter<'_, OutputFor<S>> = scheduler_update.outputs.iter();
let b: core::slice::Iter<'_, OutputFor<S>> = scheduler_update.forwards.iter();
let c = scheduler_update.returns.iter().map(|output_to_return| &output_to_return.output);
for output in a.chain(b).chain(c) {
assert!(
!EventualityDb::<S>::prior_accumulated_output(&txn, &output.id()),
"prior accumulated an output with this ID"
);
EventualityDb::<S>::accumulated_output(&mut txn, &output.id());
}
}
// Intake the new Eventualities
let new_eventualities =
self.scheduler.update(&mut txn, &keys_with_stages, scheduler_update);
@@ -375,7 +392,6 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTas
// Now that we've intaked any Eventualities caused, check if we're retiring any keys
if key.stage == LifetimeStage::Finishing {
let eventualities = EventualityDb::<S>::eventualities(&txn, key.key);
// TODO: This assumes the Scheduler is empty
if eventualities.active_eventualities.is_empty() {
log::info!(
"key {} has finished and is being retired",