mirror of https://github.com/serai-dex/serai.git
synced 2025-12-08 20:29:23 +00:00
Remove async-trait from processor/
Part of https://github.com/serai-dex/issues/607.
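The change is mechanical and repeats across every task below: the #[async_trait::async_trait] attribute is dropped, the `async fn` becomes a plain `fn` returning `impl Send + Future<Output = ...>`, and the prior body moves into an `async move` block. A minimal sketch of the before/after shape, using a stand-in task (the trait and method names mirror ContinuallyRan::run_iteration from this diff; everything else is illustrative):

use core::future::Future;

// Desugared form, as introduced by this commit: no attribute macro, and the
// returned future is unboxed (async-trait would have boxed it)
trait ContinuallyRan {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>>;
}

// Hypothetical task used solely for illustration
struct ExampleTask;

impl ContinuallyRan for ExampleTask {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    // The body of the old `async fn` is wrapped in an `async move` block, unchanged
    async move { Ok(false) }
  }
}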
@@ -1,4 +1,4 @@
use core::marker::PhantomData;
use core::{marker::PhantomData, future::Future};
use std::collections::{HashSet, HashMap};

use group::GroupEncoding;
@@ -185,317 +185,323 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> EventualityTask<D, S, Sch> {
}
}

#[async_trait::async_trait]
impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>> ContinuallyRan for EventualityTask<D, S, Sch> {
async fn run_iteration(&mut self) -> Result<bool, String> {
// Fetch the highest acknowledged block
let Some(highest_acknowledged) = ScannerGlobalDb::<S>::highest_acknowledged_block(&self.db)
else {
// If we've never acknowledged a block, return
return Ok(false);
};

// A boolean of if we've made any progress to return at the end of the function
let mut made_progress = false;

// Start by intaking any Burns we have sitting around
// It's important we run this regardless of if we have a new block to handle
made_progress |= self.intake_burns().await?;

/*
Eventualities increase upon one of two cases:

1) We're fulfilling Burns
2) We acknowledged a block

We can't know the processor has intaked all Burns it should have when we process block `b`.
We solve this by executing a consensus protocol whenever a resolution for an Eventuality
created to fulfill Burns occurs. Accordingly, we force ourselves to obtain synchrony on such
blocks (and all preceding Burns).

This means we can only iterate up to the block currently pending acknowledgement.

We only know blocks will need acknowledgement *for sure* if they were scanned. The only other
causes are key activation and retirement (both scheduled outside the scan window). This makes
the exclusive upper bound the *next block to scan*.
*/
let exclusive_upper_bound = {
// Fetch the next to scan block
let next_to_scan = next_to_scan_for_outputs_block::<S>(&self.db)
.expect("EventualityTask run before writing the start block");
// If we haven't done any work, return
if next_to_scan == 0 {
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
async move {
// Fetch the highest acknowledged block
let Some(highest_acknowledged) = ScannerGlobalDb::<S>::highest_acknowledged_block(&self.db)
else {
// If we've never acknowledged a block, return
return Ok(false);
}
next_to_scan
};
};

// Fetch the next block to check
let next_to_check = EventualityDb::<S>::next_to_check_for_eventualities_block(&self.db)
.expect("EventualityTask run before writing the start block");
// A boolean of if we've made any progress to return at the end of the function
let mut made_progress = false;

// Check all blocks
for b in next_to_check .. exclusive_upper_bound {
let is_block_notable = ScannerGlobalDb::<S>::is_block_notable(&self.db, b);
if is_block_notable {
/*
If this block is notable *and* not acknowledged, break.
// Start by intaking any Burns we have sitting around
// It's important we run this regardless of if we have a new block to handle
made_progress |= self.intake_burns().await?;

This is so if Burns queued prior to this block's acknowledgement caused any Eventualities
(which may resolve this block), we have them. If it wasn't for that, it'd be so if this
block's acknowledgement caused any Eventualities, we have them, though those would only
potentially resolve in the next block (letting us scan this block without delay).
*/
if b > highest_acknowledged {
break;
/*
Eventualities increase upon one of two cases:

1) We're fulfilling Burns
2) We acknowledged a block

We can't know the processor has intaked all Burns it should have when we process block `b`.
We solve this by executing a consensus protocol whenever a resolution for an Eventuality
created to fulfill Burns occurs. Accordingly, we force ourselves to obtain synchrony on
such blocks (and all preceding Burns).

This means we can only iterate up to the block currently pending acknowledgement.

We only know blocks will need acknowledgement *for sure* if they were scanned. The only
other causes are key activation and retirement (both scheduled outside the scan window).
This makes the exclusive upper bound the *next block to scan*.
*/
let exclusive_upper_bound = {
// Fetch the next to scan block
let next_to_scan = next_to_scan_for_outputs_block::<S>(&self.db)
.expect("EventualityTask run before writing the start block");
// If we haven't done any work, return
if next_to_scan == 0 {
return Ok(false);
}
next_to_scan
};

// Since this block is notable, ensure we've intaked all the Burns preceding it
// We can know with certainty that the channel is fully populated at this time since we've
// acknowledged a newer block (so we've handled the state up to this point and any new
// state will be for the newer block)
#[allow(unused_assignments)]
{
made_progress |= self.intake_burns().await?;
}
}
// Fetch the next block to check
let next_to_check = EventualityDb::<S>::next_to_check_for_eventualities_block(&self.db)
.expect("EventualityTask run before writing the start block");

// Since we're handling this block, we are making progress
made_progress = true;

let block = self.feed.block_by_number(&self.db, b).await?;

log::debug!("checking eventuality completions in block: {} ({b})", hex::encode(block.id()));

let (keys, keys_with_stages) = self.keys_and_keys_with_stages(b);

let mut txn = self.db.txn();

// Fetch the data from the scanner
let scan_data = ScanToEventualityDb::recv_scan_data(&mut txn, b);
assert_eq!(scan_data.block_number, b);
let ReceiverScanData { block_number: _, received_external_outputs, forwards, returns } =
scan_data;
let mut outputs = received_external_outputs;

for key in &keys {
// If this is the key's activation block, activate it
if key.activation_block_number == b {
Sch::activate_key(&mut txn, key.key);
}

let completed_eventualities = {
let mut eventualities = EventualityDb::<S>::eventualities(&txn, key.key);
let completed_eventualities = block.check_for_eventuality_resolutions(&mut eventualities);
EventualityDb::<S>::set_eventualities(&mut txn, key.key, &eventualities);
completed_eventualities
};

for (tx, eventuality) in &completed_eventualities {
log::info!(
"eventuality {} resolved by {}",
hex::encode(eventuality.id()),
hex::encode(tx.as_ref())
);
CompletedEventualities::send(&mut txn, &key.key, eventuality.id());
}

// Fetch all non-External outputs
let mut non_external_outputs = block.scan_for_outputs(key.key);
non_external_outputs.retain(|output| output.kind() != OutputType::External);
// Drop any outputs less than the dust limit
non_external_outputs.retain(|output| {
let balance = output.balance();
balance.amount.0 >= S::dust(balance.coin).0
});

/*
Now that we have all non-External outputs, we filter them to be only the outputs which
are from transactions which resolve our own Eventualities *if* the multisig is retiring.
This implements step 6 of `spec/processor/Multisig Rotation.md`.

We may receive a Change output. The only issue with accumulating this would be if it
extends the multisig's lifetime (by increasing the amount of outputs yet to be
forwarded). By checking it's one we made, either:
1) It's a legitimate Change output to be forwarded
2) It's a Change output created by a user burning coins (specifying the Change address),
which can only be created while the multisig is actively handling `Burn`s (therefore
ensuring this multisig cannot be kept alive ad-infinitum)

The commentary on Change outputs also applies to Branch/Forwarded. They'll presumably get
ignored if not usable however.
*/
if key.stage == LifetimeStage::Finishing {
non_external_outputs
.retain(|output| completed_eventualities.contains_key(&output.transaction_id()));
}

// Finally, for non-External outputs we didn't make, we check they're worth more than the
// cost to aggregate them to avoid some profitable spam attacks by malicious miners
{
// Fetch and cache the costs to aggregate as this call may be expensive
let coins =
non_external_outputs.iter().map(|output| output.balance().coin).collect::<HashSet<_>>();
let mut costs_to_aggregate = HashMap::new();
for coin in coins {
costs_to_aggregate.insert(
coin,
self.feed.cost_to_aggregate(coin, &block).await.map_err(|e| {
format!("EventualityTask couldn't fetch cost to aggregate {coin:?} at {b}: {e:?}")
})?,
);
}

// Only retain out outputs/outputs sufficiently worthwhile
non_external_outputs.retain(|output| {
completed_eventualities.contains_key(&output.transaction_id()) || {
let balance = output.balance();
balance.amount.0 >= (2 * costs_to_aggregate[&balance.coin].0)
}
});
}

// Now, we iterate over all Forwarded outputs and queue their InInstructions
for output in
non_external_outputs.iter().filter(|output| output.kind() == OutputType::Forwarded)
{
let Some(eventuality) = completed_eventualities.get(&output.transaction_id()) else {
// Output sent to the forwarding address yet not one we made
continue;
};
let Some(forwarded) = eventuality.singular_spent_output() else {
// This was a TX made by us, yet someone burned to the forwarding address as it doesn't
// follow the structure of forwarding transactions
continue;
};

let Some((return_address, mut in_instruction)) =
ScannerGlobalDb::<S>::return_address_and_in_instruction_for_forwarded_output(
&txn, &forwarded,
)
else {
// This was a TX made by us, coincidentally with the necessary structure, yet wasn't
// forwarding an output
continue;
};

// We use the original amount, minus twice the cost to aggregate
// If the fees we paid to forward this now (less than the cost to aggregate now, yet not
// necessarily the cost to aggregate historically) caused this amount to be less, reduce
// it accordingly
in_instruction.balance.amount.0 =
in_instruction.balance.amount.0.min(output.balance().amount.0);

queue_output_until_block::<S>(
&mut txn,
b + S::WINDOW_LENGTH,
&OutputWithInInstruction { output: output.clone(), return_address, in_instruction },
);
}

// Accumulate all of these outputs
outputs.extend(non_external_outputs);
}

// Update the scheduler
{
let mut scheduler_update = SchedulerUpdate { outputs, forwards, returns };
scheduler_update.outputs.sort_by(sort_outputs);
scheduler_update.forwards.sort_by(sort_outputs);
scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output));

let empty = {
let a: core::slice::Iter<'_, OutputFor<S>> = scheduler_update.outputs.iter();
let b: core::slice::Iter<'_, OutputFor<S>> = scheduler_update.forwards.iter();
let c = scheduler_update.returns.iter().map(|output_to_return| &output_to_return.output);
let mut all_outputs = a.chain(b).chain(c).peekable();

// If we received any output, sanity check this block is notable
let empty = all_outputs.peek().is_none();
if !empty {
assert!(is_block_notable, "accumulating output(s) in non-notable block");
}

// Sanity check we've never accumulated these outputs before
for output in all_outputs {
assert!(
!EventualityDb::<S>::prior_accumulated_output(&txn, &output.id()),
"prior accumulated an output with this ID"
);
EventualityDb::<S>::accumulated_output(&mut txn, &output.id());
}

empty
};

if !empty {
// Accumulate the outputs
// Check all blocks
for b in next_to_check .. exclusive_upper_bound {
let is_block_notable = ScannerGlobalDb::<S>::is_block_notable(&self.db, b);
if is_block_notable {
/*
This uses the `keys_with_stages` for the current block, yet this block is notable.
Accordingly, all future intaked Burns will use at least this block when determining
what LifetimeStage a key is. That makes the LifetimeStage monotonically incremented. If
this block wasn't notable, we'd potentially intake Burns with the LifetimeStage
determined off an earlier block than this (enabling an earlier LifetimeStage to be used
after a later one was already used).
If this block is notable *and* not acknowledged, break.

This is so if Burns queued prior to this block's acknowledgement caused any
Eventualities (which may resolve this block), we have them. If it wasn't for that, it'd
be so if this block's acknowledgement caused any Eventualities, we have them, though
those would only potentially resolve in the next block (letting us scan this block
without delay).
*/
let new_eventualities =
Sch::update(&mut txn, &block, &keys_with_stages, scheduler_update);
// Intake the new Eventualities
for key in new_eventualities.keys() {
keys
.iter()
.find(|serai_key| serai_key.key.to_bytes().as_ref() == key.as_slice())
.expect("intaking Eventuality for key which isn't active");
if b > highest_acknowledged {
break;
}
intake_eventualities::<S>(&mut txn, new_eventualities);
}
}

for key in &keys {
// If this is the block at which forwarding starts for this key, flush it
// We do this after we issue the above update for any efficiencies gained by doing so
if key.block_at_which_forwarding_starts == Some(b) {
assert!(
key.key != keys.last().unwrap().key,
"key which was forwarding was the last key (which has no key after it to forward to)"
);
let new_eventualities =
Sch::flush_key(&mut txn, &block, key.key, keys.last().unwrap().key);
intake_eventualities::<S>(&mut txn, new_eventualities);
// Since this block is notable, ensure we've intaked all the Burns preceding it
// We can know with certainty that the channel is fully populated at this time since
// we've acknowledged a newer block (so we've handled the state up to this point and any
// new state will be for the newer block)
#[allow(unused_assignments)]
{
made_progress |= self.intake_burns().await?;
}
}

// Now that we've intaked any Eventualities caused, check if we're retiring any keys
if key.stage == LifetimeStage::Finishing {
let eventualities = EventualityDb::<S>::eventualities(&txn, key.key);
if eventualities.active_eventualities.is_empty() {
// Since we're handling this block, we are making progress
made_progress = true;

let block = self.feed.block_by_number(&self.db, b).await?;

log::debug!("checking eventuality completions in block: {} ({b})", hex::encode(block.id()));

let (keys, keys_with_stages) = self.keys_and_keys_with_stages(b);

let mut txn = self.db.txn();

// Fetch the data from the scanner
let scan_data = ScanToEventualityDb::recv_scan_data(&mut txn, b);
assert_eq!(scan_data.block_number, b);
let ReceiverScanData { block_number: _, received_external_outputs, forwards, returns } =
scan_data;
let mut outputs = received_external_outputs;

for key in &keys {
// If this is the key's activation block, activate it
if key.activation_block_number == b {
Sch::activate_key(&mut txn, key.key);
}

let completed_eventualities = {
let mut eventualities = EventualityDb::<S>::eventualities(&txn, key.key);
let completed_eventualities =
block.check_for_eventuality_resolutions(&mut eventualities);
EventualityDb::<S>::set_eventualities(&mut txn, key.key, &eventualities);
completed_eventualities
};

for (tx, eventuality) in &completed_eventualities {
log::info!(
"key {} has finished and is being retired",
hex::encode(key.key.to_bytes().as_ref())
"eventuality {} resolved by {}",
hex::encode(eventuality.id()),
hex::encode(tx.as_ref())
);
CompletedEventualities::send(&mut txn, &key.key, eventuality.id());
}

// Retire this key `WINDOW_LENGTH` blocks in the future to ensure the scan task never
// has a malleable view of the keys.
ScannerGlobalDb::<S>::retire_key(&mut txn, b + S::WINDOW_LENGTH, key.key);
// Fetch all non-External outputs
let mut non_external_outputs = block.scan_for_outputs(key.key);
non_external_outputs.retain(|output| output.kind() != OutputType::External);
// Drop any outputs less than the dust limit
non_external_outputs.retain(|output| {
let balance = output.balance();
balance.amount.0 >= S::dust(balance.coin).0
});

// We tell the scheduler to retire it now as we're done with it, and this fn doesn't
// require it be called with a canonical order
Sch::retire_key(&mut txn, key.key);
/*
Now that we have all non-External outputs, we filter them to be only the outputs which
are from transactions which resolve our own Eventualities *if* the multisig is retiring.
This implements step 6 of `spec/processor/Multisig Rotation.md`.

We may receive a Change output. The only issue with accumulating this would be if it
extends the multisig's lifetime (by increasing the amount of outputs yet to be
forwarded). By checking it's one we made, either:
1) It's a legitimate Change output to be forwarded
2) It's a Change output created by a user burning coins (specifying the Change address),
which can only be created while the multisig is actively handling `Burn`s (therefore
ensuring this multisig cannot be kept alive ad-infinitum)

The commentary on Change outputs also applies to Branch/Forwarded. They'll presumably
get ignored if not usable however.
*/
if key.stage == LifetimeStage::Finishing {
non_external_outputs
.retain(|output| completed_eventualities.contains_key(&output.transaction_id()));
}

// Finally, for non-External outputs we didn't make, we check they're worth more than the
// cost to aggregate them to avoid some profitable spam attacks by malicious miners
{
// Fetch and cache the costs to aggregate as this call may be expensive
let coins = non_external_outputs
.iter()
.map(|output| output.balance().coin)
.collect::<HashSet<_>>();
let mut costs_to_aggregate = HashMap::new();
for coin in coins {
costs_to_aggregate.insert(
coin,
self.feed.cost_to_aggregate(coin, &block).await.map_err(|e| {
format!("EventualityTask couldn't fetch cost to aggregate {coin:?} at {b}: {e:?}")
})?,
);
}

// Only retain out outputs/outputs sufficiently worthwhile
non_external_outputs.retain(|output| {
completed_eventualities.contains_key(&output.transaction_id()) || {
let balance = output.balance();
balance.amount.0 >= (2 * costs_to_aggregate[&balance.coin].0)
}
});
}

// Now, we iterate over all Forwarded outputs and queue their InInstructions
for output in
non_external_outputs.iter().filter(|output| output.kind() == OutputType::Forwarded)
{
let Some(eventuality) = completed_eventualities.get(&output.transaction_id()) else {
// Output sent to the forwarding address yet not one we made
continue;
};
let Some(forwarded) = eventuality.singular_spent_output() else {
// This was a TX made by us, yet someone burned to the forwarding address as it
// doesn't follow the structure of forwarding transactions
continue;
};

let Some((return_address, mut in_instruction)) =
ScannerGlobalDb::<S>::return_address_and_in_instruction_for_forwarded_output(
&txn, &forwarded,
)
else {
// This was a TX made by us, coincidentally with the necessary structure, yet wasn't
// forwarding an output
continue;
};

// We use the original amount, minus twice the cost to aggregate
// If the fees we paid to forward this now (less than the cost to aggregate now, yet not
// necessarily the cost to aggregate historically) caused this amount to be less, reduce
// it accordingly
in_instruction.balance.amount.0 =
in_instruction.balance.amount.0.min(output.balance().amount.0);

queue_output_until_block::<S>(
&mut txn,
b + S::WINDOW_LENGTH,
&OutputWithInInstruction { output: output.clone(), return_address, in_instruction },
);
}

// Accumulate all of these outputs
outputs.extend(non_external_outputs);
}

// Update the scheduler
{
let mut scheduler_update = SchedulerUpdate { outputs, forwards, returns };
scheduler_update.outputs.sort_by(sort_outputs);
scheduler_update.forwards.sort_by(sort_outputs);
scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output));

let empty = {
let a: core::slice::Iter<'_, OutputFor<S>> = scheduler_update.outputs.iter();
let b: core::slice::Iter<'_, OutputFor<S>> = scheduler_update.forwards.iter();
let c =
scheduler_update.returns.iter().map(|output_to_return| &output_to_return.output);
let mut all_outputs = a.chain(b).chain(c).peekable();

// If we received any output, sanity check this block is notable
let empty = all_outputs.peek().is_none();
if !empty {
assert!(is_block_notable, "accumulating output(s) in non-notable block");
}

// Sanity check we've never accumulated these outputs before
for output in all_outputs {
assert!(
!EventualityDb::<S>::prior_accumulated_output(&txn, &output.id()),
"prior accumulated an output with this ID"
);
EventualityDb::<S>::accumulated_output(&mut txn, &output.id());
}

empty
};

if !empty {
// Accumulate the outputs
/*
This uses the `keys_with_stages` for the current block, yet this block is notable.
Accordingly, all future intaked Burns will use at least this block when determining
what LifetimeStage a key is. That makes the LifetimeStage monotonically incremented.
If this block wasn't notable, we'd potentially intake Burns with the LifetimeStage
determined off an earlier block than this (enabling an earlier LifetimeStage to be
used after a later one was already used).
*/
let new_eventualities =
Sch::update(&mut txn, &block, &keys_with_stages, scheduler_update);
// Intake the new Eventualities
for key in new_eventualities.keys() {
keys
.iter()
.find(|serai_key| serai_key.key.to_bytes().as_ref() == key.as_slice())
.expect("intaking Eventuality for key which isn't active");
}
intake_eventualities::<S>(&mut txn, new_eventualities);
}
}

for key in &keys {
// If this is the block at which forwarding starts for this key, flush it
// We do this after we issue the above update for any efficiencies gained by doing so
if key.block_at_which_forwarding_starts == Some(b) {
assert!(
key.key != keys.last().unwrap().key,
"key which was forwarding was the last key (which has no key after it to forward to)"
);
let new_eventualities =
Sch::flush_key(&mut txn, &block, key.key, keys.last().unwrap().key);
intake_eventualities::<S>(&mut txn, new_eventualities);
}

// Now that we've intaked any Eventualities caused, check if we're retiring any keys
if key.stage == LifetimeStage::Finishing {
let eventualities = EventualityDb::<S>::eventualities(&txn, key.key);
if eventualities.active_eventualities.is_empty() {
log::info!(
"key {} has finished and is being retired",
hex::encode(key.key.to_bytes().as_ref())
);

// Retire this key `WINDOW_LENGTH` blocks in the future to ensure the scan task never
// has a malleable view of the keys.
ScannerGlobalDb::<S>::retire_key(&mut txn, b + S::WINDOW_LENGTH, key.key);

// We tell the scheduler to retire it now as we're done with it, and this fn doesn't
// require it be called with a canonical order
Sch::retire_key(&mut txn, key.key);
}
}
}

// Update the next-to-check block
EventualityDb::<S>::set_next_to_check_for_eventualities_block(&mut txn, next_to_check);

// If this block was notable, update the latest-handled notable block
if is_block_notable {
EventualityDb::<S>::set_latest_handled_notable_block(&mut txn, b);
}

txn.commit();
}

// Update the next-to-check block
EventualityDb::<S>::set_next_to_check_for_eventualities_block(&mut txn, next_to_check);

// If this block was notable, update the latest-handled notable block
if is_block_notable {
EventualityDb::<S>::set_latest_handled_notable_block(&mut txn, b);
}

txn.commit();
// Run dependents if we successfully checked any blocks
Ok(made_progress)
}

// Run dependents if we successfully checked any blocks
Ok(made_progress)
}
}

@@ -1,5 +1,6 @@
use serai_db::{Get, DbTxn, Db};
use core::future::Future;

use serai_db::{Get, DbTxn, Db};
use primitives::{task::ContinuallyRan, BlockHeader};

use crate::ScannerFeed;
@@ -56,58 +57,59 @@ impl<D: Db, S: ScannerFeed> IndexTask<D, S> {
}
}

#[async_trait::async_trait]
impl<D: Db, S: ScannerFeed> ContinuallyRan for IndexTask<D, S> {
async fn run_iteration(&mut self) -> Result<bool, String> {
// Fetch the latest finalized block
let our_latest_finalized = IndexDb::latest_finalized_block(&self.db)
.expect("IndexTask run before writing the start block");
let latest_finalized = match self.feed.latest_finalized_block_number().await {
Ok(latest_finalized) => latest_finalized,
Err(e) => Err(format!("couldn't fetch the latest finalized block number: {e:?}"))?,
};

if latest_finalized < our_latest_finalized {
// Explicitly log this as an error as returned ephemeral errors are logged with debug
// This doesn't panic as the node should sync along our indexed chain, and if it doesn't,
// we'll panic at that point in time
log::error!(
"node is out of sync, latest finalized {} is behind our indexed {}",
latest_finalized,
our_latest_finalized
);
Err("node is out of sync".to_string())?;
}

// Index the hashes of all blocks until the latest finalized block
for b in (our_latest_finalized + 1) ..= latest_finalized {
let block = match self.feed.unchecked_block_header_by_number(b).await {
Ok(block) => block,
Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?,
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
async move {
// Fetch the latest finalized block
let our_latest_finalized = IndexDb::latest_finalized_block(&self.db)
.expect("IndexTask run before writing the start block");
let latest_finalized = match self.feed.latest_finalized_block_number().await {
Ok(latest_finalized) => latest_finalized,
Err(e) => Err(format!("couldn't fetch the latest finalized block number: {e:?}"))?,
};

// Check this descends from our indexed chain
{
let expected_parent =
IndexDb::block_id(&self.db, b - 1).expect("didn't have the ID of the prior block");
if block.parent() != expected_parent {
panic!(
"current finalized block (#{b}, {}) doesn't build off finalized block (#{}, {})",
hex::encode(block.parent()),
b - 1,
hex::encode(expected_parent)
);
}
if latest_finalized < our_latest_finalized {
// Explicitly log this as an error as returned ephemeral errors are logged with debug
// This doesn't panic as the node should sync along our indexed chain, and if it doesn't,
// we'll panic at that point in time
log::error!(
"node is out of sync, latest finalized {} is behind our indexed {}",
latest_finalized,
our_latest_finalized
);
Err("node is out of sync".to_string())?;
}

// Update the latest finalized block
let mut txn = self.db.txn();
IndexDb::set_block(&mut txn, b, block.id());
IndexDb::set_latest_finalized_block(&mut txn, b);
txn.commit();
}
// Index the hashes of all blocks until the latest finalized block
for b in (our_latest_finalized + 1) ..= latest_finalized {
let block = match self.feed.unchecked_block_header_by_number(b).await {
Ok(block) => block,
Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?,
};

// Have dependents run if we updated the latest finalized block
Ok(our_latest_finalized != latest_finalized)
// Check this descends from our indexed chain
{
let expected_parent =
IndexDb::block_id(&self.db, b - 1).expect("didn't have the ID of the prior block");
if block.parent() != expected_parent {
panic!(
"current finalized block (#{b}, {}) doesn't build off finalized block (#{}, {})",
hex::encode(block.parent()),
b - 1,
hex::encode(expected_parent)
);
}
}

// Update the latest finalized block
let mut txn = self.db.txn();
IndexDb::set_block(&mut txn, b, block.id());
IndexDb::set_latest_finalized_block(&mut txn, b);
txn.commit();
}

// Have dependents run if we updated the latest finalized block
Ok(our_latest_finalized != latest_finalized)
}
}
}

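Callers are unaffected by this desugaring: the returned future is awaited exactly as the old `async fn` was. A hypothetical driver loop, assuming only the ContinuallyRan shape sketched earlier (the crate's actual task runner, with its sleep/notification logic, isn't part of this diff):

async fn drive<T: ContinuallyRan>(mut task: T) {
  loop {
    match task.run_iteration().await {
      // On progress, a real runner would also wake dependent tasks
      Ok(_made_progress) => {}
      // Ephemeral errors are returned as Strings and merely logged
      Err(e) => log::debug!("task iteration errored: {e}"),
    }
    // A real runner would sleep or await a notification here before iterating again
  }
}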
@@ -2,7 +2,7 @@
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::{marker::PhantomData, fmt::Debug};
use core::{marker::PhantomData, future::Future, fmt::Debug};
use std::{io, collections::HashMap};

use group::GroupEncoding;
@@ -59,7 +59,6 @@ impl<B: Block> BlockExt for B {
/// A feed usable to scan a blockchain.
///
/// This defines the primitive types used, along with various getters necessary for indexing.
#[async_trait::async_trait]
pub trait ScannerFeed: 'static + Send + Sync + Clone {
/// The ID of the network being scanned for.
const NETWORK: NetworkId;
@@ -110,38 +109,43 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone {
///
/// The block number is its zero-indexed position within a linear view of the external network's
/// consensus. The genesis block accordingly has block number 0.
async fn latest_finalized_block_number(&self) -> Result<u64, Self::EphemeralError>;
fn latest_finalized_block_number(
&self,
) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>>;

/// Fetch the timestamp of a block (represented in seconds since the epoch).
///
/// This must be monotonically incrementing. Two blocks may share a timestamp.
async fn time_of_block(&self, number: u64) -> Result<u64, Self::EphemeralError>;
fn time_of_block(
&self,
number: u64,
) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>>;

/// Fetch a block header by its number.
///
/// This does not check the returned BlockHeader is the header for the block we indexed.
async fn unchecked_block_header_by_number(
fn unchecked_block_header_by_number(
&self,
number: u64,
) -> Result<<Self::Block as Block>::Header, Self::EphemeralError>;
) -> impl Send + Future<Output = Result<<Self::Block as Block>::Header, Self::EphemeralError>>;

/// Fetch a block by its number.
///
/// This does not check the returned Block is the block we indexed.
async fn unchecked_block_by_number(
fn unchecked_block_by_number(
&self,
number: u64,
) -> Result<Self::Block, Self::EphemeralError>;
) -> impl Send + Future<Output = Result<Self::Block, Self::EphemeralError>>;

/// Fetch a block by its number.
///
/// Panics if the block requested wasn't indexed.
async fn block_by_number(
fn block_by_number(
&self,
getter: &(impl Send + Sync + Get),
number: u64,
) -> Result<Self::Block, String> {
let block = match self.unchecked_block_by_number(number).await {
) -> impl Send + Future<Output = Result<Self::Block, String>> {
async move {let block = match self.unchecked_block_by_number(number).await {
Ok(block) => block,
Err(e) => Err(format!("couldn't fetch block {number}: {e:?}"))?,
};
@@ -159,7 +163,7 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone {
}
}

Ok(block)
Ok(block)}
}

/// The dust threshold for the specified coin.
@@ -171,11 +175,11 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone {
/// The cost to aggregate an input as of the specified block.
///
/// This is defined as the transaction fee for a 2-input, 1-output transaction.
async fn cost_to_aggregate(
fn cost_to_aggregate(
&self,
coin: Coin,
reference_block: &Self::Block,
) -> Result<Amount, Self::EphemeralError>;
) -> impl Send + Future<Output = Result<Amount, Self::EphemeralError>>;
}

/// The key type for this ScannerFeed.

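With the attribute gone, a ScannerFeed implementor returns an `async move` block from a plain `fn`. A self-contained sketch of the same desugaring on a one-method trait mirroring `latest_finalized_block_number` (the `Feed` trait, `EphemeralError` struct, and `MockFeed` type here are illustrative stand-ins, not the crate's real items):

use core::future::Future;

// Stand-in for the associated EphemeralError type
#[derive(Debug)]
struct EphemeralError;

trait Feed: 'static + Send + Sync {
  fn latest_finalized_block_number(
    &self,
  ) -> impl Send + Future<Output = Result<u64, EphemeralError>>;
}

// A mock feed which always reports the same finalized height
struct MockFeed { height: u64 }

impl Feed for MockFeed {
  fn latest_finalized_block_number(
    &self,
  ) -> impl Send + Future<Output = Result<u64, EphemeralError>> {
    // No boxing and no attribute macro; the async block itself is the returned future
    async move { Ok(self.height) }
  }
}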
@@ -1,4 +1,4 @@
use core::marker::PhantomData;
use core::{marker::PhantomData, future::Future};

use scale::Encode;
use serai_db::{DbTxn, Db};
@@ -65,113 +65,119 @@ impl<D: Db, S: ScannerFeed> ReportTask<D, S> {
}
}

#[async_trait::async_trait]
impl<D: Db, S: ScannerFeed> ContinuallyRan for ReportTask<D, S> {
async fn run_iteration(&mut self) -> Result<bool, String> {
let highest_reportable = {
// Fetch the next to scan block
let next_to_scan = next_to_scan_for_outputs_block::<S>(&self.db)
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
async move {
let highest_reportable = {
// Fetch the next to scan block
let next_to_scan = next_to_scan_for_outputs_block::<S>(&self.db)
.expect("ReportTask run before writing the start block");
// If we haven't done any work, return
if next_to_scan == 0 {
return Ok(false);
}
// The last scanned block is the block prior to this
#[allow(clippy::let_and_return)]
let last_scanned = next_to_scan - 1;
// The last scanned block is the highest reportable block as we only scan blocks within a
// window where it's safe to immediately report the block
// See `eventuality.rs` for more info
last_scanned
};

let next_to_potentially_report = ReportDb::<S>::next_to_potentially_report_block(&self.db)
.expect("ReportTask run before writing the start block");
// If we haven't done any work, return
if next_to_scan == 0 {
return Ok(false);
}
// The last scanned block is the block prior to this
#[allow(clippy::let_and_return)]
let last_scanned = next_to_scan - 1;
// The last scanned block is the highest reportable block as we only scan blocks within a
// window where it's safe to immediately report the block
// See `eventuality.rs` for more info
last_scanned
};

let next_to_potentially_report = ReportDb::<S>::next_to_potentially_report_block(&self.db)
.expect("ReportTask run before writing the start block");
for b in next_to_potentially_report ..= highest_reportable {
let mut txn = self.db.txn();

for b in next_to_potentially_report ..= highest_reportable {
let mut txn = self.db.txn();
// Receive the InInstructions for this block
// We always do this as we can't trivially tell if we should recv InInstructions before we
// do
let InInstructionData {
external_key_for_session_to_sign_batch,
returnable_in_instructions: in_instructions,
} = ScanToReportDb::<S>::recv_in_instructions(&mut txn, b);
let notable = ScannerGlobalDb::<S>::is_block_notable(&txn, b);
if !notable {
assert!(in_instructions.is_empty(), "block wasn't notable yet had InInstructions");
}
// If this block is notable, create the Batch(s) for it
if notable {
let network = S::NETWORK;
let block_hash = index::block_id(&txn, b);
let mut batch_id = ReportDb::<S>::acquire_batch_id(&mut txn, b);

// Receive the InInstructions for this block
// We always do this as we can't trivially tell if we should recv InInstructions before we do
let InInstructionData {
external_key_for_session_to_sign_batch,
returnable_in_instructions: in_instructions,
} = ScanToReportDb::<S>::recv_in_instructions(&mut txn, b);
let notable = ScannerGlobalDb::<S>::is_block_notable(&txn, b);
if !notable {
assert!(in_instructions.is_empty(), "block wasn't notable yet had InInstructions");
}
// If this block is notable, create the Batch(s) for it
if notable {
let network = S::NETWORK;
let block_hash = index::block_id(&txn, b);
let mut batch_id = ReportDb::<S>::acquire_batch_id(&mut txn, b);
// start with empty batch
let mut batches = vec![Batch {
network,
id: batch_id,
block: BlockHash(block_hash),
instructions: vec![],
}];
// We also track the return information for the InInstructions within a Batch in case
// they error
let mut return_information = vec![vec![]];

// start with empty batch
let mut batches =
vec![Batch { network, id: batch_id, block: BlockHash(block_hash), instructions: vec![] }];
// We also track the return information for the InInstructions within a Batch in case they
// error
let mut return_information = vec![vec![]];
for Returnable { return_address, in_instruction } in in_instructions {
let balance = in_instruction.balance;

for Returnable { return_address, in_instruction } in in_instructions {
let balance = in_instruction.balance;
let batch = batches.last_mut().unwrap();
batch.instructions.push(in_instruction);

let batch = batches.last_mut().unwrap();
batch.instructions.push(in_instruction);
// check if batch is over-size
if batch.encode().len() > MAX_BATCH_SIZE {
// pop the last instruction so it's back in size
let in_instruction = batch.instructions.pop().unwrap();

// check if batch is over-size
if batch.encode().len() > MAX_BATCH_SIZE {
// pop the last instruction so it's back in size
let in_instruction = batch.instructions.pop().unwrap();
// bump the id for the new batch
batch_id = ReportDb::<S>::acquire_batch_id(&mut txn, b);

// bump the id for the new batch
batch_id = ReportDb::<S>::acquire_batch_id(&mut txn, b);
// make a new batch with this instruction included
batches.push(Batch {
network,
id: batch_id,
block: BlockHash(block_hash),
instructions: vec![in_instruction],
});
// Since we're allocating a new batch, allocate a new set of return addresses for it
return_information.push(vec![]);
}

// make a new batch with this instruction included
batches.push(Batch {
network,
id: batch_id,
block: BlockHash(block_hash),
instructions: vec![in_instruction],
});
// Since we're allocating a new batch, allocate a new set of return addresses for it
return_information.push(vec![]);
// For the set of return addresses for the InInstructions for the batch we just pushed
// onto, push this InInstruction's return addresses
return_information
.last_mut()
.unwrap()
.push(return_address.map(|address| ReturnInformation { address, balance }));
}

// For the set of return addresses for the InInstructions for the batch we just pushed
// onto, push this InInstruction's return addresses
return_information
.last_mut()
.unwrap()
.push(return_address.map(|address| ReturnInformation { address, balance }));
// Save the return addresses to the database
assert_eq!(batches.len(), return_information.len());
for (batch, return_information) in batches.iter().zip(&return_information) {
assert_eq!(batch.instructions.len(), return_information.len());
ReportDb::<S>::save_external_key_for_session_to_sign_batch(
&mut txn,
batch.id,
&external_key_for_session_to_sign_batch,
);
ReportDb::<S>::save_return_information(&mut txn, batch.id, return_information);
}

for batch in batches {
Batches::send(&mut txn, &batch);
BatchesToSign::send(&mut txn, &external_key_for_session_to_sign_batch, &batch);
}
}

// Save the return addresses to the database
assert_eq!(batches.len(), return_information.len());
for (batch, return_information) in batches.iter().zip(&return_information) {
assert_eq!(batch.instructions.len(), return_information.len());
ReportDb::<S>::save_external_key_for_session_to_sign_batch(
&mut txn,
batch.id,
&external_key_for_session_to_sign_batch,
);
ReportDb::<S>::save_return_information(&mut txn, batch.id, return_information);
}
// Update the next to potentially report block
ReportDb::<S>::set_next_to_potentially_report_block(&mut txn, b + 1);

for batch in batches {
Batches::send(&mut txn, &batch);
BatchesToSign::send(&mut txn, &external_key_for_session_to_sign_batch, &batch);
}
txn.commit();
}

// Update the next to potentially report block
ReportDb::<S>::set_next_to_potentially_report_block(&mut txn, b + 1);

txn.commit();
// Run dependents if we decided to report any blocks
Ok(next_to_potentially_report <= highest_reportable)
}

// Run dependents if we decided to report any blocks
Ok(next_to_potentially_report <= highest_reportable)
}
}

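The loop above keeps every Batch within MAX_BATCH_SIZE by measuring the SCALE encoding after each push, popping the instruction which overflowed, and opening a new batch (with a freshly acquired ID) containing it. A standalone sketch of that size-capped grouping, using plain byte lengths in place of `batch.encode().len()` (the function and its types are illustrative, not the crate's):

// Illustrative only: groups byte-string "instructions" into batches whose
// summed length stays within max_size, mirroring the pop-and-reopen strategy
fn split_into_batches(instructions: Vec<Vec<u8>>, max_size: usize) -> Vec<Vec<Vec<u8>>> {
  let mut batches: Vec<Vec<Vec<u8>>> = vec![vec![]];
  for instruction in instructions {
    let batch = batches.last_mut().unwrap();
    batch.push(instruction);
    // Measure the batch with the instruction included
    let size: usize = batch.iter().map(Vec::len).sum();
    // Only split if this batch has something to fall back to, so a single
    // oversized instruction still lands in a batch of its own
    if (size > max_size) && (batch.len() > 1) {
      // pop the last instruction so the batch is back in size
      let overflow = batch.pop().unwrap();
      // open a new batch with the popped instruction included
      batches.push(vec![overflow]);
    }
  }
  batches
}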
@@ -1,3 +1,4 @@
use core::future::Future;
use std::collections::HashMap;

use scale::Decode;
@@ -107,258 +108,262 @@ impl<D: Db, S: ScannerFeed> ScanTask<D, S> {
}
}

#[async_trait::async_trait]
impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanTask<D, S> {
async fn run_iteration(&mut self) -> Result<bool, String> {
// Fetch the safe to scan block
let latest_scannable =
latest_scannable_block::<S>(&self.db).expect("ScanTask run before writing the start block");
// Fetch the next block to scan
let next_to_scan = ScanDb::<S>::next_to_scan_for_outputs_block(&self.db)
.expect("ScanTask run before writing the start block");
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
async move {
// Fetch the safe to scan block
let latest_scannable =
latest_scannable_block::<S>(&self.db).expect("ScanTask run before writing the start block");
// Fetch the next block to scan
let next_to_scan = ScanDb::<S>::next_to_scan_for_outputs_block(&self.db)
.expect("ScanTask run before writing the start block");

for b in next_to_scan ..= latest_scannable {
let block = self.feed.block_by_number(&self.db, b).await?;
for b in next_to_scan ..= latest_scannable {
let block = self.feed.block_by_number(&self.db, b).await?;

log::info!("scanning block: {} ({b})", hex::encode(block.id()));
log::info!("scanning block: {} ({b})", hex::encode(block.id()));

let mut txn = self.db.txn();
let mut txn = self.db.txn();

assert_eq!(ScanDb::<S>::next_to_scan_for_outputs_block(&txn).unwrap(), b);
assert_eq!(ScanDb::<S>::next_to_scan_for_outputs_block(&txn).unwrap(), b);

// Tidy the keys, then fetch them
// We don't have to tidy them here, we just have to somewhere, so why not here?
ScannerGlobalDb::<S>::tidy_keys(&mut txn);
let keys = ScannerGlobalDb::<S>::active_keys_as_of_next_to_scan_for_outputs_block(&txn)
.expect("scanning for a blockchain without any keys set");
// Tidy the keys, then fetch them
// We don't have to tidy them here, we just have to somewhere, so why not here?
ScannerGlobalDb::<S>::tidy_keys(&mut txn);
let keys = ScannerGlobalDb::<S>::active_keys_as_of_next_to_scan_for_outputs_block(&txn)
.expect("scanning for a blockchain without any keys set");

// The scan data for this block
let mut scan_data = SenderScanData {
block_number: b,
received_external_outputs: vec![],
forwards: vec![],
returns: vec![],
};
// The InInstructions for this block
let mut in_instructions = vec![];

// The outputs queued for this block
let queued_outputs = {
let mut queued_outputs = ScanDb::<S>::take_queued_outputs(&mut txn, b);
// Sort the queued outputs in case they weren't queued in a deterministic fashion
queued_outputs.sort_by(|a, b| sort_outputs(&a.output, &b.output));
queued_outputs
};
for queued_output in queued_outputs {
in_instructions.push((
queued_output.output.id(),
Returnable {
return_address: queued_output.return_address,
in_instruction: queued_output.in_instruction,
},
));
scan_data.received_external_outputs.push(queued_output.output);
}

// We subtract the cost to aggregate from some outputs we scan
// This cost is fetched with an asynchronous function which may be non-trivial
// We cache the result of this function here to avoid calling it multiple times
let mut costs_to_aggregate = HashMap::with_capacity(1);

// Scan for each key
for key in &keys {
for output in block.scan_for_outputs(key.key) {
assert_eq!(output.key(), key.key);

/*
The scan task runs ahead of time, obtaining ordering on the external network's blocks
with relation to events on the Serai network. This is done via publishing a Batch which
contains the InInstructions from External outputs. Accordingly, the scan process only
has to yield External outputs.

It'd appear to make sense to scan for all outputs, and after scanning for all outputs,
yield all outputs. The issue is we can't identify outputs we created here. We can only
identify the outputs we receive and their *declared intention*.

We only want to handle Change/Branch/Forwarded outputs we made ourselves. For
Forwarded, the reasoning is obvious (retiring multisigs should only downsize, yet
accepting new outputs solely because they claim to be Forwarded would increase the size
of the multisig). For Change/Branch, it's because such outputs which aren't ours are
pointless. They wouldn't hurt to accumulate though.

The issue is they would hurt to accumulate. We want to filter outputs which are less
than their cost to aggregate, a variable itself variable to the current blockchain. We
can filter such outputs here, yet if we drop a Change output, we create an insolvency.
We'd need to track the loss and offset it later. That means we can't filter such
outputs, as we expect any Change output we make.

The issue is the Change outputs we don't make. Someone can create an output declaring
to be Change, yet not actually Change. If we don't filter it, it'd be queued for
accumulation, yet it may cost more to accumulate than it's worth.

The solution is to let the Eventuality task, which does know if we made an output or
not (or rather, if a transaction is identical to a transaction which should exist
regarding effects) decide to keep/yield the outputs which we should only keep if we
made them (as Serai itself should not make worthless outputs, so we can assume they're
worthwhile, and even if they're not economically, they are technically).

The alternative, we drop outputs here with a generic filter rule and then report back
the insolvency created, still doesn't work as we'd only be creating an insolvency if
the output was actually made by us (and not simply someone else sending in). We can
have the Eventuality task report the insolvency, yet that requires the scanner be
responsible for such filter logic. It's more flexible, and has a cleaner API,
to do so at a higher level.
*/
if output.kind() != OutputType::External {
// While we don't report these outputs, we still need consensus on this block and
// accordingly still need to set it as notable
let balance = output.balance();
// We ensure it's over the dust limit to prevent people sending 1 satoshi from causing
// an invocation of a consensus/signing protocol
if balance.amount.0 >= S::dust(balance.coin).0 {
ScannerGlobalDb::<S>::flag_notable_due_to_non_external_output(&mut txn, b);
}
continue;
}

// Check this isn't dust
let balance_to_use = {
let mut balance = output.balance();

// First, subtract 2 * the cost to aggregate, as detailed in
// `spec/processor/UTXO Management.md`

// We cache this, so if it isn't yet cached, insert it into the cache
if let std::collections::hash_map::Entry::Vacant(e) =
costs_to_aggregate.entry(balance.coin)
{
e.insert(self.feed.cost_to_aggregate(balance.coin, &block).await.map_err(|e| {
format!(
"ScanTask couldn't fetch cost to aggregate {:?} at {b}: {e:?}",
balance.coin
)
})?);
}
let cost_to_aggregate = costs_to_aggregate[&balance.coin];
balance.amount.0 -= 2 * cost_to_aggregate.0;

// Now, check it's still past the dust threshold
if balance.amount.0 < S::dust(balance.coin).0 {
continue;
}

balance
};

// Fetch the InInstruction/return addr for this output
let output_with_in_instruction = match in_instruction_from_output::<S>(&output) {
(return_address, Some(instruction)) => OutputWithInInstruction {
output,
return_address,
in_instruction: InInstructionWithBalance { instruction, balance: balance_to_use },
},
(Some(address), None) => {
// Since there was no instruction here, return this since we parsed a return address
if key.stage != LifetimeStage::Finishing {
scan_data.returns.push(Return { address, output });
}
continue;
}
// Since we didn't receive an instruction nor can we return this, queue this for
// accumulation and move on
(None, None) => {
if key.stage != LifetimeStage::Finishing {
scan_data.received_external_outputs.push(output);
}
continue;
}
};

// Drop External outputs if they're to a multisig which won't report them
// This means we should report any External output we save to disk here
#[allow(clippy::match_same_arms)]
match key.stage {
// This multisig isn't yet reporting its External outputs to avoid a DoS
// Queue the output to be reported when this multisig starts reporting
LifetimeStage::ActiveYetNotReporting => {
ScanDb::<S>::queue_output_until_block(
&mut txn,
key.block_at_which_reporting_starts,
&output_with_in_instruction,
);
continue;
}
// We should report External outputs in these cases
LifetimeStage::Active | LifetimeStage::UsingNewForChange => {}
// We should report External outputs only once forwarded, where they'll appear as
// OutputType::Forwarded. We save them now for when they appear
LifetimeStage::Forwarding => {
// When the forwarded output appears, we can see which Plan it's associated with and
// from there recover this output
scan_data.forwards.push(output_with_in_instruction);
continue;
}
// We should drop these as we should not be handling new External outputs at this
// time
LifetimeStage::Finishing => {
continue;
}
}
// Ensures we didn't miss a `continue` above
assert!(matches!(key.stage, LifetimeStage::Active | LifetimeStage::UsingNewForChange));
// The scan data for this block
let mut scan_data = SenderScanData {
block_number: b,
received_external_outputs: vec![],
forwards: vec![],
returns: vec![],
};
// The InInstructions for this block
let mut in_instructions = vec![];

// The outputs queued for this block
let queued_outputs = {
let mut queued_outputs = ScanDb::<S>::take_queued_outputs(&mut txn, b);
// Sort the queued outputs in case they weren't queued in a deterministic fashion
queued_outputs.sort_by(|a, b| sort_outputs(&a.output, &b.output));
queued_outputs
};
for queued_output in queued_outputs {
in_instructions.push((
output_with_in_instruction.output.id(),
queued_output.output.id(),
Returnable {
return_address: output_with_in_instruction.return_address,
in_instruction: output_with_in_instruction.in_instruction,
return_address: queued_output.return_address,
in_instruction: queued_output.in_instruction,
},
));
scan_data.received_external_outputs.push(output_with_in_instruction.output);
scan_data.received_external_outputs.push(queued_output.output);
}
}

// Sort the InInstructions by the output ID
in_instructions.sort_by(|(output_id_a, _), (output_id_b, _)| {
use core::cmp::{Ordering, Ord};
let res = output_id_a.as_ref().cmp(output_id_b.as_ref());
assert!(res != Ordering::Equal, "two outputs within a collection had the same ID");
res
});
// Check we haven't prior reported an InInstruction for this output
// This is a sanity check which is intended to prevent multiple instances of sriXYZ on-chain
// due to a single output
for (id, _) in &in_instructions {
assert!(
!ScanDb::<S>::prior_reported_in_instruction_for_output(&txn, id),
"prior reported an InInstruction for an output with this ID"
// We subtract the cost to aggregate from some outputs we scan
// This cost is fetched with an asynchronous function which may be non-trivial
// We cache the result of this function here to avoid calling it multiple times
let mut costs_to_aggregate = HashMap::with_capacity(1);

// Scan for each key
for key in &keys {
for output in block.scan_for_outputs(key.key) {
assert_eq!(output.key(), key.key);

/*
The scan task runs ahead of time, obtaining ordering on the external network's blocks
with relation to events on the Serai network. This is done via publishing a Batch
which contains the InInstructions from External outputs. Accordingly, the scan
process only has to yield External outputs.

It'd appear to make sense to scan for all outputs, and after scanning for all
outputs, yield all outputs. The issue is we can't identify outputs we created here.
We can only identify the outputs we receive and their *declared intention*.

We only want to handle Change/Branch/Forwarded outputs we made ourselves. For
Forwarded, the reasoning is obvious (retiring multisigs should only downsize, yet
accepting new outputs solely because they claim to be Forwarded would increase the
size of the multisig). For Change/Branch, it's because such outputs which aren't ours
are pointless. They wouldn't hurt to accumulate though.

The issue is they would hurt to accumulate. We want to filter outputs which are less
than their cost to aggregate, a variable itself variable to the current blockchain.
We can filter such outputs here, yet if we drop a Change output, we create an
insolvency. We'd need to track the loss and offset it later. That means we can't
filter such outputs, as we expect any Change output we make.

The issue is the Change outputs we don't make. Someone can create an output declaring
to be Change, yet not actually Change. If we don't filter it, it'd be queued for
accumulation, yet it may cost more to accumulate than it's worth.

The solution is to let the Eventuality task, which does know if we made an output or
not (or rather, if a transaction is identical to a transaction which should exist
regarding effects) decide to keep/yield the outputs which we should only keep if we
made them (as Serai itself should not make worthless outputs, so we can assume
they're worthwhile, and even if they're not economically, they are technically).

The alternative, we drop outputs here with a generic filter rule and then report back
|
||||
the insolvency created, still doesn't work as we'd only be creating an insolvency if
|
||||
the output was actually made by us (and not simply someone else sending in). We can
|
||||
have the Eventuality task report the insolvency, yet that requires the scanner be
|
||||
responsible for such filter logic. It's more flexible, and has a cleaner API,
|
||||
to do so at a higher level.
|
||||
*/
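    // In short: only External outputs are yielded from here. Branch/Change/
    // Forwarded outputs are never reported by the scan task; the Eventuality
    // task, which can tell whether we actually made them, decides whether to
    // keep them. As handled below, a non-External output still flags this
    // block as notable if it clears the dust threshold.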
    if output.kind() != OutputType::External {
      // While we don't report these outputs, we still need consensus on this block and
      // accordingly still need to set it as notable
      let balance = output.balance();
      // We ensure it's over the dust limit to prevent people sending 1 satoshi from
      // causing an invocation of a consensus/signing protocol
      if balance.amount.0 >= S::dust(balance.coin).0 {
        ScannerGlobalDb::<S>::flag_notable_due_to_non_external_output(&mut txn, b);
      }
      continue;
    }

    // Check this isn't dust
    let balance_to_use = {
      let mut balance = output.balance();

      // First, subtract 2 * the cost to aggregate, as detailed in
      // `spec/processor/UTXO Management.md`

      // We cache this, so if it isn't yet cached, insert it into the cache
      if let std::collections::hash_map::Entry::Vacant(e) =
        costs_to_aggregate.entry(balance.coin)
      {
        e.insert(self.feed.cost_to_aggregate(balance.coin, &block).await.map_err(|e| {
          format!("ScanTask couldn't fetch cost to aggregate {:?} at {b}: {e:?}", balance.coin)
        })?);
      }
      let cost_to_aggregate = costs_to_aggregate[&balance.coin];
      balance.amount.0 -= 2 * cost_to_aggregate.0;

      // Now, check it's still past the dust threshold
      if balance.amount.0 < S::dust(balance.coin).0 {
        continue;
      }

      balance
    };
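    // The net rule above, as a minimal sketch: an External output survives
    // only if it still clears the dust threshold after paying twice the cost
    // to aggregate. Plain u64s stand in for the Amount newtype, and
    // `saturating_sub` stands in for the surrounding control flow, which
    // skips the output rather than underflow.
    fn clears_dust_after_aggregation(amount: u64, cost_to_aggregate: u64, dust: u64) -> bool {
      amount.saturating_sub(2 * cost_to_aggregate) >= dust
    }
    // e.g. amount 10_000, cost to aggregate 2_000, dust 5_000:
    // 10_000 - (2 * 2_000) = 6_000 >= 5_000, so the output is kept.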
    // Fetch the InInstruction/return addr for this output
    let output_with_in_instruction = match in_instruction_from_output::<S>(&output) {
      (return_address, Some(instruction)) => OutputWithInInstruction {
        output,
        return_address,
        in_instruction: InInstructionWithBalance { instruction, balance: balance_to_use },
      },
      (Some(address), None) => {
        // Since there was no instruction here, return this since we parsed a return
        // address
        if key.stage != LifetimeStage::Finishing {
          scan_data.returns.push(Return { address, output });
        }
        continue;
      }
      // Since we didn't receive an instruction nor can we return this, queue this for
      // accumulation and move on
      (None, None) => {
        if key.stage != LifetimeStage::Finishing {
          scan_data.received_external_outputs.push(output);
        }
        continue;
      }
    };

    // Drop External outputs if they're to a multisig which won't report them
    // This means we should report any External output we save to disk here
    #[allow(clippy::match_same_arms)]
    match key.stage {
      // This multisig isn't yet reporting its External outputs to avoid a DoS
      // Queue the output to be reported when this multisig starts reporting
      LifetimeStage::ActiveYetNotReporting => {
        ScanDb::<S>::queue_output_until_block(
          &mut txn,
          key.block_at_which_reporting_starts,
          &output_with_in_instruction,
        );
        continue;
      }
      // We should report External outputs in these cases
      LifetimeStage::Active | LifetimeStage::UsingNewForChange => {}
      // We should report External outputs only once forwarded, where they'll appear as
      // OutputType::Forwarded. We save them now for when they appear
      LifetimeStage::Forwarding => {
        // When the forwarded output appears, we can see which Plan it's associated with
        // and from there recover this output
        scan_data.forwards.push(output_with_in_instruction);
        continue;
      }
      // We should drop these as we should not be handling new External outputs at this
      // time
      LifetimeStage::Finishing => {
        continue;
      }
    }
    // Ensures we didn't miss a `continue` above
    assert!(matches!(key.stage, LifetimeStage::Active | LifetimeStage::UsingNewForChange));
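    // Recap of the lifetime policy encoded by the match above:
    //   ActiveYetNotReporting    => queue the output until reporting starts
    //   Active/UsingNewForChange => report it now
    //   Forwarding               => save it; it's reported once it reappears
    //                               as OutputType::Forwarded
    //   Finishing                => drop it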
    in_instructions.push((
      output_with_in_instruction.output.id(),
      Returnable {
        return_address: output_with_in_instruction.return_address,
        in_instruction: output_with_in_instruction.in_instruction,
      },
    ));
    scan_data.received_external_outputs.push(output_with_in_instruction.output);
  }
}

// Sort the InInstructions by the output ID
in_instructions.sort_by(|(output_id_a, _), (output_id_b, _)| {
  use core::cmp::{Ordering, Ord};
  let res = output_id_a.as_ref().cmp(output_id_b.as_ref());
  assert!(res != Ordering::Equal, "two outputs within a collection had the same ID");
  res
});
// Check we haven't prior reported an InInstruction for this output
// This is a sanity check which is intended to prevent multiple instances of sriXYZ
// on-chain due to a single output
for (id, _) in &in_instructions {
  assert!(
    !ScanDb::<S>::prior_reported_in_instruction_for_output(&txn, id),
    "prior reported an InInstruction for an output with this ID"
  );
  ScanDb::<S>::reported_in_instruction_for_output(&mut txn, id);
}
// Reformat the InInstructions to just the InInstructions
let in_instructions = in_instructions
  .into_iter()
  .map(|(_id, in_instruction)| in_instruction)
  .collect::<Vec<_>>();
// Send the InInstructions to the report task
// We need to also specify which key is responsible for signing the Batch for these, which
// will always be the oldest key (as the new key signing the Batch signifies handover
// acceptance)
ScanToReportDb::<S>::send_in_instructions(
  &mut txn,
  b,
  &InInstructionData {
    external_key_for_session_to_sign_batch: keys[0].key,
    returnable_in_instructions: in_instructions,
  },
);

// Send the scan data to the eventuality task
ScanToEventualityDb::<S>::send_scan_data(&mut txn, b, &scan_data);
// Update the next to scan block
ScanDb::<S>::set_next_to_scan_for_outputs_block(&mut txn, b + 1);
txn.commit();
}
// Reformat the InInstructions to just the InInstructions
let in_instructions =
  in_instructions.into_iter().map(|(_id, in_instruction)| in_instruction).collect::<Vec<_>>();
// Send the InInstructions to the report task
// We need to also specify which key is responsible for signing the Batch for these, which
// will always be the oldest key (as the new key signing the Batch signifies handover
// acceptance)
ScanToReportDb::<S>::send_in_instructions(
  &mut txn,
  b,
  &InInstructionData {
    external_key_for_session_to_sign_batch: keys[0].key,
    returnable_in_instructions: in_instructions,
  },
);

// Send the scan data to the eventuality task
ScanToEventualityDb::<S>::send_scan_data(&mut txn, b, &scan_data);
// Update the next to scan block
ScanDb::<S>::set_next_to_scan_for_outputs_block(&mut txn, b + 1);
txn.commit();
// Run dependents if we successfully scanned any blocks
Ok(next_to_scan <= latest_scannable)
}

// Run dependents if we successfully scanned any blocks
Ok(next_to_scan <= latest_scannable)
}
}
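The transformation this commit applies is visible above and repeats below: a `#[async_trait::async_trait]` method becomes a plain method returning `impl Send + Future`, with the old `async fn` body moved verbatim into an `async move` block (hence the wholesale re-indentation). A minimal sketch of the pattern; `Task` here is a hypothetical stand-in for the tasks in this diff:

use core::future::Future;

pub trait ContinuallyRan {
  // Formerly `async fn run_iteration(&mut self) -> Result<bool, String>`
  // under #[async_trait::async_trait]
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>>;
}

struct Task;
impl ContinuallyRan for Task {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    // The prior body moves, unchanged, into an `async move` block
    async move { Ok(false) }
  }
}

This relies on return-position `impl Trait` in traits (stable since Rust 1.75) and, unlike async-trait, avoids boxing every returned future.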

@@ -1,4 +1,4 @@
use core::marker::PhantomData;
use core::{marker::PhantomData, future::Future};

use serai_db::{DbTxn, Db};

@@ -52,115 +52,121 @@ impl<D: Db, S: ScannerFeed> SubstrateTask<D, S> {
  }
}

#[async_trait::async_trait]
impl<D: Db, S: ScannerFeed> ContinuallyRan for SubstrateTask<D, S> {
  async fn run_iteration(&mut self) -> Result<bool, String> {
    let mut made_progress = false;
    loop {
      // Fetch the next action to handle
      let mut txn = self.db.txn();
      let Some(action) = SubstrateDb::<S>::next_action(&mut txn) else {
        drop(txn);
        return Ok(made_progress);
      };
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    async move {
      let mut made_progress = false;
      loop {
        // Fetch the next action to handle
        let mut txn = self.db.txn();
        let Some(action) = SubstrateDb::<S>::next_action(&mut txn) else {
          drop(txn);
          return Ok(made_progress);
        };

      match action {
        Action::AcknowledgeBatch(AcknowledgeBatch {
          batch_id,
          in_instruction_succeededs,
          mut burns,
          key_to_activate,
        }) => {
          // Check if we have the information for this batch
          let Some(block_number) = report::take_block_number_for_batch::<S>(&mut txn, batch_id)
          else {
            // If we don't, drop this txn (restoring the action to the database)
            drop(txn);
            return Ok(made_progress);
          };
        match action {
          Action::AcknowledgeBatch(AcknowledgeBatch {
            batch_id,
            in_instruction_succeededs,
            mut burns,
            key_to_activate,
          }) => {
            // Check if we have the information for this batch
            let Some(block_number) = report::take_block_number_for_batch::<S>(&mut txn, batch_id)
            else {
              // If we don't, drop this txn (restoring the action to the database)
              drop(txn);
              return Ok(made_progress);
            };

          {
            let external_key_for_session_to_sign_batch =
              report::take_external_key_for_session_to_sign_batch::<S>(&mut txn, batch_id).unwrap();
            AcknowledgedBatches::send(&mut txn, &external_key_for_session_to_sign_batch, batch_id);
          }

          // Mark we made progress and handle this
          made_progress = true;

          assert!(
            ScannerGlobalDb::<S>::is_block_notable(&txn, block_number),
            "acknowledging a block which wasn't notable"
          );
          if let Some(prior_highest_acknowledged_block) =
            ScannerGlobalDb::<S>::highest_acknowledged_block(&txn)
          {
            // If a single block produced multiple Batches, the block number won't increment
            assert!(
              block_number >= prior_highest_acknowledged_block,
              "acknowledging blocks out-of-order"
            );
            for b in (prior_highest_acknowledged_block + 1) .. block_number {
              assert!(
                !ScannerGlobalDb::<S>::is_block_notable(&txn, b),
                "skipped acknowledging a block which was notable"
            {
              let external_key_for_session_to_sign_batch =
                report::take_external_key_for_session_to_sign_batch::<S>(&mut txn, batch_id)
                  .unwrap();
              AcknowledgedBatches::send(
                &mut txn,
                &external_key_for_session_to_sign_batch,
                batch_id,
              );
            }
          }

          ScannerGlobalDb::<S>::set_highest_acknowledged_block(&mut txn, block_number);
          if let Some(key_to_activate) = key_to_activate {
            ScannerGlobalDb::<S>::queue_key(
              &mut txn,
              block_number + S::WINDOW_LENGTH,
              key_to_activate,
            // Mark we made progress and handle this
            made_progress = true;

            assert!(
              ScannerGlobalDb::<S>::is_block_notable(&txn, block_number),
              "acknowledging a block which wasn't notable"
            );
            }
            if let Some(prior_highest_acknowledged_block) =
              ScannerGlobalDb::<S>::highest_acknowledged_block(&txn)
            {
              // If a single block produced multiple Batches, the block number won't increment
              assert!(
                block_number >= prior_highest_acknowledged_block,
                "acknowledging blocks out-of-order"
              );
              for b in (prior_highest_acknowledged_block + 1) .. block_number {
                assert!(
                  !ScannerGlobalDb::<S>::is_block_notable(&txn, b),
                  "skipped acknowledging a block which was notable"
                );
              }
            }
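            // The assertions above pin down the acknowledgement ordering. As
            // a sketch, with `is_notable` a hypothetical stand-in for
            // `ScannerGlobalDb::<S>::is_block_notable`: acknowledgements are
            // monotone (the same block may repeat if it yielded multiple
            // Batches), and no notable block may be skipped.
            fn assert_acknowledgement_order(prior: u64, now: u64, is_notable: impl Fn(u64) -> bool) {
              assert!(now >= prior, "acknowledging blocks out-of-order");
              for b in (prior + 1) .. now {
                assert!(!is_notable(b), "skipped acknowledging a block which was notable");
              }
            }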

          // Return the balances for any InInstructions which failed to execute
          {
            let return_information = report::take_return_information::<S>(&mut txn, batch_id)
              .expect("didn't save the return information for Batch we published");
            assert_eq!(
            ScannerGlobalDb::<S>::set_highest_acknowledged_block(&mut txn, block_number);
            if let Some(key_to_activate) = key_to_activate {
              ScannerGlobalDb::<S>::queue_key(
                &mut txn,
                block_number + S::WINDOW_LENGTH,
                key_to_activate,
              );
            }

            // Return the balances for any InInstructions which failed to execute
            {
              let return_information = report::take_return_information::<S>(&mut txn, batch_id)
                .expect("didn't save the return information for Batch we published");
              assert_eq!(
                in_instruction_succeededs.len(),
                return_information.len(),
                "amount of InInstruction succeededs differed from amount of return information saved"
              );

          // We map these into standard Burns
          for (succeeded, return_information) in
            in_instruction_succeededs.into_iter().zip(return_information)
          {
            if succeeded {
              continue;
            }
              // We map these into standard Burns
              for (succeeded, return_information) in
                in_instruction_succeededs.into_iter().zip(return_information)
              {
                if succeeded {
                  continue;
                }

                if let Some(report::ReturnInformation { address, balance }) = return_information {
                  burns.push(OutInstructionWithBalance {
                    instruction: OutInstruction { address: address.into(), data: None },
                    balance,
                  });
                if let Some(report::ReturnInformation { address, balance }) = return_information {
                  burns.push(OutInstructionWithBalance {
                    instruction: OutInstruction { address: address.into(), data: None },
                    balance,
                  });
                }
              }
            }
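            // Net effect of the block above: every failed InInstruction whose
            // return information was saved becomes one OutInstructionWithBalance
            // returning its balance to the parsed return address, appended to
            // `burns`. Successes, and failures without a return address,
            // contribute nothing.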

          // We send these Burns as stemming from this block we just acknowledged
          // This causes them to be acted on after we accumulate the outputs from this block
          SubstrateToEventualityDb::send_burns::<S>(&mut txn, block_number, burns);
        }

            // We send these Burns as stemming from this block we just acknowledged
            // This causes them to be acted on after we accumulate the outputs from this block
            SubstrateToEventualityDb::send_burns::<S>(&mut txn, block_number, burns);
        Action::QueueBurns(burns) => {
          // We can instantly handle this so long as we've handled all prior actions
          made_progress = true;

          let queue_as_of = ScannerGlobalDb::<S>::highest_acknowledged_block(&txn)
            .expect("queueing Burns yet never acknowledged a block");

          SubstrateToEventualityDb::send_burns::<S>(&mut txn, queue_as_of, burns);
        }
      }

          Action::QueueBurns(burns) => {
            // We can instantly handle this so long as we've handled all prior actions
            made_progress = true;

            let queue_as_of = ScannerGlobalDb::<S>::highest_acknowledged_block(&txn)
              .expect("queueing Burns yet never acknowledged a block");

            SubstrateToEventualityDb::send_burns::<S>(&mut txn, queue_as_of, burns);
          }
      txn.commit();
    }

        txn.commit();
      }
    }
  }
}