Allow scheduler's creation of transactions to be async and error

I don't love this, but it's the only way to select decoys without using a local
database. While the prior commit added such a database, its performance
presumably wasn't viable, and while TODOs marked the needed improvements, it
was still messy, with an immense scope re: any auditing.

The relevant scheduler functions now take `&self` (intentionally, as all
mutations should go through the `&mut impl DbTxn` passed in). Calls via `&self`
are still expected to be completely deterministic (as usual).
Luke Parker
2024-09-14 01:09:35 -04:00
parent 2edc2f3612
commit e1ad897f7e
11 changed files with 723 additions and 854 deletions
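For context, a minimal, self-contained sketch (not the crate's actual code) of the shape the scheduler methods take after this commit: `&self` receivers, a returned `Future`, and a retryable `EphemeralError`, with every mutation routed through the `&mut impl DbTxn`. The `DbTxn`, `Eventuality`, and `ExampleScheduler` items below are illustrative stand-ins, and the pattern assumes a toolchain with return-position `impl Trait` in traits (Rust 1.75+).

```rust
use core::future::Future;

// Stand-in for the real `serai_db::DbTxn`; illustration only.
pub trait DbTxn: Send {
  fn put(&mut self, key: &[u8], value: &[u8]);
}

// Stand-in for the network-specific eventuality type.
pub struct Eventuality;

/// Illustrative trait mirroring the new shape: `&self` (no interior mutation),
/// an async body (so planners can e.g. fetch decoys remotely), and a
/// retryable error type.
pub trait Scheduler: 'static + Send {
  type EphemeralError: Send;

  fn update(
    &self,
    txn: &mut impl DbTxn,
  ) -> impl Send + Future<Output = Result<Vec<Eventuality>, Self::EphemeralError>>;
}

pub struct ExampleScheduler;

impl Scheduler for ExampleScheduler {
  type EphemeralError = String;

  fn update(
    &self,
    txn: &mut impl DbTxn,
  ) -> impl Send + Future<Output = Result<Vec<Eventuality>, Self::EphemeralError>> {
    async move {
      // All writes go through the transaction, so re-running this call against
      // the same database state stays deterministic even though it's async.
      txn.put(b"operating-costs", &0u64.to_le_bytes());
      Ok(vec![])
    }
  }
}
```

A caller would drive this with something like `scheduler.update(&mut txn).await?` in its own async context, retrying on `EphemeralError` without risking divergent database state since nothing outside the transaction is mutated.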

@@ -2,7 +2,7 @@
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
use core::marker::PhantomData;
use core::{marker::PhantomData, future::Future};
use std::collections::HashMap;
use group::GroupEncoding;
@@ -14,7 +14,7 @@ use serai_db::DbTxn;
use primitives::{ReceivedOutput, Payment};
use scanner::{
LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor,
SchedulerUpdate, Scheduler as SchedulerTrait,
SchedulerUpdate, KeyScopedEventualities, Scheduler as SchedulerTrait,
};
use scheduler_primitives::*;
use utxo_scheduler_primitives::*;
@@ -23,16 +23,27 @@ mod db;
use db::Db;
/// A scheduler of transactions for networks premised on the UTXO model.
pub struct Scheduler<S: ScannerFeed, P: TransactionPlanner<S, ()>>(PhantomData<S>, PhantomData<P>);
#[allow(non_snake_case)]
#[derive(Clone)]
pub struct Scheduler<S: ScannerFeed, P: TransactionPlanner<S, ()>> {
planner: P,
_S: PhantomData<S>,
}
impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
fn aggregate_inputs(
/// Create a new scheduler.
pub fn new(planner: P) -> Self {
Self { planner, _S: PhantomData }
}
async fn aggregate_inputs(
&self,
txn: &mut impl DbTxn,
block: &BlockFor<S>,
key_for_change: KeyFor<S>,
key: KeyFor<S>,
coin: Coin,
) -> Vec<EventualityFor<S>> {
) -> Result<Vec<EventualityFor<S>>, <Self as SchedulerTrait<S>>::EphemeralError> {
let mut eventualities = vec![];
let mut operating_costs = Db::<S>::operating_costs(txn, coin).0;
@@ -41,13 +52,17 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
while outputs.len() > P::MAX_INPUTS {
let to_aggregate = outputs.drain(.. P::MAX_INPUTS).collect::<Vec<_>>();
let Some(planned) = P::plan_transaction_with_fee_amortization(
&mut operating_costs,
P::fee_rate(block, coin),
to_aggregate,
vec![],
Some(key_for_change),
) else {
let Some(planned) = self
.planner
.plan_transaction_with_fee_amortization(
&mut operating_costs,
P::fee_rate(block, coin),
to_aggregate,
vec![],
Some(key_for_change),
)
.await?
else {
continue;
};
@@ -57,7 +72,7 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
Db::<S>::set_outputs(txn, key, coin, &outputs);
Db::<S>::set_operating_costs(txn, coin, Amount(operating_costs));
eventualities
Ok(eventualities)
}
fn fulfillable_payments(
@@ -140,31 +155,36 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
}
}
fn handle_branch(
async fn handle_branch(
&self,
txn: &mut impl DbTxn,
block: &BlockFor<S>,
eventualities: &mut Vec<EventualityFor<S>>,
output: OutputFor<S>,
tx: TreeTransaction<AddressFor<S>>,
) -> bool {
) -> Result<bool, <Self as SchedulerTrait<S>>::EphemeralError> {
let key = output.key();
let coin = output.balance().coin;
let Some(payments) = tx.payments::<S>(coin, &P::branch_address(key), output.balance().amount.0)
else {
// If this output has become too small to satisfy this branch, drop it
return false;
return Ok(false);
};
let Some(planned) = P::plan_transaction_with_fee_amortization(
// Uses 0 as there's no operating costs to incur/amortize here
&mut 0,
P::fee_rate(block, coin),
vec![output],
payments,
None,
) else {
let Some(planned) = self
.planner
.plan_transaction_with_fee_amortization(
// Uses 0 as there's no operating costs to incur/amortize here
&mut 0,
P::fee_rate(block, coin),
vec![output],
payments,
None,
)
.await?
else {
// This Branch isn't viable, so drop it (and its children)
return false;
return Ok(false);
};
TransactionsToSign::<P::SignableTransaction>::send(txn, &key, &planned.signable);
@@ -172,15 +192,16 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
Self::queue_branches(txn, key, coin, planned.effected_payments, tx);
true
Ok(true)
}
fn step(
async fn step(
&self,
txn: &mut impl DbTxn,
active_keys: &[(KeyFor<S>, LifetimeStage)],
block: &BlockFor<S>,
key: KeyFor<S>,
) -> Vec<EventualityFor<S>> {
) -> Result<Vec<EventualityFor<S>>, <Self as SchedulerTrait<S>>::EphemeralError> {
let mut eventualities = vec![];
let key_for_change = match active_keys[0].1 {
@@ -198,7 +219,8 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
let coin = *coin;
// Perform any input aggregation we should
eventualities.append(&mut Self::aggregate_inputs(txn, block, key_for_change, key, coin));
eventualities
.append(&mut self.aggregate_inputs(txn, block, key_for_change, key, coin).await?);
// Fetch the operating costs/outputs
let mut operating_costs = Db::<S>::operating_costs(txn, coin).0;
@@ -228,15 +250,19 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
// scanner API)
let mut planned_outer = None;
for i in 0 .. 2 {
let Some(planned) = P::plan_transaction_with_fee_amortization(
&mut operating_costs,
P::fee_rate(block, coin),
outputs.clone(),
tree[0]
.payments::<S>(coin, &branch_address, tree[0].value())
.expect("payments were dropped despite providing an input of the needed value"),
Some(key_for_change),
) else {
let Some(planned) = self
.planner
.plan_transaction_with_fee_amortization(
&mut operating_costs,
P::fee_rate(block, coin),
outputs.clone(),
tree[0]
.payments::<S>(coin, &branch_address, tree[0].value())
.expect("payments were dropped despite providing an input of the needed value"),
Some(key_for_change),
)
.await?
else {
// This should trip on the first iteration or not at all
assert_eq!(i, 0);
// This doesn't have inputs even worth aggregating so drop the entire tree
@@ -272,46 +298,53 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
Self::queue_branches(txn, key, coin, planned.effected_payments, tree.remove(0));
}
eventualities
Ok(eventualities)
}
fn flush_outputs(
async fn flush_outputs(
&self,
txn: &mut impl DbTxn,
eventualities: &mut HashMap<Vec<u8>, Vec<EventualityFor<S>>>,
eventualities: &mut KeyScopedEventualities<S>,
block: &BlockFor<S>,
from: KeyFor<S>,
to: KeyFor<S>,
coin: Coin,
) {
) -> Result<(), <Self as SchedulerTrait<S>>::EphemeralError> {
let from_bytes = from.to_bytes().as_ref().to_vec();
// Ensure our inputs are aggregated
eventualities
.entry(from_bytes.clone())
.or_insert(vec![])
.append(&mut Self::aggregate_inputs(txn, block, to, from, coin));
.append(&mut self.aggregate_inputs(txn, block, to, from, coin).await?);
// Now that our inputs are aggregated, transfer all of them to the new key
let mut operating_costs = Db::<S>::operating_costs(txn, coin).0;
let outputs = Db::<S>::outputs(txn, from, coin).unwrap();
if outputs.is_empty() {
return;
return Ok(());
}
let planned = P::plan_transaction_with_fee_amortization(
&mut operating_costs,
P::fee_rate(block, coin),
outputs,
vec![],
Some(to),
);
let planned = self
.planner
.plan_transaction_with_fee_amortization(
&mut operating_costs,
P::fee_rate(block, coin),
outputs,
vec![],
Some(to),
)
.await?;
Db::<S>::set_operating_costs(txn, coin, Amount(operating_costs));
let Some(planned) = planned else { return };
let Some(planned) = planned else { return Ok(()) };
TransactionsToSign::<P::SignableTransaction>::send(txn, &from, &planned.signable);
eventualities.get_mut(&from_bytes).unwrap().push(planned.eventuality);
Ok(())
}
}
impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> SchedulerTrait<S> for Scheduler<S, P> {
type EphemeralError = P::EphemeralError;
type SignableTransaction = P::SignableTransaction;
fn activate_key(txn: &mut impl DbTxn, key: KeyFor<S>) {
@@ -324,29 +357,32 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> SchedulerTrait<S> for Schedul
}
fn flush_key(
&self,
txn: &mut impl DbTxn,
block: &BlockFor<S>,
retiring_key: KeyFor<S>,
new_key: KeyFor<S>,
) -> HashMap<Vec<u8>, Vec<EventualityFor<S>>> {
let mut eventualities = HashMap::new();
for coin in S::NETWORK.coins() {
// Move the payments to the new key
{
let still_queued = Db::<S>::queued_payments(txn, retiring_key, *coin).unwrap();
let mut new_queued = Db::<S>::queued_payments(txn, new_key, *coin).unwrap();
) -> impl Send + Future<Output = Result<KeyScopedEventualities<S>, Self::EphemeralError>> {
async move {
let mut eventualities = HashMap::new();
for coin in S::NETWORK.coins() {
// Move the payments to the new key
{
let still_queued = Db::<S>::queued_payments(txn, retiring_key, *coin).unwrap();
let mut new_queued = Db::<S>::queued_payments(txn, new_key, *coin).unwrap();
let mut queued = still_queued;
queued.append(&mut new_queued);
let mut queued = still_queued;
queued.append(&mut new_queued);
Db::<S>::set_queued_payments(txn, retiring_key, *coin, &[]);
Db::<S>::set_queued_payments(txn, new_key, *coin, &queued);
Db::<S>::set_queued_payments(txn, retiring_key, *coin, &[]);
Db::<S>::set_queued_payments(txn, new_key, *coin, &queued);
}
// Move the outputs to the new key
self.flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin).await?;
}
// Move the outputs to the new key
Self::flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin);
Ok(eventualities)
}
eventualities
}
fn retire_key(txn: &mut impl DbTxn, key: KeyFor<S>) {
@@ -359,155 +395,174 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> SchedulerTrait<S> for Schedul
}
fn update(
&self,
txn: &mut impl DbTxn,
block: &BlockFor<S>,
active_keys: &[(KeyFor<S>, LifetimeStage)],
update: SchedulerUpdate<S>,
) -> HashMap<Vec<u8>, Vec<EventualityFor<S>>> {
let mut eventualities = HashMap::new();
) -> impl Send + Future<Output = Result<KeyScopedEventualities<S>, Self::EphemeralError>> {
async move {
let mut eventualities = HashMap::new();
// Accumulate the new outputs
{
let mut outputs_by_key = HashMap::new();
for output in update.outputs() {
// If this aligns for a branch, handle it
if let Some(branch) = Db::<S>::take_pending_branch(txn, output.key(), output.balance()) {
if Self::handle_branch(
txn,
block,
eventualities.entry(output.key().to_bytes().as_ref().to_vec()).or_insert(vec![]),
output.clone(),
branch,
) {
// If we could use it for a branch, we do and move on
// Else, we let it be accumulated by the standard accumulation code
continue;
// Accumulate the new outputs
{
let mut outputs_by_key = HashMap::new();
for output in update.outputs() {
// If this aligns for a branch, handle it
if let Some(branch) = Db::<S>::take_pending_branch(txn, output.key(), output.balance()) {
if self
.handle_branch(
txn,
block,
eventualities.entry(output.key().to_bytes().as_ref().to_vec()).or_insert(vec![]),
output.clone(),
branch,
)
.await?
{
// If we could use it for a branch, we do and move on
// Else, we let it be accumulated by the standard accumulation code
continue;
}
}
let coin = output.balance().coin;
outputs_by_key
// Index by key and coin
.entry((output.key().to_bytes().as_ref().to_vec(), coin))
// If we haven't accumulated here prior, read the outputs from the database
.or_insert_with(|| (output.key(), Db::<S>::outputs(txn, output.key(), coin).unwrap()))
.1
.push(output.clone());
}
// Write the outputs back to the database
for ((_key_vec, coin), (key, outputs)) in outputs_by_key {
Db::<S>::set_outputs(txn, key, coin, &outputs);
}
}
// Fulfill the payments we prior couldn't
for (key, _stage) in active_keys {
eventualities
.entry(key.to_bytes().as_ref().to_vec())
.or_insert(vec![])
.append(&mut self.step(txn, active_keys, block, *key).await?);
}
// If this key has been flushed, forward all outputs
match active_keys[0].1 {
LifetimeStage::ActiveYetNotReporting |
LifetimeStage::Active |
LifetimeStage::UsingNewForChange => {}
LifetimeStage::Forwarding | LifetimeStage::Finishing => {
for coin in S::NETWORK.coins() {
self
.flush_outputs(
txn,
&mut eventualities,
block,
active_keys[0].0,
active_keys[1].0,
*coin,
)
.await?;
}
}
let coin = output.balance().coin;
outputs_by_key
// Index by key and coin
.entry((output.key().to_bytes().as_ref().to_vec(), coin))
// If we haven't accumulated here prior, read the outputs from the database
.or_insert_with(|| (output.key(), Db::<S>::outputs(txn, output.key(), coin).unwrap()))
.1
.push(output.clone());
}
// Write the outputs back to the database
for ((_key_vec, coin), (key, outputs)) in outputs_by_key {
Db::<S>::set_outputs(txn, key, coin, &outputs);
}
}
// Fulfill the payments we prior couldn't
for (key, _stage) in active_keys {
eventualities
.entry(key.to_bytes().as_ref().to_vec())
.or_insert(vec![])
.append(&mut Self::step(txn, active_keys, block, *key));
}
// Create the transactions for the forwards/burns
{
let mut planned_txs = vec![];
for forward in update.forwards() {
let key = forward.key();
// If this key has been flushed, forward all outputs
match active_keys[0].1 {
LifetimeStage::ActiveYetNotReporting |
LifetimeStage::Active |
LifetimeStage::UsingNewForChange => {}
LifetimeStage::Forwarding | LifetimeStage::Finishing => {
for coin in S::NETWORK.coins() {
Self::flush_outputs(
txn,
&mut eventualities,
block,
active_keys[0].0,
active_keys[1].0,
*coin,
);
assert_eq!(active_keys.len(), 2);
assert_eq!(active_keys[0].1, LifetimeStage::Forwarding);
assert_eq!(active_keys[1].1, LifetimeStage::Active);
let forward_to_key = active_keys[1].0;
let Some(plan) = self
.planner
.plan_transaction_with_fee_amortization(
// This uses 0 for the operating costs as we don't incur any here
// If the output can't pay for itself to be forwarded, we simply drop it
&mut 0,
P::fee_rate(block, forward.balance().coin),
vec![forward.clone()],
vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)],
None,
)
.await?
else {
continue;
};
planned_txs.push((key, plan));
}
for to_return in update.returns() {
let key = to_return.output().key();
let out_instruction =
Payment::new(to_return.address().clone(), to_return.output().balance(), None);
let Some(plan) = self
.planner
.plan_transaction_with_fee_amortization(
// This uses 0 for the operating costs as we don't incur any here
// If the output can't pay for itself to be returned, we simply drop it
&mut 0,
P::fee_rate(block, out_instruction.balance().coin),
vec![to_return.output().clone()],
vec![out_instruction],
None,
)
.await?
else {
continue;
};
planned_txs.push((key, plan));
}
for (key, planned_tx) in planned_txs {
// Send the transactions off for signing
TransactionsToSign::<P::SignableTransaction>::send(txn, &key, &planned_tx.signable);
// Insert the Eventualities into the result
eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality);
}
Ok(eventualities)
}
}
// Create the transactions for the forwards/burns
{
let mut planned_txs = vec![];
for forward in update.forwards() {
let key = forward.key();
assert_eq!(active_keys.len(), 2);
assert_eq!(active_keys[0].1, LifetimeStage::Forwarding);
assert_eq!(active_keys[1].1, LifetimeStage::Active);
let forward_to_key = active_keys[1].0;
let Some(plan) = P::plan_transaction_with_fee_amortization(
// This uses 0 for the operating costs as we don't incur any here
// If the output can't pay for itself to be forwarded, we simply drop it
&mut 0,
P::fee_rate(block, forward.balance().coin),
vec![forward.clone()],
vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)],
None,
) else {
continue;
};
planned_txs.push((key, plan));
}
for to_return in update.returns() {
let key = to_return.output().key();
let out_instruction =
Payment::new(to_return.address().clone(), to_return.output().balance(), None);
let Some(plan) = P::plan_transaction_with_fee_amortization(
// This uses 0 for the operating costs as we don't incur any here
// If the output can't pay for itself to be returned, we simply drop it
&mut 0,
P::fee_rate(block, out_instruction.balance().coin),
vec![to_return.output().clone()],
vec![out_instruction],
None,
) else {
continue;
};
planned_txs.push((key, plan));
}
for (key, planned_tx) in planned_txs {
// Send the transactions off for signing
TransactionsToSign::<P::SignableTransaction>::send(txn, &key, &planned_tx.signable);
// Insert the Eventualities into the result
eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality);
}
eventualities
}
}
fn fulfill(
&self,
txn: &mut impl DbTxn,
block: &BlockFor<S>,
active_keys: &[(KeyFor<S>, LifetimeStage)],
payments: Vec<Payment<AddressFor<S>>>,
) -> HashMap<Vec<u8>, Vec<EventualityFor<S>>> {
// Find the key to filfill these payments with
let fulfillment_key = match active_keys[0].1 {
LifetimeStage::ActiveYetNotReporting => {
panic!("expected to fulfill payments despite not reporting for the oldest key")
) -> impl Send + Future<Output = Result<KeyScopedEventualities<S>, Self::EphemeralError>> {
async move {
// Find the key to filfill these payments with
let fulfillment_key = match active_keys[0].1 {
LifetimeStage::ActiveYetNotReporting => {
panic!("expected to fulfill payments despite not reporting for the oldest key")
}
LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0,
LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0,
};
// Queue the payments for this key
for coin in S::NETWORK.coins() {
let mut queued_payments = Db::<S>::queued_payments(txn, fulfillment_key, *coin).unwrap();
queued_payments
.extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned());
Db::<S>::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments);
}
LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0,
LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0,
};
// Queue the payments for this key
for coin in S::NETWORK.coins() {
let mut queued_payments = Db::<S>::queued_payments(txn, fulfillment_key, *coin).unwrap();
queued_payments
.extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned());
Db::<S>::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments);
// Handle the queued payments
Ok(HashMap::from([(
fulfillment_key.to_bytes().as_ref().to_vec(),
self.step(txn, active_keys, block, fulfillment_key).await?,
)]))
}
// Handle the queued payments
HashMap::from([(
fulfillment_key.to_bytes().as_ref().to_vec(),
Self::step(txn, active_keys, block, fulfillment_key),
)])
}
}